Compare commits


No commits in common. "main" and "Boost_CFS" have entirely different histories.

253 changed files with 9290 additions and 43119 deletions

View File

@@ -5,12 +5,11 @@ Checks: '-*,
cppcoreguidelines-*,
modernize-*,
performance-*,
-modernize-use-nodiscard,
-cppcoreguidelines-pro-type-vararg,
-modernize-use-trailing-return-type,
-bugprone-exception-escape'
HeaderFilterRegex: 'bayesnet/*'
HeaderFilterRegex: 'src/*'
AnalyzeTemporaryDtors: false
WarningsAsErrors: ''
FormatStyle: file

View File

@@ -1,39 +1,31 @@
compilation_database_dir: build_Debug
output_directory: diagrams
compilation_database_dir: build
output_directory: puml
diagrams:
BayesNet:
type: class
glob:
- bayesnet/*.h
- bayesnet/classifiers/*.h
- bayesnet/classifiers/*.cc
- bayesnet/ensembles/*.h
- bayesnet/ensembles/*.cc
- bayesnet/feature_selection/*.h
- bayesnet/feature_selection/*.cc
- bayesnet/network/*.h
- bayesnet/network/*.cc
- bayesnet/utils/*.h
- bayesnet/utils/*.cc
- src/BayesNet/*.cc
- src/Platform/*.cc
using_namespace: bayesnet
include:
# Only include entities from the following namespaces
namespaces:
- bayesnet
exclude:
access:
- private
- platform
plantuml:
style:
# Apply this style to all classes in the diagram
class: "#aliceblue;line:blue;line.dotted;text:blue"
# Apply this style to all packages in the diagram
package: "#back:grey"
# Make all template instantiation relations point upwards and draw them
# as green and dotted lines
instantiation: "up[#green,dotted]"
cmd: "/usr/bin/plantuml -tsvg \"diagrams/{}.puml\""
before:
- 'title clang-uml class diagram model'
mermaid:
before:
- 'classDiagram'
after:
- "note left of {{ alias(\"MyProjectMain\") }}: Main class of myproject library."
sequence:
type: sequence
glob:
- src/Platform/main.cc
combine_free_functions_into_file_participants: true
using_namespace:
- std
- bayesnet
- platform
include:
paths:
- src/BayesNet
- src/Platform
start_from:
- function: main(int,const char **)

View File

@@ -1,57 +0,0 @@
FROM mcr.microsoft.com/devcontainers/cpp:ubuntu22.04
ARG REINSTALL_CMAKE_VERSION_FROM_SOURCE="3.29.3"
# Optionally install the cmake for vcpkg
COPY ./reinstall-cmake.sh /tmp/
RUN if [ "${REINSTALL_CMAKE_VERSION_FROM_SOURCE}" != "none" ]; then \
chmod +x /tmp/reinstall-cmake.sh && /tmp/reinstall-cmake.sh ${REINSTALL_CMAKE_VERSION_FROM_SOURCE}; \
fi \
&& rm -f /tmp/reinstall-cmake.sh
# [Optional] Uncomment this section to install additional vcpkg ports.
# RUN su vscode -c "${VCPKG_ROOT}/vcpkg install <your-port-name-here>"
# [Optional] Uncomment this section to install additional packages.
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install --no-install-recommends wget software-properties-common libdatetime-perl libcapture-tiny-perl libdatetime-format-dateparse-perl libgd-perl
# Add PPA for GCC 13
RUN add-apt-repository ppa:ubuntu-toolchain-r/test
RUN apt-get update
# Install GCC 13.1
RUN apt-get install -y gcc-13 g++-13 doxygen
# Install lcov 2.1
RUN wget --quiet https://github.com/linux-test-project/lcov/releases/download/v2.1/lcov-2.1.tar.gz && \
tar -xvf lcov-2.1.tar.gz && \
cd lcov-2.1 && \
make install
RUN rm lcov-2.1.tar.gz
RUN rm -fr lcov-2.1
# Install Miniconda
RUN mkdir -p /opt/conda
RUN wget --quiet "https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-aarch64.sh" -O /opt/conda/miniconda.sh && \
bash /opt/conda/miniconda.sh -b -p /opt/miniconda
# Add conda to PATH
ENV PATH=/opt/miniconda/bin:$PATH
# add CXX and CC to the environment with gcc 13
ENV CXX=/usr/bin/g++-13
ENV CC=/usr/bin/gcc-13
# link the last gcov version
RUN rm /usr/bin/gcov
RUN ln -s /usr/bin/gcov-13 /usr/bin/gcov
# change ownership of /opt/miniconda to vscode user
RUN chown -R vscode:vscode /opt/miniconda
USER vscode
RUN conda init
RUN conda install -y -c conda-forge yaml pytorch

View File

@@ -1,37 +0,0 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/cpp
{
"name": "C++",
"build": {
"dockerfile": "Dockerfile"
},
// "features": {
// "ghcr.io/devcontainers/features/conda:1": {}
// }
// Features to add to the dev container. More info: https://containers.dev/features.
// "features": {},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
// Use 'postCreateCommand' to run commands after the container is created.
"postCreateCommand": "make release && make debug && echo 'Done!'",
// Configure tool-specific properties.
// "customizations": {},
"customizations": {
// Configure properties specific to VS Code.
"vscode": {
"settings": {},
"extensions": [
"ms-vscode.cpptools",
"ms-vscode.cpptools-extension-pack",
"ms-vscode.cpptools-themes",
"ms-vscode.cmake-tools",
"ms-azuretools.vscode-docker",
"jbenden.c-cpp-flylint",
"matepek.vscode-catch2-test-adapter",
"GitHub.copilot"
]
}
}
// Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
// "remoteUser": "root"
}

View File

@@ -1,59 +0,0 @@
#!/usr/bin/env bash
#-------------------------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
#-------------------------------------------------------------------------------------------------------------
#
set -e
CMAKE_VERSION=${1:-"none"}
if [ "${CMAKE_VERSION}" = "none" ]; then
echo "No CMake version specified, skipping CMake reinstallation"
exit 0
fi
# Cleanup temporary directory and associated files when exiting the script.
cleanup() {
EXIT_CODE=$?
set +e
if [[ -n "${TMP_DIR}" ]]; then
echo "Executing cleanup of tmp files"
rm -Rf "${TMP_DIR}"
fi
exit $EXIT_CODE
}
trap cleanup EXIT
echo "Installing CMake..."
apt-get -y purge --auto-remove cmake
mkdir -p /opt/cmake
architecture=$(dpkg --print-architecture)
case "${architecture}" in
arm64)
ARCH=aarch64 ;;
amd64)
ARCH=x86_64 ;;
*)
echo "Unsupported architecture ${architecture}."
exit 1
;;
esac
CMAKE_BINARY_NAME="cmake-${CMAKE_VERSION}-linux-${ARCH}.sh"
CMAKE_CHECKSUM_NAME="cmake-${CMAKE_VERSION}-SHA-256.txt"
TMP_DIR=$(mktemp -d -t cmake-XXXXXXXXXX)
echo "${TMP_DIR}"
cd "${TMP_DIR}"
curl -sSL "https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/${CMAKE_BINARY_NAME}" -O
curl -sSL "https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/${CMAKE_CHECKSUM_NAME}" -O
sha256sum -c --ignore-missing "${CMAKE_CHECKSUM_NAME}"
sh "${TMP_DIR}/${CMAKE_BINARY_NAME}" --prefix=/opt/cmake --skip-license
ln -s /opt/cmake/bin/cmake /usr/local/bin/cmake
ln -s /opt/cmake/bin/ctest /usr/local/bin/ctest

View File

@@ -1,12 +0,0 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for more information:
# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
# https://containers.dev/guide/dependabot
version: 2
updates:
- package-ecosystem: "devcontainers"
directory: "/"
schedule:
interval: weekly

.gitignore
View File

@@ -32,16 +32,10 @@
*.out
*.app
build/**
build_*/**
build_debug/**
build_release/**
*.dSYM/**
cmake-build*/**
.idea
puml/**
.vscode/settings.json
sample/build
**/.DS_Store
docs/manual
docs/man3
docs/man
docs/Doxyfile

.gitmodules
View File

@@ -1,21 +1,15 @@
[submodule "lib/json"]
path = lib/json
url = https://github.com/nlohmann/json.git
master = master
update = merge
[submodule "lib/folding"]
path = lib/folding
url = https://github.com/rmontanana/folding
main = main
update = merge
[submodule "tests/lib/catch2"]
path = tests/lib/catch2
url = https://github.com/catchorg/Catch2.git
main = main
update = merge
[submodule "tests/lib/Files"]
path = tests/lib/Files
url = https://github.com/rmontanana/ArffFiles
[submodule "lib/mdlp"]
path = lib/mdlp
url = https://github.com/rmontanana/mdlp
[submodule "lib/catch2"]
path = lib/catch2
url = https://github.com/catchorg/Catch2.git
[submodule "lib/argparse"]
path = lib/argparse
url = https://github.com/p-ranav/argparse
[submodule "lib/json"]
path = lib/json
url = https://github.com/nlohmann/json.git
[submodule "lib/libxlsxwriter"]
path = lib/libxlsxwriter
url = https://github.com/jmcnamara/libxlsxwriter.git

View File

@@ -1,4 +0,0 @@
{
"sonarCloudOrganization": "rmontanana",
"projectKey": "rmontanana_BayesNet"
}

View File

@@ -3,47 +3,15 @@
{
"name": "Mac",
"includePath": [
"/Users/rmontanana/Code/BayesNet/**"
"${workspaceFolder}/**"
],
"defines": [],
"macFrameworkPath": [
"/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/include"
"/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks"
],
"cStandard": "c17",
"cppStandard": "c++17",
"compileCommands": "",
"intelliSenseMode": "macos-clang-arm64",
"mergeConfigurations": false,
"browse": {
"path": [
"/Users/rmontanana/Code/BayesNet/**",
"${workspaceFolder}"
],
"limitSymbolsToIncludedHeaders": true
},
"configurationProvider": "ms-vscode.cmake-tools"
},
{
"name": "Linux",
"includePath": [
"/home/rmontanana/Code/BayesNet/**",
"/home/rmontanana/Code/libtorch/include/torch/csrc/api/include/",
"/home/rmontanana/Code/BayesNet/lib/"
],
"defines": [],
"cStandard": "c17",
"cppStandard": "c++17",
"intelliSenseMode": "linux-gcc-x64",
"mergeConfigurations": false,
"compilerPath": "/usr/bin/g++",
"browse": {
"path": [
"/home/rmontanana/Code/BayesNet/**",
"${workspaceFolder}"
],
"limitSymbolsToIncludedHeaders": true
},
"configurationProvider": "ms-vscode.cmake-tools"
"compileCommands": "${workspaceFolder}/cmake-build-release/compile_commands.json"
}
],
"version": 4

.vscode/launch.json
View File

@@ -5,44 +5,93 @@
"type": "lldb",
"request": "launch",
"name": "sample",
"program": "${workspaceFolder}/build_release/sample/bayesnet_sample",
"program": "${workspaceFolder}/build/sample/BayesNetSample",
"args": [
"${workspaceFolder}/tests/data/glass.arff"
]
"-d",
"iris",
"-m",
"TANLd",
"-s",
"271",
"-p",
"/Users/rmontanana/Code/discretizbench/datasets/",
],
//"cwd": "${workspaceFolder}/build/sample/",
},
{
"type": "lldb",
"request": "launch",
"name": "experiment",
"program": "${workspaceFolder}/build/src/Platform/b_main",
"args": [
"-m",
"TAN",
"--stratified",
"-d",
"zoo",
"--discretize"
// "--hyperparameters",
// "{\"repeatSparent\": true, \"maxModels\": 12}"
],
"cwd": "/Users/rmontanana/Code/odtebench",
},
{
"type": "lldb",
"request": "launch",
"name": "best",
"program": "${workspaceFolder}/build/src/Platform/b_best",
"args": [
"-m",
"BoostAODE",
"-s",
"accuracy",
"--build",
],
"cwd": "/Users/rmontanana/Code/discretizbench",
},
{
"type": "lldb",
"request": "launch",
"name": "manage",
"program": "${workspaceFolder}/build/src/Platform/b_manage",
"args": [
"-n",
"20"
],
"cwd": "/Users/rmontanana/Code/discretizbench",
},
{
"type": "lldb",
"request": "launch",
"name": "list",
"program": "${workspaceFolder}/build/src/Platform/b_list",
"args": [],
//"cwd": "/Users/rmontanana/Code/discretizbench",
"cwd": "/home/rmontanana/Code/covbench",
},
{
"type": "lldb",
"request": "launch",
"name": "test",
"program": "${workspaceFolder}/build_Debug/tests/TestBayesNet",
"program": "${workspaceFolder}/build/tests/unit_tests",
"args": [
"No features selected"
"-c=\"Metrics Test\"",
// "-s",
],
"cwd": "${workspaceFolder}/build_Debug/tests"
"cwd": "${workspaceFolder}/build/tests",
},
{
"name": "(gdb) Launch",
"name": "Build & debug active file",
"type": "cppdbg",
"request": "launch",
"program": "enter program name, for example ${workspaceFolder}/a.out",
"program": "${workspaceFolder}/build/bayesnet",
"args": [],
"stopAtEntry": false,
"cwd": "${fileDirname}",
"cwd": "${workspaceFolder}",
"environment": [],
"externalConsole": false,
"MIMode": "gdb",
"setupCommands": [
{
"description": "Enable pretty-printing for gdb",
"text": "-enable-pretty-printing",
"ignoreFailures": true
},
{
"description": "Set Disassembly Flavor to Intel",
"text": "-gdb-set disassembly-flavor intel",
"ignoreFailures": true
}
]
"MIMode": "lldb",
"preLaunchTask": "CMake: build"
}
]
}

View File

@@ -1,129 +0,0 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [1.0.6] 2024-11-23
### Fixed
Prevent existing edges from being added to the network in the `add_edge` method.
Don't allow adding nodes or edges to already fitted networks.
- Number of threads spawned
- Network class tests
### Added
Library logo, generated with <https://openart.ai>, added to README.md
- Link to the coverage report in the README.md coverage label.
- *convergence_best* hyperparameter to the BoostAODE class, to control the way the prior accuracy is computed if convergence is set. Default value is *false*.
- SPnDE model.
- A2DE model.
- BoostA2DE model.
- A2DE & SPnDE tests.
- Add tests to reach 99% of coverage.
- Add tests to check the correct version of the mdlp, folding and json libraries.
- Library documentation generated with Doxygen.
- Link to documentation in the README.md.
Three types of smoothing for the Bayesian Network: ORIGINAL, LAPLACE and CESTNIK.
### Internal
- Fixed doxygen optional dependency
- Add env parallel variable to Makefile
- Add CountingSemaphore class to manage the number of threads spawned.
- Ignore CUDA language in CMake CodeCoverage module.
- Update mdlp library as a git submodule.
- Create library ShuffleArffFile to limit the number of samples with a parameter and shuffle them.
- Refactor catch2 library location to test/lib
- Refactor loadDataset function in tests.
- Remove conditionalEdgeWeights method in BayesMetrics.
- Refactor Coverage Report generation.
- Add devcontainer to work on apple silicon.
- Change build cmake folder names to Debug & Release.
- Add a Makefile target (doc) to generate the documentation.
- Add a Makefile target (doc-install) to install the documentation.
### Libraries versions
- mdlp: 2.0.1
- Folding: 1.1.0
- json: 3.11
- ArffFiles: 1.1.0
## [1.0.5] 2024-04-20
### Added
- Install command and instructions in README.md
Prefix to install command to install the package in any location.
- The 'block_update' hyperparameter to the BoostAODE class, to control the way weights/significances are updated. Default value is false.
- Html report of coverage in the coverage folder. It is created with *make viewcoverage*
- Badges of coverage and code quality (codacy) in README.md. Coverage badge is updated with *make viewcoverage*
- Tests to reach 97% of coverage.
- Copyright header to source files.
- Diagrams to README.md: UML class diagram & dependency diagram
- Action to create diagrams to Makefile: *make diagrams*
### Changed
The sample app is now a separate target in the Makefile and shows how to use the library with a sample dataset
- The worse model count in BoostAODE is reset to 0 every time a new model produces better accuracy, so the tolerance of the model is meant to be the number of **consecutive** models that produce worse accuracy.
- Default hyperparameter values in BoostAODE: bisection is true, maxTolerance is 3, convergence is true
### Removed
- The 'predict_single' hyperparameter from the BoostAODE class.
- The 'repeatSparent' hyperparameter from the BoostAODE class.
## [1.0.4] 2024-03-06
### Added
Change *ascending* hyperparameter to *order* with these possible values *{"asc", "desc", "rand"}*. Default is *"desc"*.
- Add the *predict_single* hyperparameter to control if only the last model created is used to predict in boost training or the whole ensemble (all the models built so far). Default is true.
- sample app to show how to use the library (make sample)
### Changed
- Change the library structure adding folders for each group of classes (classifiers, ensembles, etc).
- The significances of the models generated under the feature selection algorithm are now computed after all the models have been generated and an &alpha;<sub>t</sub> value is computed and assigned to each model.
## [1.0.3] 2024-02-25
### Added
- Voting / probability aggregation in Ensemble classes
- predict_proba method in Classifier
- predict_proba method in BoostAODE
- predict_voting parameter in BoostAODE constructor to use voting or probability to predict (default is voting)
- hyperparameter predict_voting to AODE, AODELd and BoostAODE (Ensemble child classes)
- tests to check predict & predict_proba coherence
## [1.0.2] - 2024-02-20
### Fixed
- Fix bug in BoostAODE: do not include the model if epsilon sub t is greater than 0.5
- Fix bug in BoostAODE: compare accuracy with previous accuracy instead of the first of the ensemble if convergence true
## [1.0.1] - 2024-02-12
### Added
- Notes in Classifier class
- BoostAODE: Add note with used features in initialization with feature selection
- BoostAODE: Add note with the number of models
- BoostAODE: Add note with the number of features used to create models if not all features are used
- Test version number in TestBayesModels
- Add tests with feature_select and notes on BoostAODE
### Fixed
- Network predict test
- Network predict_proba test
- Network score test

View File

@@ -1,5 +0,0 @@
# Set the default graph title
set(GRAPHVIZ_GRAPH_NAME "BayesNet dependency graph")
set(GRAPHVIZ_SHARED_LIBS OFF)
set(GRAPHVIZ_STATIC_LIBS ON)

View File

@@ -1,7 +1,7 @@
cmake_minimum_required(VERSION 3.20)
project(BayesNet
VERSION 1.0.6
VERSION 0.2.0
DESCRIPTION "Bayesian Network and basic classifiers Library."
HOMEPAGE_URL "https://github.com/rmontanana/bayesnet"
LANGUAGES CXX
@@ -24,38 +24,34 @@ set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fprofile-arcs -ftest-coverage -fno-elide-constructors")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -Ofast")
if (NOT ${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fno-default-inline")
endif()
# Options
# -------
option(ENABLE_CLANG_TIDY "Enable to add clang tidy." OFF)
option(ENABLE_TESTING "Unit testing build" OFF)
option(CODE_COVERAGE "Collect coverage from test library" OFF)
option(INSTALL_GTEST "Enable installation of googletest." OFF)
# Boost Library
set(Boost_USE_STATIC_LIBS OFF)
set(Boost_USE_MULTITHREADED ON)
set(Boost_USE_STATIC_RUNTIME OFF)
find_package(Boost 1.66.0 REQUIRED)
if(Boost_FOUND)
message("Boost_INCLUDE_DIRS=${Boost_INCLUDE_DIRS}")
include_directories(${Boost_INCLUDE_DIRS})
endif()
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread")
# CMakes modules
# --------------
set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules ${CMAKE_MODULE_PATH})
include(AddGitSubmodule)
if (CMAKE_BUILD_TYPE STREQUAL "Debug")
MESSAGE("Debug mode")
set(ENABLE_TESTING ON)
set(CODE_COVERAGE ON)
endif (CMAKE_BUILD_TYPE STREQUAL "Debug")
get_property(LANGUAGES GLOBAL PROPERTY ENABLED_LANGUAGES)
message(STATUS "Languages=${LANGUAGES}")
if (CODE_COVERAGE)
enable_testing()
include(CodeCoverage)
MESSAGE(STATUS "Code coverage enabled")
SET(GCC_COVERAGE_LINK_FLAGS " ${GCC_COVERAGE_LINK_FLAGS} -lgcov --coverage")
enable_testing()
include(CodeCoverage)
MESSAGE("Code coverage enabled")
set(CMAKE_CXX_FLAGS " ${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage -O0 -g")
SET(GCC_COVERAGE_LINK_FLAGS " ${GCC_COVERAGE_LINK_FLAGS} -lgcov --coverage")
endif (CODE_COVERAGE)
if (ENABLE_CLANG_TIDY)
@@ -64,45 +60,34 @@ endif (ENABLE_CLANG_TIDY)
# External libraries - dependencies of BayesNet
# ---------------------------------------------
# include(FetchContent)
add_git_submodule("lib/json")
add_git_submodule("lib/mdlp")
add_git_submodule("lib/argparse")
add_git_submodule("lib/json")
find_library(XLSXWRITER_LIB NAMES libxlsxwriter.dylib libxlsxwriter.so PATHS ${BayesNet_SOURCE_DIR}/lib/libxlsxwriter/lib)
message("XLSXWRITER_LIB=${XLSXWRITER_LIB}")
# Subdirectories
# --------------
add_subdirectory(config)
add_subdirectory(bayesnet)
add_subdirectory(lib/Files)
add_subdirectory(src/BayesNet)
add_subdirectory(src/Platform)
add_subdirectory(sample)
file(GLOB BayesNet_HEADERS CONFIGURE_DEPENDS ${BayesNet_SOURCE_DIR}/src/BayesNet/*.h ${BayesNet_SOURCE_DIR}/BayesNet/*.h)
file(GLOB BayesNet_SOURCES CONFIGURE_DEPENDS ${BayesNet_SOURCE_DIR}/src/BayesNet/*.cc ${BayesNet_SOURCE_DIR}/src/BayesNet/*.cpp)
file(GLOB Platform_SOURCES CONFIGURE_DEPENDS ${BayesNet_SOURCE_DIR}/src/Platform/*.cc ${BayesNet_SOURCE_DIR}/src/Platform/*.cpp)
# Testing
# -------
if (ENABLE_TESTING)
MESSAGE(STATUS "Testing enabled")
add_subdirectory(tests/lib/catch2)
MESSAGE("Testing enabled")
add_git_submodule("lib/catch2")
include(CTest)
add_subdirectory(tests)
endif (ENABLE_TESTING)
# Installation
# ------------
install(TARGETS BayesNet
ARCHIVE DESTINATION lib
LIBRARY DESTINATION lib
CONFIGURATIONS Release)
install(DIRECTORY bayesnet/ DESTINATION include/bayesnet FILES_MATCHING CONFIGURATIONS Release PATTERN "*.h")
install(FILES ${CMAKE_BINARY_DIR}/configured_files/include/bayesnet/config.h DESTINATION include/bayesnet CONFIGURATIONS Release)
# Documentation
# -------------
find_package(Doxygen)
if (Doxygen_FOUND)
set(DOC_DIR ${CMAKE_CURRENT_SOURCE_DIR}/docs)
set(doxyfile_in ${DOC_DIR}/Doxyfile.in)
set(doxyfile ${DOC_DIR}/Doxyfile)
configure_file(${doxyfile_in} ${doxyfile} @ONLY)
doxygen_add_docs(doxygen
WORKING_DIRECTORY ${DOC_DIR}
CONFIG_FILE ${doxyfile})
else (Doxygen_FOUND)
MESSAGE("* Doxygen not found")
endif (Doxygen_FOUND)

View File

@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2023 Ricardo Montañana Gómez
Copyright (c) <year> <copyright holders>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

Makefile
View File

@@ -1,22 +1,12 @@
SHELL := /bin/bash
.DEFAULT_GOAL := help
.PHONY: viewcoverage coverage setup help install uninstall diagrams buildr buildd test clean debug release sample updatebadge doc doc-install
.PHONY: coverage setup help build test clean debug release
f_release = build_Release
f_debug = build_Debug
f_diagrams = diagrams
app_targets = BayesNet
test_targets = TestBayesNet
clang-uml = clang-uml
plantuml = plantuml
lcov = lcov
genhtml = genhtml
dot = dot
docsrcdir = docs/manual
mansrcdir = docs/man3
mandestdir = /usr/local/share/man
sed_command_link = 's/e">LCOV -/e"><a href="https:\/\/rmontanana.github.io\/bayesnet">Back to manual<\/a> LCOV -/g'
sed_command_diagram = 's/Diagram"/Diagram" width="100%" height="100%" /g'
f_release = build_release
f_debug = build_debug
app_targets = b_best b_list b_main b_manage
test_targets = unit_tests_bayesnet unit_tests_platform
n_procs = -j 16
define ClearTests
@for t in $(test_targets); do \
@@ -39,45 +29,36 @@ setup: ## Install dependencies for tests and coverage
fi
@if [ "$(shell uname)" = "Linux" ]; then \
pip install gcovr; \
sudo dnf install lcov;\
fi
@echo "* You should install plantuml & graphviz for the diagrams"
diagrams: ## Create a UML class diagram & dependency diagram of the project (diagrams/BayesNet.png)
@which $(plantuml) || (echo ">>> Please install plantuml"; exit 1)
@which $(dot) || (echo ">>> Please install graphviz"; exit 1)
@which $(clang-uml) || (echo ">>> Please install clang-uml"; exit 1)
@export PLANTUML_LIMIT_SIZE=16384
@echo ">>> Creating UML class diagram of the project...";
@$(clang-uml) -p
@cd $(f_diagrams); \
$(plantuml) -tsvg BayesNet.puml
dest ?= ${HOME}/bin
install: ## Copy binary files to bin folder
@echo "Destination folder: $(dest)"
make buildr
@echo ">>> Copying files to $(dest)"
@cp $(f_release)/src/Platform/b_main $(dest)
@cp $(f_release)/src/Platform/b_list $(dest)
@cp $(f_release)/src/Platform/b_manage $(dest)
@cp $(f_release)/src/Platform/b_best $(dest)
dependency: ## Create a dependency graph diagram of the project (build/dependency.png)
@echo ">>> Creating dependency graph diagram of the project...";
$(MAKE) debug
cd $(f_debug) && cmake .. --graphviz=dependency.dot
@$(dot) -Tsvg $(f_debug)/dependency.dot.BayesNet -o $(f_diagrams)/dependency.svg
cd $(f_debug) && cmake .. --graphviz=dependency.dot && dot -Tpng dependency.dot -o dependency.png
buildd: ## Build the debug targets
cmake --build $(f_debug) -t $(app_targets) --parallel $(CMAKE_BUILD_PARALLEL_LEVEL)
cmake --build $(f_debug) -t $(app_targets) $(n_procs)
buildr: ## Build the release targets
cmake --build $(f_release) -t $(app_targets) --parallel $(CMAKE_BUILD_PARALLEL_LEVEL)
cmake --build $(f_release) -t $(app_targets) $(n_procs)
clean: ## Clean the tests info
@echo ">>> Cleaning Debug BayesNet tests...";
$(call ClearTests)
@echo ">>> Done";
uninstall: ## Uninstall library
@echo ">>> Uninstalling BayesNet...";
xargs rm < $(f_release)/install_manifest.txt
@echo ">>> Done";
prefix = "/usr/local"
install: ## Install library
@echo ">>> Installing BayesNet...";
@cmake --install $(f_release) --prefix $(prefix)
@echo ">>> Done";
clang-uml: ## Create uml class and sequence diagrams
clang-uml -p --add-compile-flag -I /usr/lib/gcc/x86_64-redhat-linux/8/include/
debug: ## Build a debug version of the project
@echo ">>> Building Debug BayesNet...";
@@ -91,96 +72,44 @@ release: ## Build a Release version of the project
@if [ -d ./$(f_release) ]; then rm -rf ./$(f_release); fi
@mkdir $(f_release);
@cmake -S . -B $(f_release) -D CMAKE_BUILD_TYPE=Release
@echo ">>> Done";
fname = "tests/data/iris.arff"
sample: ## Build sample
@echo ">>> Building Sample...";
@if [ -d ./sample/build ]; then rm -rf ./sample/build; fi
@cd sample && cmake -B build -S . && cmake --build build -t bayesnet_sample
sample/build/bayesnet_sample $(fname)
@echo ">>> Done";
opt = ""
test: ## Run tests (opt="-s") to get verbose output, (opt="-c='Test Maximum Spanning Tree'") to run only that section
@echo ">>> Running BayesNet tests...";
@echo ">>> Running BayesNet & Platform tests...";
@$(MAKE) clean
@cmake --build $(f_debug) -t $(test_targets) --parallel $(CMAKE_BUILD_PARALLEL_LEVEL)
@cmake --build $(f_debug) -t $(test_targets) $(n_procs)
@for t in $(test_targets); do \
echo ">>> Running $$t...";\
if [ -f $(f_debug)/tests/$$t ]; then \
cd $(f_debug)/tests ; \
./$$t $(opt) ; \
cd ../.. ; \
fi ; \
done
@echo ">>> Done";
opt = ""
testp: ## Run platform tests (opt="-s") to get verbose output, (opt="-c='Stratified Fold Test'") to run only that section
@echo ">>> Running Platform tests...";
@$(MAKE) clean
@cmake --build $(f_debug) --target unit_tests_platform $(n_procs)
@if [ -f $(f_debug)/tests/unit_tests_platform ]; then cd $(f_debug)/tests ; ./unit_tests_platform $(opt) ; fi ;
@echo ">>> Done";
opt = ""
testb: ## Run BayesNet tests (opt="-s") to get verbose output, (opt="-c='Test Maximum Spanning Tree'") to run only that section
@echo ">>> Running BayesNet tests...";
@$(MAKE) clean
@cmake --build $(f_debug) --target unit_tests_bayesnet $(n_procs)
@if [ -f $(f_debug)/tests/unit_tests_bayesnet ]; then cd $(f_debug)/tests ; ./unit_tests_bayesnet $(opt) ; fi ;
@echo ">>> Done";
coverage: ## Run tests and generate coverage report (build/index.html)
@echo ">>> Building tests with coverage..."
@which $(lcov) || (echo ">>> Please install lcov"; exit 1)
@if [ ! -f $(f_debug)/tests/coverage.info ] ; then $(MAKE) test ; fi
@echo ">>> Building report..."
@cd $(f_debug)/tests; \
$(lcov) --directory CMakeFiles --capture --demangle-cpp --ignore-errors source,source --output-file coverage.info >/dev/null 2>&1; \
$(lcov) --remove coverage.info '/usr/*' --output-file coverage.info >/dev/null 2>&1; \
$(lcov) --remove coverage.info 'lib/*' --output-file coverage.info >/dev/null 2>&1; \
$(lcov) --remove coverage.info 'libtorch/*' --output-file coverage.info >/dev/null 2>&1; \
$(lcov) --remove coverage.info 'tests/*' --output-file coverage.info >/dev/null 2>&1; \
$(lcov) --remove coverage.info 'bayesnet/utils/loguru.*' --ignore-errors unused --output-file coverage.info >/dev/null 2>&1; \
$(lcov) --remove coverage.info '/opt/miniconda/*' --ignore-errors unused --output-file coverage.info >/dev/null 2>&1; \
$(lcov) --summary coverage.info
@$(MAKE) updatebadge
@echo ">>> Building tests with coverage...";
@$(MAKE) test
@cd $(f_debug) ; \
gcovr --config ../gcovr.cfg tests ;
@echo ">>> Done";
viewcoverage: ## View the html coverage report
@which $(genhtml) >/dev/null || (echo ">>> Please install lcov (genhtml not found)"; exit 1)
@if [ ! -d $(docsrcdir)/coverage ]; then mkdir -p $(docsrcdir)/coverage; fi
@if [ ! -f $(f_debug)/tests/coverage.info ]; then \
echo ">>> No coverage.info file found. Run make coverage first!"; \
exit 1; \
fi
@$(genhtml) $(f_debug)/tests/coverage.info --demangle-cpp --output-directory $(docsrcdir)/coverage --title "BayesNet Coverage Report" -s -k -f --legend >/dev/null 2>&1;
@xdg-open $(docsrcdir)/coverage/index.html || open $(docsrcdir)/coverage/index.html 2>/dev/null
@echo ">>> Done";
updatebadge: ## Update the coverage badge in README.md
@which python || (echo ">>> Please install python"; exit 1)
@if [ ! -f $(f_debug)/tests/coverage.info ]; then \
echo ">>> No coverage.info file found. Run make coverage first!"; \
exit 1; \
fi
@echo ">>> Updating coverage badge..."
@env python update_coverage.py $(f_debug)/tests
@echo ">>> Done";
doc: ## Generate documentation
@echo ">>> Generating documentation..."
@cmake --build $(f_release) -t doxygen
@cp -rp diagrams $(docsrcdir)
@
@if [ "$(shell uname)" = "Darwin" ]; then \
sed -i "" $(sed_command_link) $(docsrcdir)/coverage/index.html ; \
sed -i "" $(sed_command_diagram) $(docsrcdir)/index.html ; \
else \
sed -i $(sed_command_link) $(docsrcdir)/coverage/index.html ; \
sed -i $(sed_command_diagram) $(docsrcdir)/index.html ; \
fi
@echo ">>> Done";
docdir = ""
doc-install: ## Install documentation
@echo ">>> Installing documentation..."
@if [ "$(docdir)" = "" ]; then \
echo "docdir parameter has to be set when calling doc-install, i.e. docdir=../bayesnet_help"; \
exit 1; \
fi
@if [ ! -d $(docdir) ]; then \
$(MAKE) doc; \
fi
@cp -rp $(docsrcdir)/* $(docdir)
@sudo cp -rp $(mansrcdir) $(mandestdir)
@echo ">>> Done";
help: ## Show help message
@IFS=$$'\n' ; \

README.md
View File

@@ -1,105 +1,51 @@
# <img src="logo.png" alt="logo" width="50"/> BayesNet
# BayesNet
![C++](https://img.shields.io/badge/c++-%2300599C.svg?style=flat&logo=c%2B%2B&logoColor=white)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](<https://opensource.org/licenses/MIT>)
![Gitea Release](https://img.shields.io/gitea/v/release/rmontanana/bayesnet?gitea_url=https://gitea.rmontanana.es:3000)
[![Codacy Badge](https://app.codacy.com/project/badge/Grade/cf3e0ac71d764650b1bf4d8d00d303b1)](https://app.codacy.com/gh/Doctorado-ML/BayesNet/dashboard?utm_source=gh&utm_medium=referral&utm_content=&utm_campaign=Badge_grade)
[![Security Rating](https://sonarcloud.io/api/project_badges/measure?project=rmontanana_BayesNet&metric=security_rating)](https://sonarcloud.io/summary/new_code?id=rmontanana_BayesNet)
[![Reliability Rating](https://sonarcloud.io/api/project_badges/measure?project=rmontanana_BayesNet&metric=reliability_rating)](https://sonarcloud.io/summary/new_code?id=rmontanana_BayesNet)
![Gitea Last Commit](https://img.shields.io/gitea/last-commit/rmontanana/bayesnet?gitea_url=https://gitea.rmontanana.es:3000&logo=gitea)
[![Coverage Badge](https://img.shields.io/badge/Coverage-99,1%25-green)](html/index.html)
[![DOI](https://zenodo.org/badge/667782806.svg)](https://doi.org/10.5281/zenodo.14210344)
Bayesian Network Classifier with libtorch from scratch
Bayesian Network Classifiers library
## 0. Setup
## Dependencies
Before compiling BayesNet, install the following dependencies.
The only external dependency is [libtorch](https://pytorch.org/cppdocs/installing.html) which can be installed with the following commands:
### boost library
[Getting Started](<https://www.boost.org/doc/libs/1_83_0/more/getting_started/index.html>)
The best option is to install the packages that the Linux distribution provides in its repository. If this is the case:
```bash
wget https://download.pytorch.org/libtorch/nightly/cpu/libtorch-shared-with-deps-latest.zip
unzip libtorch-shared-with-deps-latest.zip
sudo dnf install boost-devel
```
## Setup
### Getting the code
If this is not possible and the compressed package is installed, the following environment variable has to be set:
```bash
git clone --recurse-submodules https://github.com/doctorado-ml/bayesnet
export BOOST_ROOT=/path/to/library/
```
### libxlsxwriter
```bash
cd lib/libxlsxwriter
make
make install DESTDIR=/home/rmontanana/Code PREFIX=
```
The following environment variable has to be set:
```bash
export LD_LIBRARY_PATH=/usr/local/lib
```
### Release
```bash
make release
make buildr
sudo make install
```
### Debug & Tests
```bash
make debug
make test
```
### Coverage
```bash
make coverage
make viewcoverage
```
### Sample app
After building and installing the release version, you can run the sample app with the following commands:
```bash
make sample
make sample fname=tests/data/glass.arff
```
## Models
#### - TAN
#### - KDB
#### - SPODE
#### - SPnDE
#### - AODE
#### - A2DE
#### - [BoostAODE](docs/BoostAODE.md)
#### - BoostA2DE
### With Local Discretization
#### - TANLd
#### - KDBLd
#### - SPODELd
#### - AODELd
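
The snippet below is a minimal sketch of using one of these classifiers. The header path, the `Smoothing_t::LAPLACE` spelling and the tiny dataset are illustrative assumptions; the `fit`/`predict` signatures follow `BaseClassifier.h`, where X is an n x m container with one row per feature and one column per sample.

```cpp
#include <map>
#include <string>
#include <vector>
#include <bayesnet/classifiers/TAN.h> // assumed install location of the public headers

int main()
{
    // Two binary features, four samples (X is features x samples)
    std::vector<std::vector<int>> X = { { 0, 1, 0, 1 }, { 0, 0, 1, 1 } };
    std::vector<int> y = { 0, 0, 1, 1 };
    std::vector<std::string> features = { "f1", "f2" };
    std::string className = "class";
    std::map<std::string, std::vector<int>> states = {
        { "f1", { 0, 1 } }, { "f2", { 0, 1 } }, { "class", { 0, 1 } }
    };
    auto clf = bayesnet::TAN();
    clf.fit(X, y, features, className, states, bayesnet::Smoothing_t::LAPLACE);
    auto predictions = clf.predict(X); // std::vector<int> of predicted class labels
    return 0;
}
```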
## Documentation
### [Manual](https://rmontanana.github.io/bayesnet/)
### [Coverage report](https://rmontanana.github.io/bayesnet/coverage/index.html)
## Diagrams
### UML Class Diagram
![BayesNet UML Class Diagram](diagrams/BayesNet.svg)
### Dependency Diagram
![BayesNet Dependency Diagram](diagrams/dependency.svg)
## 1. Introduction

View File

@@ -1,47 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#pragma once
#include <vector>
#include <torch/torch.h>
#include <nlohmann/json.hpp>
#include "bayesnet/network/Network.h"
namespace bayesnet {
enum status_t { NORMAL, WARNING, ERROR };
class BaseClassifier {
public:
// X is nxm std::vector, y is nx1 std::vector
virtual BaseClassifier& fit(std::vector<std::vector<int>>& X, std::vector<int>& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) = 0;
// X is nxm tensor, y is nx1 tensor
virtual BaseClassifier& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) = 0;
virtual BaseClassifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) = 0;
virtual BaseClassifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const Smoothing_t smoothing) = 0;
virtual ~BaseClassifier() = default;
torch::Tensor virtual predict(torch::Tensor& X) = 0;
std::vector<int> virtual predict(std::vector<std::vector<int >>& X) = 0;
torch::Tensor virtual predict_proba(torch::Tensor& X) = 0;
std::vector<std::vector<double>> virtual predict_proba(std::vector<std::vector<int >>& X) = 0;
status_t virtual getStatus() const = 0;
float virtual score(std::vector<std::vector<int>>& X, std::vector<int>& y) = 0;
float virtual score(torch::Tensor& X, torch::Tensor& y) = 0;
int virtual getNumberOfNodes()const = 0;
int virtual getNumberOfEdges()const = 0;
int virtual getNumberOfStates() const = 0;
int virtual getClassNumStates() const = 0;
std::vector<std::string> virtual show() const = 0;
std::vector<std::string> virtual graph(const std::string& title = "") const = 0;
virtual std::string getVersion() = 0;
std::vector<std::string> virtual topological_order() = 0;
std::vector<std::string> virtual getNotes() const = 0;
std::string virtual dump_cpt()const = 0;
virtual void setHyperparameters(const nlohmann::json& hyperparameters) = 0;
std::vector<std::string>& getValidHyperparameters() { return validHyperparameters; }
protected:
virtual void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) = 0;
std::vector<std::string> validHyperparameters;
};
}

View File

@@ -1,12 +0,0 @@
include_directories(
${BayesNet_SOURCE_DIR}/lib/mdlp/src
${BayesNet_SOURCE_DIR}/lib/folding
${BayesNet_SOURCE_DIR}/lib/json/include
${BayesNet_SOURCE_DIR}
${CMAKE_BINARY_DIR}/configured_files/include
)
file(GLOB_RECURSE Sources "*.cc")
add_library(BayesNet ${Sources})
target_link_libraries(BayesNet fimdlp "${TORCH_LIBRARIES}")

View File

@@ -1,194 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#include <sstream>
#include "bayesnet/utils/bayesnetUtils.h"
#include "Classifier.h"
namespace bayesnet {
Classifier::Classifier(Network model) : model(model), m(0), n(0), metrics(Metrics()), fitted(false) {}
const std::string CLASSIFIER_NOT_FITTED = "Classifier has not been fitted";
Classifier& Classifier::build(const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const Smoothing_t smoothing)
{
this->features = features;
this->className = className;
this->states = states;
m = dataset.size(1);
n = features.size();
checkFitParameters();
auto n_classes = states.at(className).size();
metrics = Metrics(dataset, features, className, n_classes);
model.initialize();
buildModel(weights);
trainModel(weights, smoothing);
fitted = true;
return *this;
}
void Classifier::buildDataset(torch::Tensor& ytmp)
{
try {
auto yresized = torch::transpose(ytmp.view({ ytmp.size(0), 1 }), 0, 1);
dataset = torch::cat({ dataset, yresized }, 0);
}
catch (const std::exception& e) {
std::stringstream oss;
oss << "* Error in X and y dimensions *\n";
oss << "X dimensions: " << dataset.sizes() << "\n";
oss << "y dimensions: " << ytmp.sizes();
throw std::runtime_error(oss.str());
}
}
void Classifier::trainModel(const torch::Tensor& weights, Smoothing_t smoothing)
{
model.fit(dataset, weights, features, className, states, smoothing);
}
// X is nxm where n is the number of features and m the number of samples
Classifier& Classifier::fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing)
{
dataset = X;
buildDataset(y);
const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble);
return build(features, className, states, weights, smoothing);
}
// X is nxm where n is the number of features and m the number of samples
Classifier& Classifier::fit(std::vector<std::vector<int>>& X, std::vector<int>& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing)
{
dataset = torch::zeros({ static_cast<int>(X.size()), static_cast<int>(X[0].size()) }, torch::kInt32);
for (int i = 0; i < X.size(); ++i) {
dataset.index_put_({ i, "..." }, torch::tensor(X[i], torch::kInt32));
}
auto ytmp = torch::tensor(y, torch::kInt32);
buildDataset(ytmp);
const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble);
return build(features, className, states, weights, smoothing);
}
Classifier& Classifier::fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing)
{
this->dataset = dataset;
const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble);
return build(features, className, states, weights, smoothing);
}
Classifier& Classifier::fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const Smoothing_t smoothing)
{
this->dataset = dataset;
return build(features, className, states, weights, smoothing);
}
void Classifier::checkFitParameters()
{
if (torch::is_floating_point(dataset)) {
throw std::invalid_argument("dataset (X, y) must be of type Integer");
}
if (dataset.size(0) - 1 != features.size()) {
throw std::invalid_argument("Classifier: X " + std::to_string(dataset.size(0) - 1) + " and features " + std::to_string(features.size()) + " must have the same number of features");
}
if (states.find(className) == states.end()) {
throw std::invalid_argument("class name not found in states");
}
for (auto feature : features) {
if (states.find(feature) == states.end()) {
throw std::invalid_argument("feature [" + feature + "] not found in states");
}
}
}
torch::Tensor Classifier::predict(torch::Tensor& X)
{
if (!fitted) {
throw std::logic_error(CLASSIFIER_NOT_FITTED);
}
return model.predict(X);
}
std::vector<int> Classifier::predict(std::vector<std::vector<int>>& X)
{
if (!fitted) {
throw std::logic_error(CLASSIFIER_NOT_FITTED);
}
auto m_ = X[0].size();
auto n_ = X.size();
std::vector<std::vector<int>> Xd(n_, std::vector<int>(m_, 0));
for (auto i = 0; i < n_; i++) {
Xd[i] = std::vector<int>(X[i].begin(), X[i].end());
}
auto yp = model.predict(Xd);
return yp;
}
torch::Tensor Classifier::predict_proba(torch::Tensor& X)
{
if (!fitted) {
throw std::logic_error(CLASSIFIER_NOT_FITTED);
}
return model.predict_proba(X);
}
std::vector<std::vector<double>> Classifier::predict_proba(std::vector<std::vector<int>>& X)
{
if (!fitted) {
throw std::logic_error(CLASSIFIER_NOT_FITTED);
}
auto m_ = X[0].size();
auto n_ = X.size();
std::vector<std::vector<int>> Xd(n_, std::vector<int>(m_, 0));
// Convert to nxm vector
for (auto i = 0; i < n_; i++) {
Xd[i] = std::vector<int>(X[i].begin(), X[i].end());
}
auto yp = model.predict_proba(Xd);
return yp;
}
float Classifier::score(torch::Tensor& X, torch::Tensor& y)
{
torch::Tensor y_pred = predict(X);
return (y_pred == y).sum().item<float>() / y.size(0);
}
float Classifier::score(std::vector<std::vector<int>>& X, std::vector<int>& y)
{
if (!fitted) {
throw std::logic_error(CLASSIFIER_NOT_FITTED);
}
return model.score(X, y);
}
std::vector<std::string> Classifier::show() const
{
return model.show();
}
void Classifier::addNodes()
{
// Add all nodes to the network
for (const auto& feature : features) {
model.addNode(feature);
}
model.addNode(className);
}
int Classifier::getNumberOfNodes() const
{
// Features do not include the class
return fitted ? model.getFeatures().size() : 0;
}
int Classifier::getNumberOfEdges() const
{
return fitted ? model.getNumEdges() : 0;
}
int Classifier::getNumberOfStates() const
{
return fitted ? model.getStates() : 0;
}
int Classifier::getClassNumStates() const
{
return fitted ? model.getClassNumStates() : 0;
}
std::vector<std::string> Classifier::topological_order()
{
return model.topological_sort();
}
std::string Classifier::dump_cpt() const
{
return model.dump_cpt();
}
void Classifier::setHyperparameters(const nlohmann::json& hyperparameters)
{
if (!hyperparameters.empty()) {
throw std::invalid_argument("Invalid hyperparameters " + hyperparameters.dump());
}
}
}
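
All of the `fit` overloads above funnel into `build` over a single (n+1) x m `dataset` tensor: the n feature rows of m samples come from X, and `buildDataset` appends y as the last row. A standalone sketch of that layout, using only libtorch (the literal values are illustrative):

```cpp
#include <torch/torch.h>

int main()
{
    // Two feature rows, four samples each: n x m = 2 x 4
    auto X = torch::tensor({ { 0, 1, 0, 1 }, { 0, 0, 1, 1 } }, torch::kInt32);
    auto y = torch::tensor({ 0, 0, 1, 1 }, torch::kInt32); // m labels
    // Same reshape/transpose/cat steps as Classifier::buildDataset
    auto yrow = torch::transpose(y.view({ y.size(0), 1 }), 0, 1); // 1 x 4
    auto dataset = torch::cat({ X, yrow }, 0);                    // (n+1) x m = 3 x 4
    // dataset.size(0) - 1 == number of features, dataset.size(1) == number of samples
    return 0;
}
```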

View File

@@ -1,64 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef CLASSIFIER_H
#define CLASSIFIER_H
#include <torch/torch.h>
#include "bayesnet/utils/BayesMetrics.h"
#include "bayesnet/BaseClassifier.h"
namespace bayesnet {
class Classifier : public BaseClassifier {
public:
Classifier(Network model);
virtual ~Classifier() = default;
Classifier& fit(std::vector<std::vector<int>>& X, std::vector<int>& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) override;
Classifier& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) override;
Classifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) override;
Classifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const Smoothing_t smoothing) override;
void addNodes();
int getNumberOfNodes() const override;
int getNumberOfEdges() const override;
int getNumberOfStates() const override;
int getClassNumStates() const override;
torch::Tensor predict(torch::Tensor& X) override;
std::vector<int> predict(std::vector<std::vector<int>>& X) override;
torch::Tensor predict_proba(torch::Tensor& X) override;
std::vector<std::vector<double>> predict_proba(std::vector<std::vector<int>>& X) override;
status_t getStatus() const override { return status; }
std::string getVersion() override { return { project_version.begin(), project_version.end() }; };
float score(torch::Tensor& X, torch::Tensor& y) override;
float score(std::vector<std::vector<int>>& X, std::vector<int>& y) override;
std::vector<std::string> show() const override;
std::vector<std::string> topological_order() override;
std::vector<std::string> getNotes() const override { return notes; }
std::string dump_cpt() const override;
void setHyperparameters(const nlohmann::json& hyperparameters) override; //For classifiers that don't have hyperparameters
protected:
bool fitted;
unsigned int m, n; // m: number of samples, n: number of features
Network model;
Metrics metrics;
std::vector<std::string> features;
std::string className;
std::map<std::string, std::vector<int>> states;
torch::Tensor dataset; // (n+1)xm tensor
status_t status = NORMAL;
std::vector<std::string> notes; // Used to store messages occurred during the fit process
void checkFitParameters();
virtual void buildModel(const torch::Tensor& weights) = 0;
void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) override;
void buildDataset(torch::Tensor& y);
private:
Classifier& build(const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const Smoothing_t smoothing);
};
}
#endif
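
As a sketch of the extension point this header defines: a new structure-learning classifier only has to derive from `Classifier`, build its graph in `buildModel`, and provide `graph`; fitting, prediction and scoring are inherited. The include path is an assumption, and the class is hypothetical; the members used (`model`, `features`, `className`, `addNodes`) are the ones declared above.

```cpp
// Hypothetical naive-Bayes-style classifier, for illustration only
#include <bayesnet/classifiers/Classifier.h> // assumed header location

namespace bayesnet {
    class NaiveBayesSketch : public Classifier {
    public:
        NaiveBayesSketch() : Classifier(Network()) {}
        virtual ~NaiveBayesSketch() = default;
        std::vector<std::string> graph(const std::string& name = "NaiveBayesSketch") const override
        {
            return model.graph(name);
        }
    protected:
        void buildModel(const torch::Tensor& weights) override
        {
            addNodes(); // class node plus one node per feature
            for (const auto& feature : features) {
                model.addEdge(className, feature); // class is the only parent
            }
        }
    };
}
```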

View File

@@ -1,27 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef KDB_H
#define KDB_H
#include <torch/torch.h>
#include "bayesnet/utils/bayesnetUtils.h"
#include "Classifier.h"
namespace bayesnet {
class KDB : public Classifier {
private:
int k;
float theta;
void add_m_edges(int idx, std::vector<int>& S, torch::Tensor& weights);
protected:
void buildModel(const torch::Tensor& weights) override;
public:
explicit KDB(int k, float theta = 0.03);
virtual ~KDB() = default;
void setHyperparameters(const nlohmann::json& hyperparameters_) override;
std::vector<std::string> graph(const std::string& name = "KDB") const override;
};
}
#endif

View File

@@ -1,24 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef KDBLD_H
#define KDBLD_H
#include "Proposal.h"
#include "KDB.h"
namespace bayesnet {
class KDBLd : public KDB, public Proposal {
private:
public:
explicit KDBLd(int k);
virtual ~KDBLd() = default;
KDBLd& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) override;
std::vector<std::string> graph(const std::string& name = "KDB") const override;
torch::Tensor predict(torch::Tensor& X) override;
static inline std::string version() { return "0.0.1"; };
};
}
#endif // !KDBLD_H

View File

@@ -1,37 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef PROPOSAL_H
#define PROPOSAL_H
#include <string>
#include <map>
#include <torch/torch.h>
#include <CPPFImdlp.h>
#include "bayesnet/network/Network.h"
#include "Classifier.h"
namespace bayesnet {
class Proposal {
public:
Proposal(torch::Tensor& pDataset, std::vector<std::string>& features_, std::string& className_);
virtual ~Proposal();
protected:
void checkInput(const torch::Tensor& X, const torch::Tensor& y);
torch::Tensor prepareX(torch::Tensor& X);
map<std::string, std::vector<int>> localDiscretizationProposal(const map<std::string, std::vector<int>>& states, Network& model);
map<std::string, std::vector<int>> fit_local_discretization(const torch::Tensor& y);
torch::Tensor Xf; // X continuous nxm tensor
torch::Tensor y; // y discrete nx1 tensor
map<std::string, mdlp::CPPFImdlp*> discretizers;
private:
std::vector<int> factorize(const std::vector<std::string>& labels_t);
torch::Tensor& pDataset; // (n+1)xm tensor
std::vector<std::string>& pFeatures;
std::string& pClassName;
};
}
#endif

View File

@@ -1,23 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef SPODE_H
#define SPODE_H
#include "Classifier.h"
namespace bayesnet {
class SPODE : public Classifier {
private:
int root;
protected:
void buildModel(const torch::Tensor& weights) override;
public:
explicit SPODE(int root);
virtual ~SPODE() = default;
std::vector<std::string> graph(const std::string& name = "SPODE") const override;
};
}
#endif

View File

@@ -1,50 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#include "SPODELd.h"
namespace bayesnet {
SPODELd::SPODELd(int root) : SPODE(root), Proposal(dataset, features, className) {}
SPODELd& SPODELd::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_, const Smoothing_t smoothing)
{
checkInput(X_, y_);
Xf = X_;
y = y_;
return commonFit(features_, className_, states_, smoothing);
}
SPODELd& SPODELd::fit(torch::Tensor& dataset, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_, const Smoothing_t smoothing)
{
if (!torch::is_floating_point(dataset)) {
throw std::runtime_error("Dataset must be a floating point tensor");
}
Xf = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), "..." }).clone();
y = dataset.index({ -1, "..." }).clone().to(torch::kInt32);
return commonFit(features_, className_, states_, smoothing);
}
SPODELd& SPODELd::commonFit(const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_, const Smoothing_t smoothing)
{
features = features_;
className = className_;
// Compute the states of each feature by locally discretizing the continuous data
states = fit_local_discretization(y);
// Now that the input data is discretized, fit the model to build the normal SPODE
// structure; SPODE::fit initializes the base Bayesian network
SPODE::fit(dataset, features, className, states, smoothing);
states = localDiscretizationProposal(states, model);
return *this;
}
torch::Tensor SPODELd::predict(torch::Tensor& X)
{
auto Xt = prepareX(X);
return SPODE::predict(Xt);
}
std::vector<std::string> SPODELd::graph(const std::string& name) const
{
return SPODE::graph(name);
}
}

View File

@@ -1,25 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef SPODELD_H
#define SPODELD_H
#include "SPODE.h"
#include "Proposal.h"
namespace bayesnet {
class SPODELd : public SPODE, public Proposal {
public:
explicit SPODELd(int root);
virtual ~SPODELd() = default;
SPODELd& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) override;
SPODELd& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) override;
SPODELd& commonFit(const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states, const Smoothing_t smoothing);
std::vector<std::string> graph(const std::string& name = "SPODELd") const override;
torch::Tensor predict(torch::Tensor& X) override;
static inline std::string version() { return "0.0.1"; };
};
}
#endif // !SPODELD_H

View File

@@ -1,38 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#include "SPnDE.h"
namespace bayesnet {
SPnDE::SPnDE(std::vector<int> parents) : Classifier(Network()), parents(parents) {}
void SPnDE::buildModel(const torch::Tensor& weights)
{
// 0. Add all nodes to the model
addNodes();
std::vector<int> attributes;
for (int i = 0; i < static_cast<int>(features.size()); ++i) {
if (std::find(parents.begin(), parents.end(), i) == parents.end()) {
attributes.push_back(i);
}
}
// 1. Add edges from the class node to all attribute nodes
// 2. Add edges from the parent nodes to all attribute nodes
for (const auto& attribute : attributes) {
model.addEdge(className, features[attribute]);
for (const auto& root : parents) {
model.addEdge(features[root], features[attribute]);
}
}
}
std::vector<std::string> SPnDE::graph(const std::string& name) const
{
return model.graph(name);
}
}
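
Each SPnDE thus treats the given feature indices as shared super-parents: every remaining attribute receives the class and each listed parent as its parents. A hedged construction sketch (the header location and the indices are illustrative):

```cpp
#include <bayesnet/classifiers/SPnDE.h> // assumed header location

int main()
{
    // Features 0 and 1 act as super-parents of every other attribute
    auto clf = bayesnet::SPnDE({ 0, 1 });
    // ... then fit / predict as with any Classifier subclass
    return 0;
}
```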

View File

@@ -1,26 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef SPnDE_H
#define SPnDE_H
#include <vector>
#include "Classifier.h"
namespace bayesnet {
class SPnDE : public Classifier {
public:
explicit SPnDE(std::vector<int> parents);
virtual ~SPnDE() = default;
std::vector<std::string> graph(const std::string& name = "SPnDE") const override;
protected:
void buildModel(const torch::Tensor& weights) override;
private:
std::vector<int> parents;
};
}
#endif

View File

@@ -1,21 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef TAN_H
#define TAN_H
#include "Classifier.h"
namespace bayesnet {
class TAN : public Classifier {
private:
protected:
void buildModel(const torch::Tensor& weights) override;
public:
TAN();
virtual ~TAN() = default;
std::vector<std::string> graph(const std::string& name = "TAN") const override;
};
}
#endif

View File

@@ -1,23 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef TANLD_H
#define TANLD_H
#include "TAN.h"
#include "Proposal.h"
namespace bayesnet {
class TANLd : public TAN, public Proposal {
private:
public:
TANLd();
virtual ~TANLd() = default;
TANLd& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) override;
std::vector<std::string> graph(const std::string& name = "TANLd") const override;
torch::Tensor predict(torch::Tensor& X) override;
};
}
#endif // !TANLD_H

View File

@ -1,40 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#include "A2DE.h"
namespace bayesnet {
A2DE::A2DE(bool predict_voting) : Ensemble(predict_voting)
{
validHyperparameters = { "predict_voting" };
}
void A2DE::setHyperparameters(const nlohmann::json& hyperparameters_)
{
auto hyperparameters = hyperparameters_;
if (hyperparameters.contains("predict_voting")) {
predict_voting = hyperparameters["predict_voting"];
hyperparameters.erase("predict_voting");
}
Classifier::setHyperparameters(hyperparameters);
}
void A2DE::buildModel(const torch::Tensor& weights)
{
models.clear();
significanceModels.clear();
for (int i = 0; i < features.size() - 1; ++i) {
for (int j = i + 1; j < features.size(); ++j) {
auto model = std::make_unique<SPnDE>(std::vector<int>({ i, j }));
models.push_back(std::move(model));
}
}
n_models = static_cast<unsigned>(models.size());
significanceModels = std::vector<double>(n_models, 1.0);
}
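// Model count (informative note): the double loop creates one SPnDE per
// unordered feature pair, so n_models = n * (n - 1) / 2; e.g. 5 features
// yield 10 models with parents {0,1}, {0,2}, ..., {3,4}.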
std::vector<std::string> A2DE::graph(const std::string& title) const
{
return Ensemble::graph(title);
}
}

View File

@ -1,22 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef A2DE_H
#define A2DE_H
#include "bayesnet/classifiers/SPnDE.h"
#include "Ensemble.h"
namespace bayesnet {
class A2DE : public Ensemble {
public:
A2DE(bool predict_voting = false);
virtual ~A2DE() {};
void setHyperparameters(const nlohmann::json& hyperparameters) override;
std::vector<std::string> graph(const std::string& title = "A2DE") const override;
protected:
void buildModel(const torch::Tensor& weights) override;
};
}
#endif

View File

@ -1,38 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#include "AODE.h"
namespace bayesnet {
AODE::AODE(bool predict_voting) : Ensemble(predict_voting)
{
validHyperparameters = { "predict_voting" };
}
void AODE::setHyperparameters(const nlohmann::json& hyperparameters_)
{
auto hyperparameters = hyperparameters_;
if (hyperparameters.contains("predict_voting")) {
predict_voting = hyperparameters["predict_voting"];
hyperparameters.erase("predict_voting");
}
Classifier::setHyperparameters(hyperparameters);
}
void AODE::buildModel(const torch::Tensor& weights)
{
models.clear();
significanceModels.clear();
for (int i = 0; i < features.size(); ++i) {
models.push_back(std::make_unique<SPODE>(i));
}
n_models = models.size();
significanceModels = std::vector<double>(n_models, 1.0);
}
std::vector<std::string> AODE::graph(const std::string& title) const
{
return Ensemble::graph(title);
}
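// Minimal usage sketch (hedged: assumes the discrete fit/predict overloads
// Classifier exposes elsewhere in the library; Xv, yv, features, className
// and states are placeholders the caller must provide):
//   auto clf = bayesnet::AODE(false);
//   clf.fit(Xv, yv, features, className, states, Smoothing_t::ORIGINAL);
//   auto y_pred = clf.predict(Xv);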
}

View File

@ -1,22 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef AODE_H
#define AODE_H
#include "bayesnet/classifiers/SPODE.h"
#include "Ensemble.h"
namespace bayesnet {
class AODE : public Ensemble {
public:
AODE(bool predict_voting = false);
virtual ~AODE() {};
void setHyperparameters(const nlohmann::json& hyperparameters) override;
std::vector<std::string> graph(const std::string& title = "AODE") const override;
protected:
void buildModel(const torch::Tensor& weights) override;
};
}
#endif

View File

@ -1,48 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#include "AODELd.h"
namespace bayesnet {
AODELd::AODELd(bool predict_voting) : Ensemble(predict_voting), Proposal(dataset, features, className)
{
}
AODELd& AODELd::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_, const Smoothing_t smoothing)
{
checkInput(X_, y_);
features = features_;
className = className_;
Xf = X_;
y = y_;
// fit_local_discretization discretizes Xf & y and returns the resulting states map
states = fit_local_discretization(y);
// We have discretized the input data
// 1st we need to fit the model to build the normal AODE structure, Ensemble::fit
// calls buildModel to initialize the base models
Ensemble::fit(dataset, features, className, states, smoothing);
return *this;
}
void AODELd::buildModel(const torch::Tensor& weights)
{
models.clear();
for (int i = 0; i < features.size(); ++i) {
models.push_back(std::make_unique<SPODELd>(i));
}
n_models = models.size();
significanceModels = std::vector<double>(n_models, 1.0);
}
void AODELd::trainModel(const torch::Tensor& weights, const Smoothing_t smoothing)
{
for (const auto& model : models) {
model->fit(Xf, y, features, className, states, smoothing);
}
}
std::vector<std::string> AODELd::graph(const std::string& name) const
{
return Ensemble::graph(name);
}
}

View File

@ -1,25 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef AODELD_H
#define AODELD_H
#include "bayesnet/classifiers/Proposal.h"
#include "bayesnet/classifiers/SPODELd.h"
#include "Ensemble.h"
namespace bayesnet {
class AODELd : public Ensemble, public Proposal {
public:
AODELd(bool predict_voting = true);
virtual ~AODELd() = default;
AODELd& fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_, const Smoothing_t smoothing) override;
std::vector<std::string> graph(const std::string& name = "AODELd") const override;
protected:
void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) override;
void buildModel(const torch::Tensor& weights) override;
};
}
#endif // !AODELD_H

View File

@ -1,246 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#include <folding.hpp>
#include "bayesnet/feature_selection/CFS.h"
#include "bayesnet/feature_selection/FCBF.h"
#include "bayesnet/feature_selection/IWSS.h"
#include "Boost.h"
namespace bayesnet {
Boost::Boost(bool predict_voting) : Ensemble(predict_voting)
{
validHyperparameters = { "order", "convergence", "convergence_best", "bisection", "threshold", "maxTolerance",
"predict_voting", "select_features", "block_update" };
}
void Boost::setHyperparameters(const nlohmann::json& hyperparameters_)
{
auto hyperparameters = hyperparameters_;
if (hyperparameters.contains("order")) {
std::vector<std::string> algos = { Orders.ASC, Orders.DESC, Orders.RAND };
order_algorithm = hyperparameters["order"];
if (std::find(algos.begin(), algos.end(), order_algorithm) == algos.end()) {
throw std::invalid_argument("Invalid order algorithm, valid values [" + Orders.ASC + ", " + Orders.DESC + ", " + Orders.RAND + "]");
}
hyperparameters.erase("order");
}
if (hyperparameters.contains("convergence")) {
convergence = hyperparameters["convergence"];
hyperparameters.erase("convergence");
}
if (hyperparameters.contains("convergence_best")) {
convergence_best = hyperparameters["convergence_best"];
hyperparameters.erase("convergence_best");
}
if (hyperparameters.contains("bisection")) {
bisection = hyperparameters["bisection"];
hyperparameters.erase("bisection");
}
if (hyperparameters.contains("threshold")) {
threshold = hyperparameters["threshold"];
hyperparameters.erase("threshold");
}
if (hyperparameters.contains("maxTolerance")) {
maxTolerance = hyperparameters["maxTolerance"];
if (maxTolerance < 1 || maxTolerance > 4)
throw std::invalid_argument("Invalid maxTolerance value, must be greater in [1, 4]");
hyperparameters.erase("maxTolerance");
}
if (hyperparameters.contains("predict_voting")) {
predict_voting = hyperparameters["predict_voting"];
hyperparameters.erase("predict_voting");
}
if (hyperparameters.contains("select_features")) {
auto selectedAlgorithm = hyperparameters["select_features"];
std::vector<std::string> algos = { SelectFeatures.IWSS, SelectFeatures.CFS, SelectFeatures.FCBF };
selectFeatures = true;
select_features_algorithm = selectedAlgorithm;
if (std::find(algos.begin(), algos.end(), selectedAlgorithm) == algos.end()) {
throw std::invalid_argument("Invalid selectFeatures value, valid values [" + SelectFeatures.IWSS + ", " + SelectFeatures.CFS + ", " + SelectFeatures.FCBF + "]");
}
hyperparameters.erase("select_features");
}
if (hyperparameters.contains("block_update")) {
block_update = hyperparameters["block_update"];
hyperparameters.erase("block_update");
}
Classifier::setHyperparameters(hyperparameters);
}
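// Example hyperparameter set (illustrative only; every key below is one of
// the validHyperparameters checked above):
//   nlohmann::json hyperparameters = {
//       {"order", "desc"},
//       {"convergence", true},
//       {"maxTolerance", 3},
//       {"select_features", "CFS"},
//       {"block_update", false}
//   };
//   boost_model.setHyperparameters(hyperparameters);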
void Boost::buildModel(const torch::Tensor& weights)
{
// Models shall be built in trainModel
models.clear();
significanceModels.clear();
n_models = 0;
// Prepare the validation dataset
auto y_ = dataset.index({ -1, "..." });
if (convergence) {
// Prepare train & validation sets from train data
auto fold = folding::StratifiedKFold(5, y_, 271);
auto [train, test] = fold.getFold(0);
auto train_t = torch::tensor(train);
auto test_t = torch::tensor(test);
// Get train and validation sets
X_train = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), train_t });
y_train = dataset.index({ -1, train_t });
X_test = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), test_t });
y_test = dataset.index({ -1, test_t });
dataset = X_train;
m = X_train.size(1);
auto n_classes = states.at(className).size();
// Build dataset with train data
buildDataset(y_train);
metrics = Metrics(dataset, features, className, n_classes);
} else {
// Use all data to train
X_train = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), "..." });
y_train = y_;
}
}
std::vector<int> Boost::featureSelection(torch::Tensor& weights_)
{
int maxFeatures = 0;
if (select_features_algorithm == SelectFeatures.CFS) {
featureSelector = new CFS(dataset, features, className, maxFeatures, states.at(className).size(), weights_);
} else if (select_features_algorithm == SelectFeatures.IWSS) {
if (threshold < 0 || threshold > 0.5) {
throw std::invalid_argument("Invalid threshold value for " + SelectFeatures.IWSS + ", must be in [0, 0.5]");
}
featureSelector = new IWSS(dataset, features, className, maxFeatures, states.at(className).size(), weights_, threshold);
} else if (select_features_algorithm == SelectFeatures.FCBF) {
if (threshold < 1e-7 || threshold > 1) {
throw std::invalid_argument("Invalid threshold value for " + SelectFeatures.FCBF + " [1e-7, 1]");
}
featureSelector = new FCBF(dataset, features, className, maxFeatures, states.at(className).size(), weights_, threshold);
}
featureSelector->fit();
auto featuresUsed = featureSelector->getFeatures();
delete featureSelector;
return featuresUsed;
}
std::tuple<torch::Tensor&, double, bool> Boost::update_weights(torch::Tensor& ytrain, torch::Tensor& ypred, torch::Tensor& weights)
{
bool terminate = false;
double alpha_t = 0;
auto mask_wrong = ypred != ytrain;
auto mask_right = ypred == ytrain;
auto masked_weights = weights * mask_wrong.to(weights.dtype());
double epsilon_t = masked_weights.sum().item<double>();
if (epsilon_t > 0.5) {
// Invert the weights policy (plot ln(wt))
// "In each round of AdaBoost, there is a sanity check to ensure that the current base
// learner is better than random guess" (Zhi-Hua Zhou, 2012)
terminate = true;
} else {
double wt = (1 - epsilon_t) / epsilon_t;
alpha_t = epsilon_t == 0 ? 1 : 0.5 * log(wt);
// Step 3.2: Update weights for next classifier
// Step 3.2.1: Update weights of wrong samples
weights += mask_wrong.to(weights.dtype()) * exp(alpha_t) * weights;
// Step 3.2.2: Update weights of right samples
weights += mask_right.to(weights.dtype()) * exp(-alpha_t) * weights;
// Step 3.3: Normalise the weights
double totalWeights = torch::sum(weights).item<double>();
weights = weights / totalWeights;
}
return { weights, alpha_t, terminate };
}
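// Math sketch of the update above (a reading of the code, hedged): with
// weighted error epsilon_t = sum_i w_i * 1[ypred_i != ytrain_i], the
// amount of say is alpha_t = 0.5 * ln((1 - epsilon_t) / epsilon_t).
// Wrong samples then get w_i += w_i * exp(alpha_t) and right samples
// w_i += w_i * exp(-alpha_t), i.e. w_i *= (1 + exp(+/-alpha_t)), a variant
// of the classic AdaBoost rule w_i *= exp(+/-alpha_t), before the weights
// are normalised to sum to 1.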
std::tuple<torch::Tensor&, double, bool> Boost::update_weights_block(int k, torch::Tensor& ytrain, torch::Tensor& weights)
{
/* Update Block algorithm
k = # of models in block
n_models = # of models in ensemble to make predictions
n_models_bak = # models saved
models = vector of models to make predictions
models_bak = models not used to make predictions
significances_bak = backup of significances vector
Case list
A) k = 1, n_models = 1 => n = 0 , n_models = n + k
B) k = 1, n_models = n + 1 => n_models = n + k
C) k > 1, n_models = k + 1 => n= 1, n_models = n + k
D) k > 1, n_models = k => n = 0, n_models = n + k
E) k > 1, n_models = k + n => n_models = n + k
A, D) n=0, k > 0, n_models == k
1. n_models_bak <- n_models
2. significances_bak <- significances
3. significances = vector(k, 1)
4. Don't move any classifiers out of models
5. n_models <- k
6. Make prediction, compute alpha, update weights
7. Don't restore any classifiers to models
8. significances <- significances_bak
9. Update last k significances
10. n_models <- n_models_bak
B, C, E) n > 0, k > 0, n_models == n + k
1. n_models_bak <- n_models
2. significances_bak <- significances
3. significances = vector(k, 1)
4. Move first n classifiers to models_bak
5. n_models <- k
6. Make prediction, compute alpha, update weights
7. Insert classifiers in models_bak to be the first n models
8. significances <- significances_bak
9. Update last k significances
10. n_models <- n_models_bak
*/
//
// Make predict with only the last k models
//
std::unique_ptr<Classifier> model;
std::vector<std::unique_ptr<Classifier>> models_bak;
// 1. n_models_bak <- n_models 2. significances_bak <- significances
auto significance_bak = significanceModels;
auto n_models_bak = n_models;
// 3. significances = vector(k, 1)
significanceModels = std::vector<double>(k, 1.0);
// 4. Move first n classifiers to models_bak
// backup the first n_models - k models (if n_models == k, don't backup any)
for (int i = 0; i < n_models - k; ++i) {
model = std::move(models[0]);
models.erase(models.begin());
models_bak.push_back(std::move(model));
}
assert(models.size() == k);
// 5. n_models <- k
n_models = k;
// 6. Make prediction, compute alpha, update weights
auto ypred = predict(X_train);
//
// Update weights
//
double alpha_t;
bool terminate;
std::tie(weights, alpha_t, terminate) = update_weights(y_train, ypred, weights);
//
// Restore the models if needed
//
// 7. Insert classifiers in models_bak to be the first n models
// if n_models_bak == k, don't restore any, because none of them were moved
if (k != n_models_bak) {
// Insert in the same order as they were extracted
int bak_size = models_bak.size();
for (int i = 0; i < bak_size; ++i) {
model = std::move(models_bak[bak_size - 1 - i]);
models_bak.erase(models_bak.end() - 1);
models.insert(models.begin(), std::move(model));
}
}
// 8. significances <- significances_bak
significanceModels = significance_bak;
//
// Update the significance of the last k models
//
// 9. Update last k significances
for (int i = 0; i < k; ++i) {
significanceModels[n_models_bak - k + i] = alpha_t;
}
// 10. n_models <- n_models_bak
n_models = n_models_bak;
return { weights, alpha_t, terminate };
}
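// Worked trace (illustrative): with n_models = 5 and k = 2 (case E with
// n = 3), step 4 moves the first 3 models to models_bak, step 6 predicts
// with the remaining 2, step 7 restores the 3 backups at the front in their
// original order, and step 9 writes alpha_t into significanceModels[3]
// and significanceModels[4].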
}

View File

@ -1,52 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef BOOST_H
#define BOOST_H
#include <string>
#include <tuple>
#include <vector>
#include <nlohmann/json.hpp>
#include <torch/torch.h>
#include "Ensemble.h"
#include "bayesnet/feature_selection/FeatureSelect.h"
namespace bayesnet {
const struct {
std::string CFS = "CFS";
std::string FCBF = "FCBF";
std::string IWSS = "IWSS";
} SelectFeatures;
const struct {
std::string ASC = "asc";
std::string DESC = "desc";
std::string RAND = "rand";
} Orders;
class Boost : public Ensemble {
public:
explicit Boost(bool predict_voting = false);
virtual ~Boost() = default;
void setHyperparameters(const nlohmann::json& hyperparameters_) override;
protected:
std::vector<int> featureSelection(torch::Tensor& weights_);
void buildModel(const torch::Tensor& weights) override;
std::tuple<torch::Tensor&, double, bool> update_weights(torch::Tensor& ytrain, torch::Tensor& ypred, torch::Tensor& weights);
std::tuple<torch::Tensor&, double, bool> update_weights_block(int k, torch::Tensor& ytrain, torch::Tensor& weights);
torch::Tensor X_train, y_train, X_test, y_test;
// Hyperparameters
bool bisection = true; // if true, use a bisection strategy to add k models at once to the ensemble
int maxTolerance = 3;
std::string order_algorithm; // order to process the KBest features asc, desc, rand
bool convergence = true; //if true, stop when the model does not improve
bool convergence_best = false; // whether to keep the best accuracy so far or the last accuracy as the prior accuracy
bool selectFeatures = false; // if true, use feature selection
std::string select_features_algorithm = Orders.DESC; // Selected feature selection algorithm
FeatureSelect* featureSelector = nullptr;
double threshold = -1;
bool block_update = false;
};
}
#endif

View File

@ -1,170 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#include <set>
#include <functional>
#include <limits.h>
#include <tuple>
#include <folding.hpp>
#include "bayesnet/feature_selection/CFS.h"
#include "bayesnet/feature_selection/FCBF.h"
#include "bayesnet/feature_selection/IWSS.h"
#include "BoostA2DE.h"
namespace bayesnet {
BoostA2DE::BoostA2DE(bool predict_voting) : Boost(predict_voting)
{
}
std::vector<int> BoostA2DE::initializeModels(const Smoothing_t smoothing)
{
torch::Tensor weights_ = torch::full({ m }, 1.0 / m, torch::kFloat64);
std::vector<int> featuresSelected = featureSelection(weights_);
if (featuresSelected.size() < 2) {
notes.push_back("No features selected in initialization");
status = ERROR;
return std::vector<int>();
}
for (int i = 0; i < featuresSelected.size() - 1; i++) {
for (int j = i + 1; j < featuresSelected.size(); j++) {
auto parents = { featuresSelected[i], featuresSelected[j] };
std::unique_ptr<Classifier> model = std::make_unique<SPnDE>(parents);
model->fit(dataset, features, className, states, weights_, smoothing);
models.push_back(std::move(model));
significanceModels.push_back(1.0); // They will be updated later in trainModel
n_models++;
}
}
notes.push_back("Used features in initialization: " + std::to_string(featuresSelected.size()) + " of " + std::to_string(features.size()) + " with " + select_features_algorithm);
return featuresSelected;
}
void BoostA2DE::trainModel(const torch::Tensor& weights, const Smoothing_t smoothing)
{
//
// Logging setup
//
// loguru::set_thread_name("BoostA2DE");
// loguru::g_stderr_verbosity = loguru::Verbosity_OFF;
// loguru::add_file("boostA2DE.log", loguru::Truncate, loguru::Verbosity_MAX);
// Algorithm based on the adaboost algorithm for classification
// as explained in Ensemble methods (Zhi-Hua Zhou, 2012)
fitted = true;
double alpha_t = 0;
torch::Tensor weights_ = torch::full({ m }, 1.0 / m, torch::kFloat64);
bool finished = false;
std::vector<int> featuresUsed;
if (selectFeatures) {
featuresUsed = initializeModels(smoothing);
if (featuresUsed.size() == 0) {
return;
}
auto ypred = predict(X_train);
std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
// Update significance of the models
for (int i = 0; i < n_models; ++i) {
significanceModels[i] = alpha_t;
}
if (finished) {
return;
}
}
int numItemsPack = 0; // The counter of the models inserted in the current pack
// Variables to control the accuracy finish condition
double priorAccuracy = 0.0;
double improvement = 1.0;
double convergence_threshold = 1e-4;
int tolerance = 0; // number of times the accuracy is lower than the convergence_threshold
// Step 0: Set the finish condition
// epsilon_t > 0.5 => invert the weights policy
// validation error is not decreasing
// run out of features
bool ascending = order_algorithm == Orders.ASC;
std::mt19937 g{ 173 };
std::vector<std::pair<int, int>> pairSelection;
while (!finished) {
// Step 1: Build ranking with mutual information
pairSelection = metrics.SelectKPairs(weights_, featuresUsed, ascending, 0); // Get all the pairs sorted
if (order_algorithm == Orders.RAND) {
std::shuffle(pairSelection.begin(), pairSelection.end(), g);
}
int k = bisection ? pow(2, tolerance) : 1;
int counter = 0; // The model counter of the current pack
// VLOG_SCOPE_F(1, "counter=%d k=%d featureSelection.size: %zu", counter, k, featureSelection.size());
while (counter++ < k && pairSelection.size() > 0) {
auto feature_pair = pairSelection[0];
pairSelection.erase(pairSelection.begin());
std::unique_ptr<Classifier> model;
model = std::make_unique<SPnDE>(std::vector<int>({ feature_pair.first, feature_pair.second }));
model->fit(dataset, features, className, states, weights_, smoothing);
alpha_t = 0.0;
if (!block_update) {
auto ypred = model->predict(X_train);
// Step 3.1: Compute the classifier's amount of say
std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
}
// Step 3.4: Store classifier and its accuracy to weigh its future vote
numItemsPack++;
models.push_back(std::move(model));
significanceModels.push_back(alpha_t);
n_models++;
// VLOG_SCOPE_F(2, "numItemsPack: %d n_models: %d featuresUsed: %zu", numItemsPack, n_models, featuresUsed.size());
}
if (block_update) {
std::tie(weights_, alpha_t, finished) = update_weights_block(k, y_train, weights_);
}
if (convergence && !finished) {
auto y_val_predict = predict(X_test);
double accuracy = (y_val_predict == y_test).sum().item<double>() / (double)y_test.size(0);
if (priorAccuracy == 0) {
priorAccuracy = accuracy;
} else {
improvement = accuracy - priorAccuracy;
}
if (improvement < convergence_threshold) {
// VLOG_SCOPE_F(3, " (improvement<threshold) tolerance: %d numItemsPack: %d improvement: %f prior: %f current: %f", tolerance, numItemsPack, improvement, priorAccuracy, accuracy);
tolerance++;
} else {
// VLOG_SCOPE_F(3, "* (improvement>=threshold) Reset. tolerance: %d numItemsPack: %d improvement: %f prior: %f current: %f", tolerance, numItemsPack, improvement, priorAccuracy, accuracy);
tolerance = 0; // Reset the counter if the model performs better
numItemsPack = 0;
}
if (convergence_best) {
// Keep the best accuracy until now as the prior accuracy
priorAccuracy = std::max(accuracy, priorAccuracy);
} else {
// Keep the last accuracy obtained as the prior accuracy
priorAccuracy = accuracy;
}
}
// VLOG_SCOPE_F(1, "tolerance: %d featuresUsed.size: %zu features.size: %zu", tolerance, featuresUsed.size(), features.size());
finished = finished || tolerance > maxTolerance || pairSelection.size() == 0;
}
if (tolerance > maxTolerance) {
if (numItemsPack < n_models) {
notes.push_back("Convergence threshold reached & " + std::to_string(numItemsPack) + " models eliminated");
// VLOG_SCOPE_F(4, "Convergence threshold reached & %d models eliminated of %d", numItemsPack, n_models);
for (int i = 0; i < numItemsPack; ++i) {
significanceModels.pop_back();
models.pop_back();
n_models--;
}
} else {
notes.push_back("Convergence threshold reached & 0 models eliminated");
// VLOG_SCOPE_F(4, "Convergence threshold reached & 0 models eliminated n_models=%d numItemsPack=%d", n_models, numItemsPack);
}
}
if (pairSelection.size() > 0) {
notes.push_back("Pairs not used in train: " + std::to_string(pairSelection.size()));
status = WARNING;
}
notes.push_back("Number of models: " + std::to_string(n_models));
}
std::vector<std::string> BoostA2DE::graph(const std::string& title) const
{
return Ensemble::graph(title);
}
}

View File

@ -1,25 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef BOOSTA2DE_H
#define BOOSTA2DE_H
#include <string>
#include <vector>
#include "bayesnet/classifiers/SPnDE.h"
#include "Boost.h"
namespace bayesnet {
class BoostA2DE : public Boost {
public:
explicit BoostA2DE(bool predict_voting = false);
virtual ~BoostA2DE() = default;
std::vector<std::string> graph(const std::string& title = "BoostA2DE") const override;
protected:
void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) override;
private:
std::vector<int> initializeModels(const Smoothing_t smoothing);
};
}
#endif

View File

@ -1,161 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#include <random>
#include <set>
#include <functional>
#include <limits.h>
#include <tuple>
#include "BoostAODE.h"
namespace bayesnet {
BoostAODE::BoostAODE(bool predict_voting) : Boost(predict_voting)
{
}
std::vector<int> BoostAODE::initializeModels(const Smoothing_t smoothing)
{
torch::Tensor weights_ = torch::full({ m }, 1.0 / m, torch::kFloat64);
std::vector<int> featuresSelected = featureSelection(weights_);
for (const int& feature : featuresSelected) {
std::unique_ptr<Classifier> model = std::make_unique<SPODE>(feature);
model->fit(dataset, features, className, states, weights_, smoothing);
models.push_back(std::move(model));
significanceModels.push_back(1.0); // They will be updated later in trainModel
n_models++;
}
notes.push_back("Used features in initialization: " + std::to_string(featuresSelected.size()) + " of " + std::to_string(features.size()) + " with " + select_features_algorithm);
return featuresSelected;
}
void BoostAODE::trainModel(const torch::Tensor& weights, const Smoothing_t smoothing)
{
//
// Logging setup
//
// loguru::set_thread_name("BoostAODE");
// loguru::g_stderr_verbosity = loguru::Verbosity_OFF;
// loguru::add_file("boostAODE.log", loguru::Truncate, loguru::Verbosity_MAX);
// Algorithm based on the adaboost algorithm for classification
// as explained in Ensemble methods (Zhi-Hua Zhou, 2012)
fitted = true;
double alpha_t = 0;
torch::Tensor weights_ = torch::full({ m }, 1.0 / m, torch::kFloat64);
bool finished = false;
std::vector<int> featuresUsed;
if (selectFeatures) {
featuresUsed = initializeModels(smoothing);
auto ypred = predict(X_train);
std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
// Update significance of the models
for (int i = 0; i < n_models; ++i) {
significanceModels[i] = alpha_t;
}
if (finished) {
return;
}
}
int numItemsPack = 0; // The counter of the models inserted in the current pack
// Variables to control the accuracy finish condition
double priorAccuracy = 0.0;
double improvement = 1.0;
double convergence_threshold = 1e-4;
int tolerance = 0; // number of times the accuracy is lower than the convergence_threshold
// Step 0: Set the finish condition
// epsilon_t > 0.5 => invert the weights policy
// validation error is not decreasing
// run out of features
bool ascending = order_algorithm == Orders.ASC;
std::mt19937 g{ 173 };
while (!finished) {
// Step 1: Build ranking with mutual information
auto featureSelection = metrics.SelectKBestWeighted(weights_, ascending, n); // Get all the features sorted
if (order_algorithm == Orders.RAND) {
std::shuffle(featureSelection.begin(), featureSelection.end(), g);
}
// Remove used features
featureSelection.erase(remove_if(begin(featureSelection), end(featureSelection), [&](auto x)
{ return std::find(begin(featuresUsed), end(featuresUsed), x) != end(featuresUsed);}),
end(featureSelection)
);
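// The erase(remove_if(...)) idiom above compacts the ranking in one linear
// pass, dropping every feature already present in featuresUsed.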
int k = bisection ? pow(2, tolerance) : 1;
int counter = 0; // The model counter of the current pack
// VLOG_SCOPE_F(1, "counter=%d k=%d featureSelection.size: %zu", counter, k, featureSelection.size());
while (counter++ < k && featureSelection.size() > 0) {
auto feature = featureSelection[0];
featureSelection.erase(featureSelection.begin());
std::unique_ptr<Classifier> model;
model = std::make_unique<SPODE>(feature);
model->fit(dataset, features, className, states, weights_, smoothing);
alpha_t = 0.0;
if (!block_update) {
auto ypred = model->predict(X_train);
// Step 3.1: Compute the classifier's amount of say
std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
}
// Step 3.4: Store classifier and its accuracy to weigh its future vote
numItemsPack++;
featuresUsed.push_back(feature);
models.push_back(std::move(model));
significanceModels.push_back(alpha_t);
n_models++;
// VLOG_SCOPE_F(2, "numItemsPack: %d n_models: %d featuresUsed: %zu", numItemsPack, n_models, featuresUsed.size());
}
if (block_update) {
std::tie(weights_, alpha_t, finished) = update_weights_block(k, y_train, weights_);
}
if (convergence && !finished) {
auto y_val_predict = predict(X_test);
double accuracy = (y_val_predict == y_test).sum().item<double>() / (double)y_test.size(0);
if (priorAccuracy == 0) {
priorAccuracy = accuracy;
} else {
improvement = accuracy - priorAccuracy;
}
if (improvement < convergence_threshold) {
// VLOG_SCOPE_F(3, " (improvement<threshold) tolerance: %d numItemsPack: %d improvement: %f prior: %f current: %f", tolerance, numItemsPack, improvement, priorAccuracy, accuracy);
tolerance++;
} else {
// VLOG_SCOPE_F(3, "* (improvement>=threshold) Reset. tolerance: %d numItemsPack: %d improvement: %f prior: %f current: %f", tolerance, numItemsPack, improvement, priorAccuracy, accuracy);
tolerance = 0; // Reset the counter if the model performs better
numItemsPack = 0;
}
if (convergence_best) {
// Keep the best accuracy until now as the prior accuracy
priorAccuracy = std::max(accuracy, priorAccuracy);
} else {
// Keep the last accuracy obtained as the prior accuracy
priorAccuracy = accuracy;
}
}
// VLOG_SCOPE_F(1, "tolerance: %d featuresUsed.size: %zu features.size: %zu", tolerance, featuresUsed.size(), features.size());
finished = finished || tolerance > maxTolerance || featuresUsed.size() == features.size();
}
if (tolerance > maxTolerance) {
if (numItemsPack < n_models) {
notes.push_back("Convergence threshold reached & " + std::to_string(numItemsPack) + " models eliminated");
// VLOG_SCOPE_F(4, "Convergence threshold reached & %d models eliminated of %d", numItemsPack, n_models);
for (int i = 0; i < numItemsPack; ++i) {
significanceModels.pop_back();
models.pop_back();
n_models--;
}
} else {
notes.push_back("Convergence threshold reached & 0 models eliminated");
// VLOG_SCOPE_F(4, "Convergence threshold reached & 0 models eliminated n_models=%d numItemsPack=%d", n_models, numItemsPack);
}
}
if (featuresUsed.size() != features.size()) {
notes.push_back("Used features in train: " + std::to_string(featuresUsed.size()) + " of " + std::to_string(features.size()));
status = WARNING;
}
notes.push_back("Number of models: " + std::to_string(n_models));
}
std::vector<std::string> BoostAODE::graph(const std::string& title) const
{
return Ensemble::graph(title);
}
}

View File

@ -1,26 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef BOOSTAODE_H
#define BOOSTAODE_H
#include <string>
#include <vector>
#include "bayesnet/classifiers/SPODE.h"
#include "Boost.h"
namespace bayesnet {
class BoostAODE : public Boost {
public:
explicit BoostAODE(bool predict_voting = false);
virtual ~BoostAODE() = default;
std::vector<std::string> graph(const std::string& title = "BoostAODE") const override;
protected:
void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) override;
private:
std::vector<int> initializeModels(const Smoothing_t smoothing);
};
}
#endif

View File

@ -1,197 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#include "Ensemble.h"
#include "bayesnet/utils/CountingSemaphore.h"
namespace bayesnet {
Ensemble::Ensemble(bool predict_voting) : Classifier(Network()), n_models(0), predict_voting(predict_voting)
{
}
const std::string ENSEMBLE_NOT_FITTED = "Ensemble has not been fitted";
void Ensemble::trainModel(const torch::Tensor& weights, const Smoothing_t smoothing)
{
n_models = models.size();
for (auto i = 0; i < n_models; ++i) {
// fit with std::vectors
models[i]->fit(dataset, features, className, states, smoothing);
}
}
std::vector<int> Ensemble::compute_arg_max(std::vector<std::vector<double>>& X)
{
std::vector<int> y_pred;
for (auto i = 0; i < X.size(); ++i) {
auto max = std::max_element(X[i].begin(), X[i].end());
y_pred.push_back(std::distance(X[i].begin(), max));
}
return y_pred;
}
torch::Tensor Ensemble::compute_arg_max(torch::Tensor& X)
{
auto y_pred = torch::argmax(X, 1);
return y_pred;
}
torch::Tensor Ensemble::voting(torch::Tensor& votes)
{
// Convert m x n_models tensor to a m x n_class_states with voting probabilities
auto y_pred_ = votes.accessor<int, 2>();
std::vector<int> y_pred_final;
int numClasses = states.at(className).size();
// votes is m x n_models with the prediction of every model for each sample
auto result = torch::zeros({ votes.size(0), numClasses }, torch::kFloat32);
auto sum = std::reduce(significanceModels.begin(), significanceModels.end());
for (int i = 0; i < votes.size(0); ++i) {
// n_votes stores, at each index (class value), the significance added by each model
// i.e. n_votes[0] accumulates the significance of every model that predicted class 0
std::vector<double> n_votes(numClasses, 0.0);
for (int j = 0; j < n_models; ++j) {
n_votes[y_pred_[i][j]] += significanceModels.at(j);
}
result[i] = torch::tensor(n_votes);
}
// Divide only once at the end to save work and preserve precision
result /= sum;
return result;
}
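// Worked example (illustrative): with n_models = 3, significances
// {0.5, 1.0, 1.5} and votes {0, 1, 1} for one sample, n_votes becomes
// {0.5, 2.5} and sum = 3.0, so the returned row is {0.167, 0.833}.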
std::vector<std::vector<double>> Ensemble::predict_proba(std::vector<std::vector<int>>& X)
{
if (!fitted) {
throw std::logic_error(ENSEMBLE_NOT_FITTED);
}
return predict_voting ? predict_average_voting(X) : predict_average_proba(X);
}
torch::Tensor Ensemble::predict_proba(torch::Tensor& X)
{
if (!fitted) {
throw std::logic_error(ENSEMBLE_NOT_FITTED);
}
return predict_voting ? predict_average_voting(X) : predict_average_proba(X);
}
std::vector<int> Ensemble::predict(std::vector<std::vector<int>>& X)
{
auto res = predict_proba(X);
return compute_arg_max(res);
}
torch::Tensor Ensemble::predict(torch::Tensor& X)
{
auto res = predict_proba(X);
return compute_arg_max(res);
}
torch::Tensor Ensemble::predict_average_proba(torch::Tensor& X)
{
auto n_states = models[0]->getClassNumStates();
torch::Tensor y_pred = torch::zeros({ X.size(1), n_states }, torch::kFloat32);
for (auto i = 0; i < n_models; ++i) {
auto ypredict = models[i]->predict_proba(X);
y_pred += ypredict * significanceModels[i];
}
auto sum = std::reduce(significanceModels.begin(), significanceModels.end());
y_pred /= sum;
return y_pred;
}
std::vector<std::vector<double>> Ensemble::predict_average_proba(std::vector<std::vector<int>>& X)
{
auto n_states = models[0]->getClassNumStates();
std::vector<std::vector<double>> y_pred(X[0].size(), std::vector<double>(n_states, 0.0));
for (auto i = 0; i < n_models; ++i) {
auto ypredict = models[i]->predict_proba(X);
assert(ypredict.size() == y_pred.size());
assert(ypredict[0].size() == y_pred[0].size());
// Multiply each prediction by the significance of the model and then add it to the final prediction
for (auto j = 0; j < ypredict.size(); ++j) {
std::transform(y_pred[j].begin(), y_pred[j].end(), ypredict[j].begin(), y_pred[j].begin(),
[significanceModels = significanceModels[i]](double x, double y) { return x + y * significanceModels; });
}
}
auto sum = std::reduce(significanceModels.begin(), significanceModels.end());
//Divide each element of the prediction by the sum of the significances
for (auto j = 0; j < y_pred.size(); ++j) {
std::transform(y_pred[j].begin(), y_pred[j].end(), y_pred[j].begin(), [sum](double x) { return x / sum; });
}
return y_pred;
}
std::vector<std::vector<double>> Ensemble::predict_average_voting(std::vector<std::vector<int>>& X)
{
torch::Tensor Xt = bayesnet::vectorToTensor(X, false);
auto y_pred = predict_average_voting(Xt);
std::vector<std::vector<double>> result = tensorToVectorDouble(y_pred);
return result;
}
torch::Tensor Ensemble::predict_average_voting(torch::Tensor& X)
{
// Build a m x n_models tensor with the predictions of each model
torch::Tensor y_pred = torch::zeros({ X.size(1), n_models }, torch::kInt32);
for (auto i = 0; i < n_models; ++i) {
auto ypredict = models[i]->predict(X);
y_pred.index_put_({ "...", i }, ypredict);
}
return voting(y_pred);
}
float Ensemble::score(torch::Tensor& X, torch::Tensor& y)
{
auto y_pred = predict(X);
int correct = 0;
for (int i = 0; i < y_pred.size(0); ++i) {
if (y_pred[i].item<int>() == y[i].item<int>()) {
correct++;
}
}
return (double)correct / y_pred.size(0);
}
float Ensemble::score(std::vector<std::vector<int>>& X, std::vector<int>& y)
{
auto y_pred = predict(X);
int correct = 0;
for (int i = 0; i < y_pred.size(); ++i) {
if (y_pred[i] == y[i]) {
correct++;
}
}
return (double)correct / y_pred.size();
}
std::vector<std::string> Ensemble::show() const
{
auto result = std::vector<std::string>();
for (auto i = 0; i < n_models; ++i) {
auto res = models[i]->show();
result.insert(result.end(), res.begin(), res.end());
}
return result;
}
std::vector<std::string> Ensemble::graph(const std::string& title) const
{
auto result = std::vector<std::string>();
for (auto i = 0; i < n_models; ++i) {
auto res = models[i]->graph(title + "_" + std::to_string(i));
result.insert(result.end(), res.begin(), res.end());
}
return result;
}
int Ensemble::getNumberOfNodes() const
{
int nodes = 0;
for (auto i = 0; i < n_models; ++i) {
nodes += models[i]->getNumberOfNodes();
}
return nodes;
}
int Ensemble::getNumberOfEdges() const
{
int edges = 0;
for (auto i = 0; i < n_models; ++i) {
edges += models[i]->getNumberOfEdges();
}
return edges;
}
int Ensemble::getNumberOfStates() const
{
int nstates = 0;
for (auto i = 0; i < n_models; ++i) {
nstates += models[i]->getNumberOfStates();
}
return nstates;
}
}

View File

@ -1,53 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef ENSEMBLE_H
#define ENSEMBLE_H
#include <torch/torch.h>
#include "bayesnet/utils/BayesMetrics.h"
#include "bayesnet/utils/bayesnetUtils.h"
#include "bayesnet/classifiers/Classifier.h"
namespace bayesnet {
class Ensemble : public Classifier {
public:
Ensemble(bool predict_voting = true);
virtual ~Ensemble() = default;
torch::Tensor predict(torch::Tensor& X) override;
std::vector<int> predict(std::vector<std::vector<int>>& X) override;
torch::Tensor predict_proba(torch::Tensor& X) override;
std::vector<std::vector<double>> predict_proba(std::vector<std::vector<int>>& X) override;
float score(torch::Tensor& X, torch::Tensor& y) override;
float score(std::vector<std::vector<int>>& X, std::vector<int>& y) override;
int getNumberOfNodes() const override;
int getNumberOfEdges() const override;
int getNumberOfStates() const override;
std::vector<std::string> show() const override;
std::vector<std::string> graph(const std::string& title) const override;
std::vector<std::string> topological_order() override
{
return std::vector<std::string>();
}
std::string dump_cpt() const override
{
return "";
}
protected:
torch::Tensor predict_average_voting(torch::Tensor& X);
std::vector<std::vector<double>> predict_average_voting(std::vector<std::vector<int>>& X);
torch::Tensor predict_average_proba(torch::Tensor& X);
std::vector<std::vector<double>> predict_average_proba(std::vector<std::vector<int>>& X);
torch::Tensor compute_arg_max(torch::Tensor& X);
std::vector<int> compute_arg_max(std::vector<std::vector<double>>& X);
torch::Tensor voting(torch::Tensor& votes);
unsigned n_models;
std::vector<std::unique_ptr<Classifier>> models;
std::vector<double> significanceModels;
void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) override;
bool predict_voting;
};
}
#endif

View File

@ -1,26 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef CFS_H
#define CFS_H
#include <torch/torch.h>
#include <vector>
#include "bayesnet/feature_selection/FeatureSelect.h"
namespace bayesnet {
class CFS : public FeatureSelect {
public:
// dataset is an (n+1) x m tensor of integers where dataset[-1] is the y vector
CFS(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights) :
FeatureSelect(samples, features, className, maxFeatures, classNumStates, weights)
{
}
virtual ~CFS() {};
void fit() override;
private:
bool computeContinueCondition(const std::vector<int>& featureOrder);
};
}
#endif

View File

@ -1,23 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef FCBF_H
#define FCBF_H
#include <torch/torch.h>
#include <vector>
#include "bayesnet/feature_selection/FeatureSelect.h"
namespace bayesnet {
class FCBF : public FeatureSelect {
public:
// dataset is an (n+1) x m tensor of integers where dataset[-1] is the y vector
FCBF(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights, const double threshold);
virtual ~FCBF() {};
void fit() override;
private:
double threshold = -1;
};
}
#endif

View File

@ -1,36 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef FEATURE_SELECT_H
#define FEATURE_SELECT_H
#include <torch/torch.h>
#include <vector>
#include "bayesnet/utils/BayesMetrics.h"
namespace bayesnet {
class FeatureSelect : public Metrics {
public:
// dataset is an (n+1) x m tensor of integers where dataset[-1] is the y vector
FeatureSelect(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights);
virtual ~FeatureSelect() {};
virtual void fit() = 0;
std::vector<int> getFeatures() const;
std::vector<double> getScores() const;
protected:
void initialize();
void computeSuLabels();
double computeSuFeatures(const int a, const int b);
double symmetricalUncertainty(int a, int b);
double computeMeritCFS();
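// Reference formulas (informative comment; the authoritative definitions
// live in FeatureSelect.cc): symmetricalUncertainty is presumably
//   SU(a, b) = 2 * I(a; b) / (H(a) + H(b))
// and computeMeritCFS the standard CFS merit for k selected features,
//   merit = k * mean(SU(f, class)) / sqrt(k + k * (k - 1) * mean(SU(f, f')))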
const torch::Tensor& weights;
int maxFeatures;
std::vector<int> selectedFeatures;
std::vector<double> selectedScores;
std::vector<double> suLabels;
std::map<std::pair<int, int>, double> suFeatures;
bool fitted = false;
};
}
#endif

View File

@ -1,23 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef IWSS_H
#define IWSS_H
#include <vector>
#include <torch/torch.h>
#include "FeatureSelect.h"
namespace bayesnet {
class IWSS : public FeatureSelect {
public:
// dataset is an (n+1) x m tensor of integers where dataset[-1] is the y vector
IWSS(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights, const double threshold);
virtual ~IWSS() {};
void fit() override;
private:
double threshold = -1;
};
}
#endif

View File

@ -1,506 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#include <thread>
#include <sstream>
#include <numeric>
#include <algorithm>
#include "Network.h"
#include "bayesnet/utils/bayesnetUtils.h"
#include "bayesnet/utils/CountingSemaphore.h"
#include <pthread.h>
#include <fstream>
namespace bayesnet {
Network::Network() : fitted{ false }, classNumStates{ 0 }
{
}
Network::Network(const Network& other) : features(other.features), className(other.className), classNumStates(other.getClassNumStates()),
fitted(other.fitted), samples(other.samples)
{
if (samples.defined())
samples = samples.clone();
for (const auto& node : other.nodes) {
nodes[node.first] = std::make_unique<Node>(*node.second);
}
}
void Network::initialize()
{
features.clear();
className = "";
classNumStates = 0;
fitted = false;
nodes.clear();
samples = torch::Tensor();
}
torch::Tensor& Network::getSamples()
{
return samples;
}
void Network::addNode(const std::string& name)
{
if (fitted) {
throw std::invalid_argument("Cannot add node to a fitted network. Initialize first.");
}
if (name == "") {
throw std::invalid_argument("Node name cannot be empty");
}
if (nodes.find(name) != nodes.end()) {
return;
}
if (find(features.begin(), features.end(), name) == features.end()) {
features.push_back(name);
}
nodes[name] = std::make_unique<Node>(name);
}
std::vector<std::string> Network::getFeatures() const
{
return features;
}
int Network::getClassNumStates() const
{
return classNumStates;
}
int Network::getStates() const
{
int result = 0;
for (auto& node : nodes) {
result += node.second->getNumStates();
}
return result;
}
std::string Network::getClassName() const
{
return className;
}
bool Network::isCyclic(const std::string& nodeId, std::unordered_set<std::string>& visited, std::unordered_set<std::string>& recStack)
{
if (visited.find(nodeId) == visited.end()) // if node hasn't been visited yet
{
visited.insert(nodeId);
recStack.insert(nodeId);
for (Node* child : nodes[nodeId]->getChildren()) {
if (visited.find(child->getName()) == visited.end() && isCyclic(child->getName(), visited, recStack))
return true;
if (recStack.find(child->getName()) != recStack.end())
return true;
}
}
recStack.erase(nodeId); // remove node from recursion stack before function ends
return false;
}
void Network::addEdge(const std::string& parent, const std::string& child)
{
if (fitted) {
throw std::invalid_argument("Cannot add edge to a fitted network. Initialize first.");
}
if (nodes.find(parent) == nodes.end()) {
throw std::invalid_argument("Parent node " + parent + " does not exist");
}
if (nodes.find(child) == nodes.end()) {
throw std::invalid_argument("Child node " + child + " does not exist");
}
// Check if the edge is already in the graph
for (auto& node : nodes[parent]->getChildren()) {
if (node->getName() == child) {
throw std::invalid_argument("Edge " + parent + " -> " + child + " already exists");
}
}
// Temporarily add edge to check for cycles
nodes[parent]->addChild(nodes[child].get());
nodes[child]->addParent(nodes[parent].get());
std::unordered_set<std::string> visited;
std::unordered_set<std::string> recStack;
if (isCyclic(nodes[child]->getName(), visited, recStack)) // if adding this edge forms a cycle
{
// remove problematic edge
nodes[parent]->removeChild(nodes[child].get());
nodes[child]->removeParent(nodes[parent].get());
throw std::invalid_argument("Adding this edge forms a cycle in the graph.");
}
}
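// Example (illustrative): after addEdge("A", "B") and addEdge("B", "C"),
// a call to addEdge("C", "A") first links the nodes, then isCyclic("A")
// finds "A" on the recursion stack via A -> B -> C -> A, so the edge is
// rolled back and std::invalid_argument is thrown.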
std::map<std::string, std::unique_ptr<Node>>& Network::getNodes()
{
return nodes;
}
void Network::checkFitData(int n_samples, int n_features, int n_samples_y, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights)
{
if (weights.size(0) != n_samples) {
throw std::invalid_argument("Weights (" + std::to_string(weights.size(0)) + ") must have the same number of elements as samples (" + std::to_string(n_samples) + ") in Network::fit");
}
if (n_samples != n_samples_y) {
throw std::invalid_argument("X and y must have the same number of samples in Network::fit (" + std::to_string(n_samples) + " != " + std::to_string(n_samples_y) + ")");
}
if (n_features != featureNames.size()) {
throw std::invalid_argument("X and features must have the same number of features in Network::fit (" + std::to_string(n_features) + " != " + std::to_string(featureNames.size()) + ")");
}
if (features.size() == 0) {
throw std::invalid_argument("The network has not been initialized. You must call addNode() before calling fit()");
}
if (n_features != features.size() - 1) {
throw std::invalid_argument("X and local features must have the same number of features in Network::fit (" + std::to_string(n_features) + " != " + std::to_string(features.size() - 1) + ")");
}
if (find(features.begin(), features.end(), className) == features.end()) {
throw std::invalid_argument("Class Name not found in Network::features");
}
for (auto& feature : featureNames) {
if (find(features.begin(), features.end(), feature) == features.end()) {
throw std::invalid_argument("Feature " + feature + " not found in Network::features");
}
if (states.find(feature) == states.end()) {
throw std::invalid_argument("Feature " + feature + " not found in states");
}
}
}
void Network::setStates(const std::map<std::string, std::vector<int>>& states)
{
// Set states to every Node in the network
for_each(features.begin(), features.end(), [this, &states](const std::string& feature) {
nodes.at(feature)->setNumStates(states.at(feature).size());
});
classNumStates = nodes.at(className)->getNumStates();
}
// X comes in nxm, where n is the number of features and m the number of samples
void Network::fit(const torch::Tensor& X, const torch::Tensor& y, const torch::Tensor& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing)
{
checkFitData(X.size(1), X.size(0), y.size(0), featureNames, className, states, weights);
this->className = className;
torch::Tensor ytmp = torch::transpose(y.view({ y.size(0), 1 }), 0, 1);
samples = torch::cat({ X , ytmp }, 0);
completeFit(states, weights, smoothing);
}
void Network::fit(const torch::Tensor& samples, const torch::Tensor& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing)
{
checkFitData(samples.size(1), samples.size(0) - 1, samples.size(1), featureNames, className, states, weights);
this->className = className;
this->samples = samples;
completeFit(states, weights, smoothing);
}
// input_data comes in nxm, where n is the number of features and m the number of samples
void Network::fit(const std::vector<std::vector<int>>& input_data, const std::vector<int>& labels, const std::vector<double>& weights_, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing)
{
const torch::Tensor weights = torch::tensor(weights_, torch::kFloat64);
checkFitData(input_data[0].size(), input_data.size(), labels.size(), featureNames, className, states, weights);
this->className = className;
// Build tensor of samples (nxm) (n+1 because of the class)
samples = torch::zeros({ static_cast<int>(input_data.size() + 1), static_cast<int>(input_data[0].size()) }, torch::kInt32);
for (int i = 0; i < featureNames.size(); ++i) {
samples.index_put_({ i, "..." }, torch::tensor(input_data[i], torch::kInt32));
}
samples.index_put_({ -1, "..." }, torch::tensor(labels, torch::kInt32));
completeFit(states, weights, smoothing);
}
void Network::completeFit(const std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const Smoothing_t smoothing)
{
setStates(states);
std::vector<std::thread> threads;
auto& semaphore = CountingSemaphore::getInstance();
const double n_samples = static_cast<double>(samples.size(1));
auto worker = [&](std::pair<const std::string, std::unique_ptr<Node>>& node, int i) {
std::string threadName = "FitWorker-" + std::to_string(i);
#if defined(__linux__)
pthread_setname_np(pthread_self(), threadName.c_str());
#else
pthread_setname_np(threadName.c_str());
#endif
double numStates = static_cast<double>(node.second->getNumStates());
double smoothing_factor;
switch (smoothing) {
case Smoothing_t::ORIGINAL:
smoothing_factor = 1.0 / n_samples;
break;
case Smoothing_t::LAPLACE:
smoothing_factor = 1.0;
break;
case Smoothing_t::CESTNIK:
smoothing_factor = 1 / numStates;
break;
default:
smoothing_factor = 0.0; // No smoothing
}
node.second->computeCPT(samples, features, smoothing_factor, weights);
semaphore.release();
};
int i = 0;
for (auto& node : nodes) {
semaphore.acquire();
threads.emplace_back(worker, std::ref(node), i++);
}
for (auto& thread : threads) {
thread.join();
}
fitted = true;
}
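// Smoothing sketch (hedged: the exact CPT estimate is implemented in
// Node::computeCPT, assuming the usual additive form):
//   P(x | parents) ~ (weighted_count(x, parents) + s) /
//                    (weighted_count(parents) + s * numStates)
// with s = 1/n_samples (ORIGINAL), 1 (LAPLACE), 1/numStates (CESTNIK)
// or 0 when no smoothing is selected.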
torch::Tensor Network::predict_tensor(const torch::Tensor& samples, const bool proba)
{
if (!fitted) {
throw std::logic_error("You must call fit() before calling predict()");
}
// Ensure the sample size is equal to the number of features
if (samples.size(0) != features.size() - 1) {
throw std::invalid_argument("(T) Sample size (" + std::to_string(samples.size(0)) +
") does not match the number of features (" + std::to_string(features.size() - 1) + ")");
}
torch::Tensor result;
std::vector<std::thread> threads;
std::mutex mtx;
auto& semaphore = CountingSemaphore::getInstance();
result = torch::zeros({ samples.size(1), classNumStates }, torch::kFloat64);
auto worker = [&](const torch::Tensor& sample, int i) {
std::string threadName = "PredictWorker-" + std::to_string(i);
#if defined(__linux__)
pthread_setname_np(pthread_self(), threadName.c_str());
#else
pthread_setname_np(threadName.c_str());
#endif
auto psample = predict_sample(sample);
auto temp = torch::tensor(psample, torch::kFloat64);
{
std::lock_guard<std::mutex> lock(mtx);
result.index_put_({ i, "..." }, temp);
}
semaphore.release();
};
for (int i = 0; i < samples.size(1); ++i) {
semaphore.acquire();
const torch::Tensor sample = samples.index({ "...", i });
threads.emplace_back(worker, sample, i);
}
for (auto& thread : threads) {
thread.join();
}
if (proba)
return result;
return result.argmax(1);
}
// Return mxn tensor of probabilities
torch::Tensor Network::predict_proba(const torch::Tensor& samples)
{
return predict_tensor(samples, true);
}
// Return an m-element tensor of class predictions (argmax of the probabilities)
torch::Tensor Network::predict(const torch::Tensor& samples)
{
return predict_tensor(samples, false);
}
// Return mx1 std::vector of predictions
// tsamples is nxm std::vector of samples
std::vector<int> Network::predict(const std::vector<std::vector<int>>& tsamples)
{
if (!fitted) {
throw std::logic_error("You must call fit() before calling predict()");
}
// Ensure the input has one row per feature (the class row is not part of the input)
if (tsamples.size() != features.size() - 1) {
throw std::invalid_argument("(V) Sample size (" + std::to_string(tsamples.size()) +
") does not match the number of features (" + std::to_string(features.size() - 1) + ")");
}
std::vector<int> predictions(tsamples[0].size(), 0);
std::vector<int> sample;
std::vector<std::thread> threads;
auto& semaphore = CountingSemaphore::getInstance();
auto worker = [&](const std::vector<int>& sample, const int row, int& prediction) {
std::string threadName = "(V)PWorker-" + std::to_string(row);
#if defined(__linux__)
pthread_setname_np(pthread_self(), threadName.c_str());
#else
pthread_setname_np(threadName.c_str());
#endif
auto classProbabilities = predict_sample(sample);
auto maxElem = max_element(classProbabilities.begin(), classProbabilities.end());
int predictedClass = distance(classProbabilities.begin(), maxElem);
prediction = predictedClass;
semaphore.release();
};
for (int row = 0; row < tsamples[0].size(); ++row) {
sample.clear();
for (int col = 0; col < tsamples.size(); ++col) {
sample.push_back(tsamples[col][row]);
}
semaphore.acquire();
threads.emplace_back(worker, sample, row, std::ref(predictions[row]));
}
for (auto& thread : threads) {
thread.join();
}
return predictions;
}
// Return mxn std::vector of probabilities
// tsamples is nxm std::vector of samples
std::vector<std::vector<double>> Network::predict_proba(const std::vector<std::vector<int>>& tsamples)
{
if (!fitted) {
throw std::logic_error("You must call fit() before calling predict_proba()");
}
// Ensure the input has one row per feature (the class row is not part of the input)
if (tsamples.size() != features.size() - 1) {
throw std::invalid_argument("(V) Sample size (" + std::to_string(tsamples.size()) +
") does not match the number of features (" + std::to_string(features.size() - 1) + ")");
}
std::vector<std::vector<double>> predictions(tsamples[0].size(), std::vector<double>(classNumStates, 0.0));
std::vector<int> sample;
std::vector<std::thread> threads;
auto& semaphore = CountingSemaphore::getInstance();
auto worker = [&](const std::vector<int>& sample, int row, std::vector<double>& prediction) {
std::string threadName = "(V)PWorker-" + std::to_string(row);
#if defined(__linux__)
pthread_setname_np(pthread_self(), threadName.c_str());
#else
pthread_setname_np(threadName.c_str());
#endif
prediction = predict_sample(sample); // write the class distribution straight into the caller's row
semaphore.release();
};
for (int row = 0; row < tsamples[0].size(); ++row) {
sample.clear();
for (int col = 0; col < tsamples.size(); ++col) {
sample.push_back(tsamples[col][row]);
}
semaphore.acquire();
threads.emplace_back(worker, sample, row, std::ref(predictions[row]));
}
for (auto& thread : threads) {
thread.join();
}
return predictions;
}
double Network::score(const std::vector<std::vector<int>>& tsamples, const std::vector<int>& labels)
{
std::vector<int> y_pred = predict(tsamples);
int correct = 0;
for (int i = 0; i < y_pred.size(); ++i) {
if (y_pred[i] == labels[i]) {
correct++;
}
}
return static_cast<double>(correct) / y_pred.size();
}
// Return 1xn std::vector of probabilities
std::vector<double> Network::predict_sample(const std::vector<int>& sample)
{
std::map<std::string, int> evidence;
for (int i = 0; i < sample.size(); ++i) {
evidence[features[i]] = sample[i];
}
return exactInference(evidence);
}
// Return 1xn std::vector of probabilities
std::vector<double> Network::predict_sample(const torch::Tensor& sample)
{
std::map<std::string, int> evidence;
for (int i = 0; i < sample.size(0); ++i) {
evidence[features[i]] = sample[i].item<int>();
}
return exactInference(evidence);
}
std::vector<double> Network::exactInference(std::map<std::string, int>& evidence)
{
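// Evaluate the unnormalized joint P(evidence, class = i) for every class state, then normalize into P(class | evidence).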
std::vector<double> result(classNumStates, 0.0);
auto completeEvidence = std::map<std::string, int>(evidence);
for (int i = 0; i < classNumStates; ++i) {
completeEvidence[getClassName()] = i;
double partial = 1.0;
for (auto& node : getNodes()) {
partial *= node.second->getFactorValue(completeEvidence);
}
result[i] = partial;
}
// Normalize result
double sum = std::accumulate(result.begin(), result.end(), 0.0);
transform(result.begin(), result.end(), result.begin(), [sum](const double& value) { return value / sum; });
return result;
}
std::vector<std::string> Network::show() const
{
std::vector<std::string> result;
// Draw the network
for (auto& node : nodes) {
std::string line = node.first + " -> ";
for (auto child : node.second->getChildren()) {
line += child->getName() + ", ";
}
result.push_back(line);
}
return result;
}
std::vector<std::string> Network::graph(const std::string& title) const
{
auto output = std::vector<std::string>();
auto prefix = "digraph BayesNet {\nlabel=<BayesNet ";
auto suffix = ">\nfontsize=30\nfontcolor=blue\nlabelloc=t\nlayout=circo\n";
std::string header = prefix + title + suffix;
output.push_back(header);
for (auto& node : nodes) {
auto result = node.second->graph(className);
output.insert(output.end(), result.begin(), result.end());
}
output.push_back("}\n");
return output;
}
std::vector<std::pair<std::string, std::string>> Network::getEdges() const
{
auto edges = std::vector<std::pair<std::string, std::string>>();
for (const auto& node : nodes) {
auto head = node.first;
for (const auto& child : node.second->getChildren()) {
auto tail = child->getName();
edges.push_back({ head, tail });
}
}
return edges;
}
int Network::getNumEdges() const
{
return getEdges().size();
}
std::vector<std::string> Network::topological_sort()
{
/* Check that all the parents of every node appear before the node */
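// Strategy: repeatedly scan the features and move any parent found after one of its children; stop when a full pass changes nothing (a fixed-point reordering rather than Kahn's algorithm).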
auto result = features;
result.erase(remove(result.begin(), result.end(), className), result.end());
bool ending{ false };
while (!ending) {
ending = true;
for (auto feature : features) {
auto fathers = nodes[feature]->getParents();
for (const auto& father : fathers) {
auto fatherName = father->getName();
if (fatherName == className) {
continue;
}
// Check if father is placed before the actual feature
auto it = find(result.begin(), result.end(), fatherName);
if (it != result.end()) {
auto it2 = find(result.begin(), result.end(), feature);
if (it2 != result.end()) {
if (distance(it, it2) < 0) {
// if it is not, insert it before the feature
result.erase(remove(result.begin(), result.end(), fatherName), result.end());
result.insert(it2, fatherName);
ending = false;
}
}
}
}
}
}
return result;
}
std::string Network::dump_cpt() const
{
std::stringstream oss;
for (auto& node : nodes) {
oss << "* " << node.first << ": (" << node.second->getNumStates() << ") : " << node.second->getCPT().sizes() << std::endl;
oss << node.second->getCPT() << std::endl;
}
return oss.str();
}
}
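
Below is a minimal usage sketch of the Network API, not taken from the repository's tests: the include path, the tiny one-feature dataset, and the naive-Bayes edge are all invented for illustration, and it assumes libtorch is available.

#include "bayesnet/network/Network.h" // hypothetical include path

int main()
{
    bayesnet::Network net;
    net.addNode("A");                   // nodes must follow the dataset column order
    net.addNode("class");
    net.addEdge("class", "A");          // naive-Bayes style dependency
    std::vector<std::vector<int>> X = { { 0, 1, 0, 1 } }; // nxm: one row per feature
    std::vector<int> y = { 0, 0, 1, 1 };
    std::vector<double> w(4, 1.0);      // uniform sample weights
    std::map<std::string, std::vector<int>> states = { { "A", { 0, 1 } }, { "class", { 0, 1 } } };
    net.fit(X, y, w, { "A" }, "class", states, bayesnet::Smoothing_t::LAPLACE);
    auto y_pred = net.predict(X);       // mx1 vector of predicted class indices
    return 0;
}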

View File

@ -1,70 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef NETWORK_H
#define NETWORK_H
#include <map>
#include <vector>
#include "bayesnet/config.h"
#include "Node.h"
namespace bayesnet {
enum class Smoothing_t {
NONE = -1,
ORIGINAL = 0,
LAPLACE,
CESTNIK
};
class Network {
public:
Network();
explicit Network(const Network&);
~Network() = default;
torch::Tensor& getSamples();
void addNode(const std::string&);
void addEdge(const std::string&, const std::string&);
std::map<std::string, std::unique_ptr<Node>>& getNodes();
std::vector<std::string> getFeatures() const;
int getStates() const;
std::vector<std::pair<std::string, std::string>> getEdges() const;
int getNumEdges() const;
int getClassNumStates() const;
std::string getClassName() const;
/*
Notice: Nodes have to be inserted in the same order as they are in the dataset, i.e., first node is first column and so on.
*/
void fit(const std::vector<std::vector<int>>& input_data, const std::vector<int>& labels, const std::vector<double>& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing);
void fit(const torch::Tensor& X, const torch::Tensor& y, const torch::Tensor& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing);
void fit(const torch::Tensor& samples, const torch::Tensor& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing);
std::vector<int> predict(const std::vector<std::vector<int>>&); // Return mx1 std::vector of predictions
torch::Tensor predict(const torch::Tensor&); // Return mx1 tensor of predictions
torch::Tensor predict_tensor(const torch::Tensor& samples, const bool proba);
std::vector<std::vector<double>> predict_proba(const std::vector<std::vector<int>>&); // Return mxn std::vector of probabilities
torch::Tensor predict_proba(const torch::Tensor&); // Return mxn tensor of probabilities
double score(const std::vector<std::vector<int>>&, const std::vector<int>&);
std::vector<std::string> topological_sort();
std::vector<std::string> show() const;
std::vector<std::string> graph(const std::string& title) const; // Returns a std::vector of std::strings representing the graph in graphviz format
void initialize();
std::string dump_cpt() const;
inline std::string version() { return { project_version.begin(), project_version.end() }; }
private:
std::map<std::string, std::unique_ptr<Node>> nodes;
bool fitted;
int classNumStates;
std::vector<std::string> features; // Including classname
std::string className;
torch::Tensor samples; // n+1xm tensor used to fit the model
bool isCyclic(const std::string&, std::unordered_set<std::string>&, std::unordered_set<std::string>&);
std::vector<double> predict_sample(const std::vector<int>&);
std::vector<double> predict_sample(const torch::Tensor&);
std::vector<double> exactInference(std::map<std::string, int>&);
void completeFit(const std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const Smoothing_t smoothing);
void checkFitData(int n_samples, int n_features, int n_samples_y, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights);
void setStates(const std::map<std::string, std::vector<int>>&);
};
}
#endif

View File

@ -1,42 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef NODE_H
#define NODE_H
#include <unordered_set>
#include <vector>
#include <string>
#include <map>
#include <torch/torch.h>
namespace bayesnet {
class Node {
public:
explicit Node(const std::string&);
void clear();
void addParent(Node*);
void addChild(Node*);
void removeParent(Node*);
void removeChild(Node*);
std::string getName() const;
std::vector<Node*>& getParents();
std::vector<Node*>& getChildren();
torch::Tensor& getCPT();
void computeCPT(const torch::Tensor& dataset, const std::vector<std::string>& features, const double smoothing, const torch::Tensor& weights);
int getNumStates() const;
void setNumStates(int);
unsigned minFill();
std::vector<std::string> graph(const std::string& className); // Returns a std::vector of std::strings representing the graph in graphviz format
double getFactorValue(std::map<std::string, int>&);
private:
std::string name;
std::vector<Node*> parents;
std::vector<Node*> children;
int numStates = 0; // number of states of the variable
torch::Tensor cpTable; // Order of indices is 0-> node variable, 1-> 1st parent, 2-> 2nd parent, ...
std::vector<int64_t> dimensions; // dimensions of the cpTable
std::vector<std::pair<std::string, std::string>> combinations(const std::vector<std::string>&);
};
}
#endif

View File

@ -1,260 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#include <map>
#include <unordered_map>
#include <tuple>
#include <algorithm>
#include <cassert>
#include "Mst.h"
#include "BayesMetrics.h"
namespace bayesnet {
// samples is a (n+1)xm tensor used to fit the model (last row holds the class)
Metrics::Metrics(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int classNumStates)
: samples(samples)
, className(className)
, features(features)
, classNumStates(classNumStates)
{
}
// vsamples is an nxm std::vector; the labels are appended as the final row to build the (n+1)xm samples tensor
Metrics::Metrics(const std::vector<std::vector<int>>& vsamples, const std::vector<int>& labels, const std::vector<std::string>& features, const std::string& className, const int classNumStates)
: samples(torch::zeros({ static_cast<int>(vsamples.size() + 1), static_cast<int>(vsamples[0].size()) }, torch::kInt32))
, className(className)
, features(features)
, classNumStates(classNumStates)
{
for (int i = 0; i < vsamples.size(); ++i) {
samples.index_put_({ i, "..." }, torch::tensor(vsamples[i], torch::kInt32));
}
samples.index_put_({ -1, "..." }, torch::tensor(labels, torch::kInt32));
}
std::vector<std::pair<int, int>> Metrics::SelectKPairs(const torch::Tensor& weights, std::vector<int>& featuresExcluded, bool ascending, unsigned k)
{
// Return the K Best features
auto n = features.size();
// compute scores
scoresKPairs.clear();
pairsKBest.clear();
auto labels = samples.index({ -1, "..." });
for (int i = 0; i < n - 1; ++i) {
if (std::find(featuresExcluded.begin(), featuresExcluded.end(), i) != featuresExcluded.end()) {
continue;
}
for (int j = i + 1; j < n; ++j) {
if (std::find(featuresExcluded.begin(), featuresExcluded.end(), j) != featuresExcluded.end()) {
continue;
}
auto key = std::make_pair(i, j);
auto value = conditionalMutualInformation(samples.index({ i, "..." }), samples.index({ j, "..." }), labels, weights);
scoresKPairs.push_back({ key, value });
}
}
// sort scores
if (ascending) {
sort(scoresKPairs.begin(), scoresKPairs.end(), [](auto& a, auto& b)
{ return a.second < b.second; });
} else {
sort(scoresKPairs.begin(), scoresKPairs.end(), [](auto& a, auto& b)
{ return a.second > b.second; });
}
for (auto& [pairs, score] : scoresKPairs) {
pairsKBest.push_back(pairs);
}
if (k != 0 && k < pairsKBest.size()) {
if (ascending) {
int limit = pairsKBest.size() - k;
for (int i = 0; i < limit; i++) {
pairsKBest.erase(pairsKBest.begin());
scoresKPairs.erase(scoresKPairs.begin());
}
} else {
pairsKBest.resize(k);
scoresKPairs.resize(k);
}
}
return pairsKBest;
}
std::vector<int> Metrics::SelectKBestWeighted(const torch::Tensor& weights, bool ascending, unsigned k)
{
// Return the K Best features
auto n = features.size();
if (k == 0) {
k = n;
}
// compute scores
scoresKBest.clear();
featuresKBest.clear();
auto label = samples.index({ -1, "..." });
for (int i = 0; i < n; ++i) {
scoresKBest.push_back(mutualInformation(label, samples.index({ i, "..." }), weights));
featuresKBest.push_back(i);
}
// sort & reduce scores and features
if (ascending) {
sort(featuresKBest.begin(), featuresKBest.end(), [&](int i, int j)
{ return scoresKBest[i] < scoresKBest[j]; });
sort(scoresKBest.begin(), scoresKBest.end(), std::less<double>());
if (k < n) {
for (int i = 0; i < n - k; ++i) {
featuresKBest.erase(featuresKBest.begin());
scoresKBest.erase(scoresKBest.begin());
}
}
} else {
sort(featuresKBest.begin(), featuresKBest.end(), [&](int i, int j)
{ return scoresKBest[i] > scoresKBest[j]; });
sort(scoresKBest.begin(), scoresKBest.end(), std::greater<double>());
featuresKBest.resize(k);
scoresKBest.resize(k);
}
return featuresKBest;
}
std::vector<double> Metrics::getScoresKBest() const
{
return scoresKBest;
}
std::vector<std::pair<std::pair<int, int>, double>> Metrics::getScoresKPairs() const
{
return scoresKPairs;
}
torch::Tensor Metrics::conditionalEdge(const torch::Tensor& weights)
{
auto result = std::vector<double>();
auto source = std::vector<std::string>(features);
source.push_back(className);
auto combinations = doCombinations(source);
// Compute class prior
auto margin = torch::zeros({ classNumStates }, torch::kFloat);
for (int value = 0; value < classNumStates; ++value) {
auto mask = samples.index({ -1, "..." }) == value;
margin[value] = mask.sum().item<double>() / samples.size(1);
}
for (auto [first, second] : combinations) {
int index_first = find(features.begin(), features.end(), first) - features.begin();
int index_second = find(features.begin(), features.end(), second) - features.begin();
double accumulated = 0;
for (int value = 0; value < classNumStates; ++value) {
auto mask = samples.index({ -1, "..." }) == value;
auto first_dataset = samples.index({ index_first, mask });
auto second_dataset = samples.index({ index_second, mask });
auto weights_dataset = weights.index({ mask });
auto mi = mutualInformation(first_dataset, second_dataset, weights_dataset);
auto pb = margin[value].item<double>();
accumulated += pb * mi;
}
result.push_back(accumulated);
}
long n_vars = source.size();
auto matrix = torch::zeros({ n_vars, n_vars });
auto indices = torch::triu_indices(n_vars, n_vars, 1);
for (auto i = 0; i < result.size(); ++i) {
auto x = indices[0][i];
auto y = indices[1][i];
matrix[x][y] = result[i];
matrix[y][x] = result[i];
}
return matrix;
}
// Measured in nats (natural logarithm, base e)
// Elements of Information Theory, 2nd Edition, Thomas M. Cover, Joy A. Thomas p. 14
double Metrics::entropy(const torch::Tensor& feature, const torch::Tensor& weights)
{
torch::Tensor counts = feature.bincount(weights);
double totalWeight = counts.sum().item<double>();
torch::Tensor probs = counts.to(torch::kFloat) / totalWeight;
torch::Tensor logProbs = torch::log(probs);
torch::Tensor entropy = -probs * logProbs;
return entropy.nansum().item<double>();
}
// H(Y|X) = sum_{x in X} p(x) H(Y|X=x)
double Metrics::conditionalEntropy(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& weights)
{
int numSamples = firstFeature.sizes()[0];
torch::Tensor featureCounts = secondFeature.bincount(weights);
std::unordered_map<int, std::unordered_map<int, double>> jointCounts;
double totalWeight = 0;
for (auto i = 0; i < numSamples; i++) {
jointCounts[secondFeature[i].item<int>()][firstFeature[i].item<int>()] += weights[i].item<double>();
totalWeight += weights[i].item<double>();
}
if (totalWeight == 0)
return 0;
double entropyValue = 0;
for (int value = 0; value < featureCounts.sizes()[0]; ++value) {
double p_f = featureCounts[value].item<double>() / totalWeight;
double entropy_f = 0;
for (auto& [label, jointCount] : jointCounts[value]) {
double p_l_f = jointCount / featureCounts[value].item<double>();
if (p_l_f > 0) {
entropy_f -= p_l_f * log(p_l_f); // terms with p == 0 contribute nothing, so they are skipped instead of resetting the accumulator
}
}
entropyValue += p_f * entropy_f;
}
return entropyValue;
}
// H(X|Y,C) = sum_{y in Y, c in C} p(x,c) H(X|Y=y,C=c)
double Metrics::conditionalEntropy(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& labels, const torch::Tensor& weights)
{
// Ensure the tensors are of the same length
assert(firstFeature.size(0) == secondFeature.size(0) && firstFeature.size(0) == labels.size(0) && firstFeature.size(0) == weights.size(0));
// Convert tensors to vectors for easier processing
auto firstFeatureData = firstFeature.accessor<int, 1>();
auto secondFeatureData = secondFeature.accessor<int, 1>();
auto labelsData = labels.accessor<int, 1>();
auto weightsData = weights.accessor<double, 1>();
int numSamples = firstFeature.size(0);
// Maps for joint and marginal probabilities
std::map<std::tuple<int, int, int>, double> jointCount;
std::map<std::tuple<int, int>, double> marginalCount;
// Compute joint and marginal counts
for (int i = 0; i < numSamples; ++i) {
auto keyJoint = std::make_tuple(firstFeatureData[i], labelsData[i], secondFeatureData[i]);
auto keyMarginal = std::make_tuple(firstFeatureData[i], labelsData[i]);
jointCount[keyJoint] += weightsData[i];
marginalCount[keyMarginal] += weightsData[i];
}
// Total weight sum
double totalWeight = torch::sum(weights).item<double>();
if (totalWeight == 0)
return 0;
// Compute the conditional entropy
double conditionalEntropy = 0.0;
for (const auto& [keyJoint, jointFreq] : jointCount) {
auto [x, c, y] = keyJoint;
auto keyMarginal = std::make_tuple(x, c);
//double p_xc = marginalCount[keyMarginal] / totalWeight;
double p_y_given_xc = jointFreq / marginalCount[keyMarginal];
if (p_y_given_xc > 0) {
conditionalEntropy -= (jointFreq / totalWeight) * std::log(p_y_given_xc);
}
}
return conditionalEntropy;
}
// I(X;Y) = H(Y) - H(Y|X) ; I(X;Y) >= 0
double Metrics::mutualInformation(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& weights)
{
return std::max(entropy(firstFeature, weights) - conditionalEntropy(firstFeature, secondFeature, weights), 0.0);
}
// I(X;Y|C) = H(X|C) - H(X|Y,C) >= 0
double Metrics::conditionalMutualInformation(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& labels, const torch::Tensor& weights)
{
return std::max(conditionalEntropy(firstFeature, labels, weights) - conditionalEntropy(firstFeature, secondFeature, labels, weights), 0.0);
}
/*
Compute the maximum spanning tree considering the weights as distances
and the indices of the weights as nodes of this square matrix using
Kruskal algorithm
*/
std::vector<std::pair<int, int>> Metrics::maximumSpanningTree(const std::vector<std::string>& features, const torch::Tensor& weights, const int root)
{
auto mst = MST(features, weights, root);
return mst.maximumSpanningTree();
}
}
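
A sketch of exercising the Metrics class, again with invented data and include path; f0 copies the class while f1 is independent of it, so SelectKBestWeighted should rank f0 first.

#include "bayesnet/utils/BayesMetrics.h" // hypothetical include path

int main()
{
    std::vector<std::vector<int>> X = { { 0, 0, 1, 1 }, { 0, 1, 0, 1 } }; // two features, four samples
    std::vector<int> y = { 0, 0, 1, 1 };                                  // f0 mirrors the class, f1 is noise
    bayesnet::Metrics metrics(X, y, { "f0", "f1" }, "class", 2);
    auto weights = torch::full({ 4 }, 0.25, torch::kDouble);              // uniform, sums to 1
    auto best = metrics.SelectKBestWeighted(weights, false, 1);           // expected to hold the index of f0
    auto scores = metrics.getScoresKBest();
    return 0;
}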

View File

@ -1,62 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef BAYESNET_METRICS_H
#define BAYESNET_METRICS_H
#include <vector>
#include <string>
#include <torch/torch.h>
namespace bayesnet {
class Metrics {
public:
Metrics() = default;
Metrics(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int classNumStates);
Metrics(const std::vector<std::vector<int>>& vsamples, const std::vector<int>& labels, const std::vector<std::string>& features, const std::string& className, const int classNumStates);
std::vector<int> SelectKBestWeighted(const torch::Tensor& weights, bool ascending = false, unsigned k = 0);
std::vector<std::pair<int, int>> SelectKPairs(const torch::Tensor& weights, std::vector<int>& featuresExcluded, bool ascending = false, unsigned k = 0);
std::vector<double> getScoresKBest() const;
std::vector<std::pair<std::pair<int, int>, double>> getScoresKPairs() const;
double mutualInformation(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& weights);
double conditionalMutualInformation(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& labels, const torch::Tensor& weights);
torch::Tensor conditionalEdge(const torch::Tensor& weights);
std::vector<std::pair<int, int>> maximumSpanningTree(const std::vector<std::string>& features, const torch::Tensor& weights, const int root);
// Measured in nats (natural logarithm, base e)
// Elements of Information Theory, 2nd Edition, Thomas M. Cover, Joy A. Thomas p. 14
double entropy(const torch::Tensor& feature, const torch::Tensor& weights);
double conditionalEntropy(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& labels, const torch::Tensor& weights);
protected:
torch::Tensor samples; // n+1xm torch::Tensor used to fit the model where samples[-1] is the y std::vector
std::string className;
std::vector<std::string> features;
template <class T>
std::vector<std::pair<T, T>> doCombinations(const std::vector<T>& source)
{
std::vector<std::pair<T, T>> result;
for (int i = 0; i < source.size() - 1; ++i) {
T temp = source[i];
for (int j = i + 1; j < source.size(); ++j) {
result.push_back({ temp, source[j] });
}
}
return result;
}
template <class T>
T pop_first(std::vector<T>& v)
{
T temp = v[0];
v.erase(v.begin());
return temp;
}
private:
int classNumStates = 0;
std::vector<double> scoresKBest;
std::vector<int> featuresKBest; // sorted indices of the features
std::vector<std::pair<int, int>> pairsKBest; // sorted indices of the pairs
std::vector<std::pair<std::pair<int, int>, double>> scoresKPairs;
double conditionalEntropy(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& weights);
};
}
#endif

View File

@ -1,46 +0,0 @@
#ifndef COUNTING_SEMAPHORE_H
#define COUNTING_SEMAPHORE_H
#include <mutex>
#include <condition_variable>
#include <algorithm>
#include <thread>
class CountingSemaphore {
public:
static CountingSemaphore& getInstance()
{
static CountingSemaphore instance;
return instance;
}
// Delete copy constructor and assignment operator
CountingSemaphore(const CountingSemaphore&) = delete;
CountingSemaphore& operator=(const CountingSemaphore&) = delete;
void acquire()
{
std::unique_lock<std::mutex> lock(mtx_);
cv_.wait(lock, [this]() { return count_ > 0; });
--count_;
}
void release()
{
std::lock_guard<std::mutex> lock(mtx_);
++count_;
if (count_ <= max_count_) {
cv_.notify_one();
}
}
private:
CountingSemaphore()
: max_count_(std::max(1u, static_cast<unsigned int>(0.95 * std::thread::hardware_concurrency()))),
count_(max_count_)
{
}
std::mutex mtx_;
std::condition_variable cv_;
const unsigned int max_count_;
unsigned int count_;
};
#endif
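
The usage pattern the semaphore is designed for, mirroring the fit/predict workers earlier in the diff (the loop body is a placeholder):

#include "CountingSemaphore.h"
#include <thread>
#include <vector>

void run_throttled()
{
    auto& semaphore = CountingSemaphore::getInstance();
    std::vector<std::thread> threads;
    for (int i = 0; i < 100; ++i) {
        semaphore.acquire();                 // blocks while all slots are taken
        threads.emplace_back([&semaphore]() {
            // ... do the work ...
            semaphore.release();             // give the slot back before the thread exits
        });
    }
    for (auto& t : threads) t.join();
}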

View File

@ -1,40 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef MST_H
#define MST_H
#include <vector>
#include <list>
#include <string>
#include <torch/torch.h>
namespace bayesnet {
class MST {
public:
MST() = default;
MST(const std::vector<std::string>& features, const torch::Tensor& weights, const int root);
void insertElement(std::list<int>& variables, int variable);
std::vector<std::pair<int, int>> reorder(std::vector<std::pair<float, std::pair<int, int>>> T, int root_original);
std::vector<std::pair<int, int>> maximumSpanningTree();
private:
torch::Tensor weights;
std::vector<std::string> features;
int root = 0;
};
class Graph {
public:
explicit Graph(int V);
void addEdge(int u, int v, float wt);
int find_set(int i);
void union_set(int u, int v);
void kruskal_algorithm();
std::vector <std::pair<float, std::pair<int, int>>> get_mst() { return T; }
private:
int V; // number of nodes in graph
std::vector <std::pair<float, std::pair<int, int>>> G; // std::vector for graph
std::vector <std::pair<float, std::pair<int, int>>> T; // std::vector for mst
std::vector<int> parent;
};
}
#endif
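
A toy run of the Graph helper declared above, with invented edge weights; kruskal_algorithm builds the spanning tree that get_mst returns as (weight, (u, v)) pairs.

#include "Mst.h"

int main()
{
    bayesnet::Graph g(3);
    g.addEdge(0, 1, 0.9f);
    g.addEdge(1, 2, 0.5f);
    g.addEdge(0, 2, 0.1f);
    g.kruskal_algorithm();
    auto T = g.get_mst(); // (weight, (u, v)) pairs of the spanning tree
    return 0;
}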

View File

@ -1,44 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#include "bayesnetUtils.h"
namespace bayesnet {
// Return the indices in descending order
std::vector<int> argsort(std::vector<double>& nums)
{
int n = nums.size();
std::vector<int> indices(n);
iota(indices.begin(), indices.end(), 0);
sort(indices.begin(), indices.end(), [&nums](int i, int j) {return nums[i] > nums[j];});
return indices;
}
std::vector<std::vector<double>> tensorToVectorDouble(torch::Tensor& dtensor)
{
// convert mxn tensor to mxn std::vector
std::vector<std::vector<double>> result;
// Iterate over rows (the tensor is expected to hold kFloat32 data)
for (int i = 0; i < dtensor.size(0); ++i) {
auto col_tensor = dtensor.index({ i, "..." });
auto col = std::vector<double>(col_tensor.data_ptr<float>(), col_tensor.data_ptr<float>() + dtensor.size(1));
result.push_back(col);
}
return result;
}
torch::Tensor vectorToTensor(std::vector<std::vector<int>>& vector, bool transpose)
{
// convert nxm std::vector to mxn tensor if transpose
long int m = transpose ? vector[0].size() : vector.size();
long int n = transpose ? vector.size() : vector[0].size();
auto tensor = torch::zeros({ m, n }, torch::kInt32);
for (int i = 0; i < m; ++i) {
for (int j = 0; j < n; ++j) {
tensor[i][j] = transpose ? vector[j][i] : vector[i][j];
}
}
return tensor;
}
}
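
A quick check of vectorToTensor's transpose flag, with invented values:

#include "bayesnetUtils.h"

int main()
{
    std::vector<std::vector<int>> v = { { 1, 2, 3 }, { 4, 5, 6 } }; // 2x3
    auto t = bayesnet::vectorToTensor(v, true);  // 3x2 tensor: t[j][i] == v[i][j]
    auto u = bayesnet::vectorToTensor(v, false); // 2x3 tensor, layout preserved
    return 0;
}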

View File

@ -1,16 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef BAYESNET_UTILS_H
#define BAYESNET_UTILS_H
#include <vector>
#include <torch/torch.h>
namespace bayesnet {
std::vector<int> argsort(std::vector<double>& nums);
std::vector<std::vector<double>> tensorToVectorDouble(torch::Tensor& dtensor);
torch::Tensor vectorToTensor(std::vector<std::vector<int>>& vector, bool transpose = true);
}
#endif //BAYESNET_UTILS_H

View File

@ -137,7 +137,7 @@
include(CMakeParseArguments)
option(CODE_COVERAGE_VERBOSE "Verbose information" TRUE)
option(CODE_COVERAGE_VERBOSE "Verbose information" FALSE)
# Check prereqs
find_program( GCOV_PATH gcov )
@ -160,11 +160,7 @@ foreach(LANG ${LANGUAGES})
endif()
elseif(NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "GNU"
AND NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "(LLVM)?[Ff]lang")
if ("${LANG}" MATCHES "CUDA")
message(STATUS "Ignoring CUDA")
else()
message(FATAL_ERROR "Compiler is not GNU or Flang! Aborting...")
endif()
message(FATAL_ERROR "Compiler is not GNU or Flang! Aborting...")
endif()
endforeach()

View File

@ -1,4 +1,4 @@
configure_file(
"config.h.in"
"${CMAKE_BINARY_DIR}/configured_files/include/bayesnet/config.h" ESCAPE_QUOTES
"${CMAKE_BINARY_DIR}/configured_files/include/config.h" ESCAPE_QUOTES
)

View File

@ -7,8 +7,7 @@
#define PROJECT_VERSION_MINOR @PROJECT_VERSION_MINOR@
#define PROJECT_VERSION_PATCH @PROJECT_VERSION_PATCH@
static constexpr std::string_view project_name = "@PROJECT_NAME@";
static constexpr std::string_view project_name = " @PROJECT_NAME@ ";
static constexpr std::string_view project_version = "@PROJECT_VERSION@";
static constexpr std::string_view project_description = "@PROJECT_DESCRIPTION@";
static constexpr std::string_view git_sha = "@GIT_SHA@";
static constexpr std::string_view data_path = "@BayesNet_SOURCE_DIR@/tests/data/";

25
data/glass.net Normal file
View File

@ -0,0 +1,25 @@
Type Si
Type Fe
Type RI
Type Na
Type Ba
Type Ca
Type Al
Type K
Type Mg
Fe RI
Fe Ba
Fe Ca
RI Na
RI Ba
RI Ca
RI Al
RI K
RI Mg
Ba Ca
Ba Al
Ca Al
Ca K
Ca Mg
Al K
K Mg

645
data/mfeat-factors-kdb2.net Normal file
View File

@ -0,0 +1,645 @@
class att215
class att25
class att131
class att95
class att122
class att17
class att28
class att5
class att121
class att214
class att197
class att116
class att182
class att60
class att168
class att178
class att206
class att89
class att77
class att209
class att73
class att126
class att16
class att74
class att27
class att61
class att20
class att101
class att85
class att76
class att137
class att211
class att143
class att14
class att40
class att210
class att155
class att170
class att160
class att23
class att162
class att203
class att164
class att107
class att62
class att42
class att71
class att128
class att138
class att83
class att171
class att92
class att163
class att49
class att161
class att158
class att176
class att11
class att145
class att4
class att172
class att196
class att58
class att68
class att169
class att80
class att32
class att175
class att87
class att88
class att159
class att18
class att52
class att98
class att136
class att150
class att156
class att110
class att100
class att63
class att148
class att90
class att167
class att35
class att205
class att51
class att21
class att142
class att46
class att134
class att39
class att102
class att208
class att130
class att149
class att96
class att75
class att118
class att78
class att213
class att112
class att38
class att174
class att189
class att70
class att179
class att59
class att79
class att15
class att47
class att124
class att34
class att54
class att191
class att86
class att56
class att151
class att66
class att173
class att44
class att198
class att139
class att216
class att129
class att152
class att69
class att81
class att50
class att153
class att41
class att204
class att188
class att26
class att13
class att117
class att114
class att10
class att64
class att200
class att9
class att3
class att119
class att45
class att104
class att140
class att30
class att183
class att146
class att141
class att202
class att194
class att24
class att147
class att8
class att212
class att123
class att166
class att187
class att127
class att190
class att105
class att106
class att184
class att82
class att2
class att135
class att154
class att111
class att115
class att99
class att22
class att84
class att207
class att94
class att177
class att103
class att93
class att201
class att43
class att36
class att12
class att125
class att165
class att180
class att195
class att157
class att48
class att6
class att113
class att193
class att91
class att72
class att31
class att132
class att33
class att57
class att144
class att192
class att185
class att37
class att53
class att120
class att186
class att199
class att65
class att108
class att133
class att29
class att19
class att7
class att97
class att67
class att55
class att1
class att109
class att181
att215 att25
att215 att131
att215 att95
att25 att131
att25 att121
att25 att73
att25 att61
att25 att85
att25 att169
att25 att13
att131 att95
att131 att122
att131 att17
att131 att28
att131 att121
att131 att214
att131 att116
att131 att126
att131 att143
att95 att122
att95 att17
att95 att28
att95 att5
att95 att214
att95 att116
att95 att60
att95 att143
att95 att155
att95 att71
att122 att182
att122 att170
att17 att5
att17 att197
att17 att89
att17 att77
att17 att161
att28 att206
att28 att16
att28 att76
att28 att172
att28 att124
att28 att64
att5 att197
att5 att89
att5 att209
att121 att73
att214 att178
att214 att58
att214 att142
att197 att209
att197 att101
att116 att182
att116 att60
att116 att168
att116 att178
att116 att206
att116 att126
att116 att16
att116 att27
att116 att20
att116 att211
att116 att164
att116 att128
att182 att27
att182 att14
att60 att168
att60 att156
att168 att156
att168 att96
att178 att20
att178 att58
att178 att142
att178 att130
att206 att74
att206 att170
att206 att158
att89 att77
att89 att137
att89 att149
att89 att173
att77 att137
att77 att161
att209 att101
att209 att41
att73 att61
att73 att157
att126 att162
att126 att138
att126 att150
att16 att74
att16 att76
att16 att40
att16 att4
att74 att14
att74 att62
att27 att171
att61 att85
att61 att169
att20 att211
att20 att210
att20 att164
att20 att176
att101 att41
att85 att13
att76 att40
att76 att160
att137 att149
att211 att210
att211 att162
att211 att171
att211 att163
att211 att175
att211 att79
att143 att155
att143 att23
att143 att71
att143 att83
att143 att11
att14 att98
att40 att160
att40 att4
att40 att196
att40 att52
att210 att42
att210 att114
att155 att23
att155 att203
att155 att107
att155 att11
att170 att158
att160 att52
att23 att203
att162 att138
att162 att18
att162 att150
att162 att90
att162 att174
att203 att107
att203 att49
att203 att59
att203 att191
att203 att119
att164 att62
att164 att42
att164 att128
att164 att92
att164 att163
att164 att176
att164 att145
att164 att68
att164 att80
att164 att98
att164 att110
att164 att205
att164 att21
att164 att213
att164 att112
att164 att38
att164 att56
att164 att44
att107 att59
att107 att47
att107 att191
att71 att83
att71 att167
att71 att35
att128 att92
att138 att18
att83 att167
att171 att87
att171 att159
att171 att63
att171 att51
att171 att39
att171 att75
att163 att49
att163 att175
att163 att87
att163 att79
att163 att151
att163 att139
att163 att187
att163 att91
att161 att173
att176 att145
att176 att172
att176 att68
att176 att80
att176 att32
att176 att110
att176 att205
att176 att21
att176 att134
att176 att56
att4 att196
att4 att88
att4 att136
att4 att100
att4 att148
att4 att208
att172 att112
att172 att184
att196 att88
att196 att136
att196 att100
att196 att208
att58 att46
att68 att32
att32 att200
att87 att159
att87 att63
att87 att75
att87 att15
att87 att99
att159 att195
att18 att90
att18 att102
att18 att78
att18 att198
att52 att124
att98 att86
att150 att174
att150 att66
att156 att96
att156 att216
att156 att204
att156 att24
att156 att84
att100 att148
att63 att51
att63 att3
att63 att183
att90 att102
att90 att78
att167 att35
att167 att179
att35 att179
att51 att39
att51 att3
att21 att134
att21 att213
att21 att38
att21 att189
att21 att129
att21 att81
att21 att117
att21 att9
att142 att46
att142 att130
att142 att118
att142 att10
att142 att202
att142 att190
att142 att106
att46 att70
att46 att34
att46 att166
att134 att2
att102 att54
att130 att118
att130 att10
att130 att202
att149 att125
att96 att216
att96 att24
att75 att15
att75 att99
att118 att70
att78 att198
att213 att189
att38 att50
att38 att26
att174 att54
att174 att66
att174 att30
att189 att86
att189 att129
att189 att69
att189 att81
att189 att153
att189 att117
att189 att9
att189 att45
att189 att105
att70 att34
att59 att47
att79 att151
att79 att139
att79 att187
att79 att127
att79 att103
att79 att43
att79 att91
att79 att19
att124 att64
att54 att114
att54 att30
att191 att119
att86 att194
att56 att44
att56 att152
att56 att50
att56 att188
att56 att26
att56 att104
att56 att140
att56 att146
att56 att194
att56 att8
att56 att2
att56 att133
att56 att1
att173 att125
att173 att113
att44 att152
att44 att188
att44 att200
att44 att212
att44 att1
att139 att103
att139 att43
att139 att31
att139 att199
att139 att7
att216 att204
att216 att36
att216 att12
att216 att180
att216 att108
att129 att69
att152 att140
att69 att153
att81 att45
att153 att141
att41 att53
att204 att12
att13 att157
att114 att6
att114 att186
att10 att190
att64 att184
att200 att104
att9 att146
att9 att141
att9 att177
att9 att37
att9 att133
att9 att109
att9 att181
att3 att183
att3 att147
att3 att123
att3 att135
att3 att111
att45 att105
att45 att177
att45 att93
att45 att201
att45 att193
att45 att37
att45 att97
att140 att8
att30 att6
att183 att147
att183 att123
att202 att166
att202 att106
att202 att82
att24 att84
att24 att36
att147 att135
att8 att212
att166 att82
att187 att127
att187 att115
att127 att115
att105 att93
att106 att154
att82 att154
att82 att22
att135 att111
att135 att207
att154 att22
att154 att94
att111 att207
att22 att94
att84 att48
att177 att165
att103 att195
att103 att109
att93 att201
att93 att165
att93 att193
att93 att33
att201 att33
att201 att57
att36 att180
att36 att72
att36 att132
att36 att144
att125 att113
att125 att185
att125 att65
att125 att29
att180 att48
att180 att72
att180 att192
att180 att108
att6 att186
att113 att185
att113 att53
att193 att97
att91 att31
att91 att19
att72 att132
att72 att192
att31 att199
att31 att67
att132 att144
att132 att120
att33 att57
att144 att120
att185 att65
att199 att7
att199 att67
att199 att55
att65 att29
att67 att55
att109 att181

859
data/mfeat-factors-kdb3.net Normal file
View File

@ -0,0 +1,859 @@
class att215
class att25
class att131
class att95
class att122
class att17
class att28
class att5
class att121
class att214
class att197
class att116
class att182
class att60
class att168
class att178
class att206
class att89
class att77
class att209
class att73
class att126
class att16
class att74
class att27
class att61
class att20
class att101
class att85
class att76
class att137
class att211
class att143
class att14
class att40
class att210
class att155
class att170
class att160
class att23
class att162
class att203
class att164
class att107
class att62
class att42
class att71
class att128
class att138
class att83
class att171
class att92
class att163
class att49
class att161
class att158
class att176
class att11
class att145
class att4
class att172
class att196
class att58
class att68
class att169
class att80
class att32
class att175
class att87
class att88
class att159
class att18
class att52
class att98
class att136
class att150
class att156
class att110
class att100
class att63
class att148
class att90
class att167
class att35
class att205
class att51
class att21
class att142
class att46
class att134
class att39
class att102
class att208
class att130
class att149
class att96
class att75
class att118
class att78
class att213
class att112
class att38
class att174
class att189
class att70
class att179
class att59
class att79
class att15
class att47
class att124
class att34
class att54
class att191
class att86
class att56
class att151
class att66
class att173
class att44
class att198
class att139
class att216
class att129
class att152
class att69
class att81
class att50
class att153
class att41
class att204
class att188
class att26
class att13
class att117
class att114
class att10
class att64
class att200
class att9
class att3
class att119
class att45
class att104
class att140
class att30
class att183
class att146
class att141
class att202
class att194
class att24
class att147
class att8
class att212
class att123
class att166
class att187
class att127
class att190
class att105
class att106
class att184
class att82
class att2
class att135
class att154
class att111
class att115
class att99
class att22
class att84
class att207
class att94
class att177
class att103
class att93
class att201
class att43
class att36
class att12
class att125
class att165
class att180
class att195
class att157
class att48
class att6
class att113
class att193
class att91
class att72
class att31
class att132
class att33
class att57
class att144
class att192
class att185
class att37
class att53
class att120
class att186
class att199
class att65
class att108
class att133
class att29
class att19
class att7
class att97
class att67
class att55
class att1
class att109
class att181
att215 att25
att215 att131
att215 att95
att215 att17
att215 att214
att215 att143
att25 att131
att25 att95
att25 att122
att25 att121
att25 att73
att25 att61
att25 att85
att25 att169
att25 att13
att25 att157
att131 att95
att131 att122
att131 att17
att131 att28
att131 att5
att131 att121
att131 att214
att131 att116
att131 att182
att131 att60
att131 att126
att131 att16
att131 att27
att131 att20
att131 att143
att131 att155
att95 att122
att95 att17
att95 att28
att95 att5
att95 att121
att95 att214
att95 att197
att95 att116
att95 att60
att95 att168
att95 att178
att95 att143
att95 att155
att95 att23
att95 att71
att95 att167
att122 att28
att122 att182
att122 att170
att17 att5
att17 att197
att17 att89
att17 att77
att17 att209
att17 att137
att17 att161
att17 att41
att28 att206
att28 att16
att28 att76
att28 att40
att28 att210
att28 att160
att28 att172
att28 att124
att28 att64
att5 att197
att5 att89
att5 att77
att5 att209
att5 att101
att121 att73
att121 att61
att214 att116
att214 att178
att214 att206
att214 att58
att214 att142
att214 att46
att197 att89
att197 att209
att197 att101
att116 att182
att116 att60
att116 att168
att116 att178
att116 att206
att116 att73
att116 att126
att116 att16
att116 att74
att116 att27
att116 att20
att116 att211
att116 att164
att116 att128
att116 att92
att116 att176
att116 att68
att182 att27
att182 att14
att60 att168
att60 att156
att60 att96
att168 att126
att168 att156
att168 att96
att168 att216
att178 att20
att178 att211
att178 att58
att178 att142
att178 att130
att178 att166
att206 att74
att206 att170
att206 att158
att89 att77
att89 att137
att89 att149
att89 att173
att77 att137
att77 att161
att77 att149
att209 att101
att209 att41
att73 att61
att73 att85
att73 att13
att73 att157
att126 att162
att126 att138
att126 att18
att126 att150
att16 att74
att16 att76
att16 att40
att16 att4
att16 att196
att16 att136
att74 att14
att74 att62
att27 att171
att27 att63
att61 att85
att61 att169
att20 att76
att20 att211
att20 att210
att20 att170
att20 att164
att20 att128
att20 att176
att20 att80
att101 att41
att85 att169
att85 att13
att76 att14
att76 att40
att76 att160
att76 att4
att76 att52
att137 att161
att137 att149
att137 att173
att137 att125
att211 att210
att211 att162
att211 att164
att211 att62
att211 att42
att211 att171
att211 att163
att211 att175
att211 att79
att211 att151
att211 att43
att143 att155
att143 att23
att143 att203
att143 att71
att143 att83
att143 att11
att14 att98
att40 att160
att40 att4
att40 att196
att40 att88
att40 att52
att210 att162
att210 att42
att210 att114
att155 att23
att155 att203
att155 att107
att155 att11
att170 att158
att160 att52
att160 att124
att23 att203
att23 att107
att23 att71
att23 att11
att162 att138
att162 att18
att162 att150
att162 att90
att162 att102
att162 att174
att162 att66
att203 att107
att203 att49
att203 att59
att203 att47
att203 att191
att203 att119
att164 att62
att164 att42
att164 att128
att164 att171
att164 att92
att164 att163
att164 att158
att164 att176
att164 att145
att164 att172
att164 att58
att164 att68
att164 att80
att164 att32
att164 att98
att164 att156
att164 att110
att164 att205
att164 att21
att164 att134
att164 att213
att164 att112
att164 att38
att164 att189
att164 att56
att164 att44
att164 att152
att164 att8
att107 att83
att107 att49
att107 att59
att107 att47
att107 att191
att42 att138
att42 att54
att42 att114
att71 att83
att71 att167
att71 att35
att71 att179
att128 att92
att128 att112
att138 att18
att138 att150
att83 att167
att83 att35
att171 att87
att171 att159
att171 att63
att171 att51
att171 att39
att171 att75
att92 att163
att92 att145
att92 att56
att163 att49
att163 att175
att163 att87
att163 att79
att163 att151
att163 att139
att163 att187
att163 att127
att163 att103
att163 att91
att49 att37
att161 att173
att161 att113
att176 att145
att176 att172
att176 att68
att176 att80
att176 att32
att176 att175
att176 att98
att176 att110
att176 att205
att176 att21
att176 att134
att176 att213
att176 att56
att4 att196
att4 att88
att4 att136
att4 att100
att4 att148
att4 att208
att172 att112
att172 att184
att196 att88
att196 att136
att196 att100
att196 att148
att196 att208
att58 att142
att58 att46
att58 att34
att68 att32
att80 att38
att32 att110
att32 att21
att32 att44
att32 att200
att175 att87
att175 att159
att175 att79
att175 att187
att175 att115
att87 att159
att87 att63
att87 att51
att87 att75
att87 att15
att87 att99
att159 att75
att159 att15
att159 att195
att18 att90
att18 att102
att18 att78
att18 att198
att52 att124
att52 att64
att98 att86
att136 att100
att136 att208
att150 att90
att150 att174
att150 att66
att156 att205
att156 att96
att156 att216
att156 att204
att156 att24
att156 att84
att156 att36
att156 att12
att156 att108
att100 att148
att63 att51
att63 att39
att63 att3
att63 att183
att63 att147
att90 att102
att90 att78
att167 att35
att167 att179
att35 att179
att51 att39
att51 att3
att51 att183
att21 att134
att21 att213
att21 att38
att21 att189
att21 att129
att21 att81
att21 att153
att21 att117
att21 att9
att142 att46
att142 att130
att142 att118
att142 att70
att142 att10
att142 att202
att142 att190
att142 att106
att46 att130
att46 att118
att46 att70
att46 att34
att46 att166
att46 att82
att134 att2
att39 att3
att102 att78
att102 att174
att102 att54
att102 att198
att130 att118
att130 att10
att130 att202
att130 att190
att130 att106
att149 att125
att96 att216
att96 att204
att96 att24
att75 att15
att75 att99
att118 att70
att118 att10
att118 att202
att78 att198
att213 att189
att213 att129
att213 att69
att213 att81
att38 att50
att38 att26
att174 att54
att174 att66
att174 att30
att189 att86
att189 att129
att189 att69
att189 att81
att189 att153
att189 att117
att189 att9
att189 att45
att189 att141
att189 att105
att70 att34
att70 att154
att179 att59
att59 att47
att59 att191
att59 att119
att79 att86
att79 att151
att79 att139
att79 att187
att79 att127
att79 att103
att79 att43
att79 att193
att79 att91
att79 att19
att124 att64
att54 att114
att54 att30
att54 att6
att191 att119
att86 att194
att56 att44
att56 att152
att56 att50
att56 att188
att56 att26
att56 att200
att56 att104
att56 att140
att56 att146
att56 att194
att56 att8
att56 att2
att56 att133
att56 att1
att151 att139
att66 att30
att173 att125
att173 att113
att173 att185
att44 att152
att44 att50
att44 att188
att44 att200
att44 att104
att44 att140
att44 att194
att44 att212
att44 att1
att139 att26
att139 att99
att139 att103
att139 att43
att139 att91
att139 att31
att139 att199
att139 att7
att216 att204
att216 att24
att216 att84
att216 att36
att216 att12
att216 att180
att216 att108
att129 att69
att152 att188
att152 att140
att69 att153
att69 att9
att69 att177
att81 att45
att81 att105
att153 att117
att153 att141
att41 att53
att204 att12
att204 att180
att188 att146
att188 att212
att13 att157
att114 att6
att114 att186
att10 att190
att64 att184
att200 att104
att9 att45
att9 att146
att9 att141
att9 att177
att9 att37
att9 att133
att9 att109
att9 att181
att3 att183
att3 att147
att3 att123
att3 att135
att3 att111
att45 att105
att45 att177
att45 att93
att45 att201
att45 att165
att45 att193
att45 att33
att45 att37
att45 att133
att45 att97
att140 att8
att30 att6
att30 att186
att183 att147
att183 att123
att183 att135
att146 att2
att202 att166
att202 att106
att202 att82
att24 att84
att24 att36
att24 att132
att147 att123
att147 att135
att147 att111
att147 att207
att8 att212
att166 att82
att166 att22
att166 att94
att187 att127
att187 att115
att127 att115
att105 att184
att105 att93
att105 att201
att106 att154
att82 att154
att82 att22
att135 att111
att135 att207
att154 att22
att154 att94
att111 att207
att99 att195
att22 att94
att84 att48
att177 att93
att177 att165
att177 att181
att103 att195
att103 att97
att103 att109
att93 att201
att93 att165
att93 att193
att93 att33
att93 att57
att201 att33
att201 att57
att43 att31
att36 att180
att36 att48
att36 att72
att36 att132
att36 att144
att125 att113
att125 att185
att125 att65
att125 att29
att180 att48
att180 att72
att180 att192
att180 att108
att48 att72
att6 att186
att113 att185
att113 att53
att113 att65
att193 att97
att91 att31
att91 att199
att91 att19
att72 att132
att72 att144
att72 att192
att72 att120
att31 att199
att31 att7
att31 att67
att31 att55
att31 att1
att132 att144
att132 att120
att33 att57
att144 att192
att144 att120
att185 att53
att185 att65
att185 att29
att199 att19
att199 att7
att199 att67
att199 att55
att199 att109
att65 att29
att7 att67
att67 att55
att109 att181

859
data/mfeat-factors.net Normal file
View File

@ -0,0 +1,859 @@
class att215
class att25
class att131
class att95
class att122
class att17
class att28
class att5
class att121
class att214
class att197
class att116
class att182
class att60
class att168
class att178
class att206
class att89
class att77
class att209
class att73
class att126
class att16
class att74
class att27
class att61
class att20
class att101
class att85
class att76
class att137
class att211
class att143
class att14
class att40
class att210
class att155
class att170
class att160
class att23
class att162
class att203
class att164
class att107
class att62
class att42
class att71
class att128
class att138
class att83
class att171
class att92
class att163
class att49
class att161
class att158
class att176
class att11
class att145
class att4
class att172
class att196
class att58
class att68
class att169
class att80
class att32
class att175
class att87
class att88
class att159
class att18
class att52
class att98
class att136
class att150
class att156
class att110
class att100
class att63
class att148
class att90
class att167
class att35
class att205
class att51
class att21
class att142
class att46
class att134
class att39
class att102
class att208
class att130
class att149
class att96
class att75
class att118
class att78
class att213
class att112
class att38
class att174
class att189
class att70
class att179
class att59
class att79
class att15
class att47
class att124
class att34
class att54
class att191
class att86
class att56
class att151
class att66
class att173
class att44
class att198
class att139
class att216
class att129
class att152
class att69
class att81
class att50
class att153
class att41
class att204
class att188
class att26
class att13
class att117
class att114
class att10
class att64
class att200
class att9
class att3
class att119
class att45
class att104
class att140
class att30
class att183
class att146
class att141
class att202
class att194
class att24
class att147
class att8
class att212
class att123
class att166
class att187
class att127
class att190
class att105
class att106
class att184
class att82
class att2
class att135
class att154
class att111
class att115
class att99
class att22
class att84
class att207
class att94
class att177
class att103
class att93
class att201
class att43
class att36
class att12
class att125
class att165
class att180
class att195
class att157
class att48
class att6
class att113
class att193
class att91
class att72
class att31
class att132
class att33
class att57
class att144
class att192
class att185
class att37
class att53
class att120
class att186
class att199
class att65
class att108
class att133
class att29
class att19
class att7
class att97
class att67
class att55
class att1
class att109
class att181
att215 att25
att215 att131
att215 att95
att215 att17
att215 att214
att215 att143
att25 att131
att25 att95
att25 att122
att25 att121
att25 att73
att25 att61
att25 att85
att25 att169
att25 att13
att25 att157
att131 att95
att131 att122
att131 att17
att131 att28
att131 att5
att131 att121
att131 att214
att131 att116
att131 att182
att131 att60
att131 att126
att131 att16
att131 att27
att131 att20
att131 att143
att131 att155
att95 att122
att95 att17
att95 att28
att95 att5
att95 att121
att95 att214
att95 att197
att95 att116
att95 att60
att95 att168
att95 att178
att95 att143
att95 att155
att95 att23
att95 att71
att95 att167
att122 att28
att122 att182
att122 att170
att17 att5
att17 att197
att17 att89
att17 att77
att17 att209
att17 att137
att17 att161
att17 att41
att28 att206
att28 att16
att28 att76
att28 att40
att28 att210
att28 att160
att28 att172
att28 att124
att28 att64
att5 att197
att5 att89
att5 att77
att5 att209
att5 att101
att121 att73
att121 att61
att214 att116
att214 att178
att214 att206
att214 att58
att214 att142
att214 att46
att197 att89
att197 att209
att197 att101
att116 att182
att116 att60
att116 att168
att116 att178
att116 att206
att116 att73
att116 att126
att116 att16
att116 att74
att116 att27
att116 att20
att116 att211
att116 att164
att116 att128
att116 att92
att116 att176
att116 att68
att182 att27
att182 att14
att60 att168
att60 att156
att60 att96
att168 att126
att168 att156
att168 att96
att168 att216
att178 att20
att178 att211
att178 att58
att178 att142
att178 att130
att178 att166
att206 att74
att206 att170
att206 att158
att89 att77
att89 att137
att89 att149
att89 att173
att77 att137
att77 att161
att77 att149
att209 att101
att209 att41
att73 att61
att73 att85
att73 att13
att73 att157
att126 att162
att126 att138
att126 att18
att126 att150
att16 att74
att16 att76
att16 att40
att16 att4
att16 att196
att16 att136
att74 att14
att74 att62
att27 att171
att27 att63
att61 att85
att61 att169
att20 att76
att20 att211
att20 att210
att20 att170
att20 att164
att20 att128
att20 att176
att20 att80
att101 att41
att85 att169
att85 att13
att76 att14
att76 att40
att76 att160
att76 att4
att76 att52
att137 att161
att137 att149
att137 att173
att137 att125
att211 att210
att211 att162
att211 att164
att211 att62
att211 att42
att211 att171
att211 att163
att211 att175
att211 att79
att211 att151
att211 att43
att143 att155
att143 att23
att143 att203
att143 att71
att143 att83
att143 att11
att14 att98
att40 att160
att40 att4
att40 att196
att40 att88
att40 att52
att210 att162
att210 att42
att210 att114
att155 att23
att155 att203
att155 att107
att155 att11
att170 att158
att160 att52
att160 att124
att23 att203
att23 att107
att23 att71
att23 att11
att162 att138
att162 att18
att162 att150
att162 att90
att162 att102
att162 att174
att162 att66
att203 att107
att203 att49
att203 att59
att203 att47
att203 att191
att203 att119
att164 att62
att164 att42
att164 att128
att164 att171
att164 att92
att164 att163
att164 att158
att164 att176
att164 att145
att164 att172
att164 att58
att164 att68
att164 att80
att164 att32
att164 att98
att164 att156
att164 att110
att164 att205
att164 att21
att164 att134
att164 att213
att164 att112
att164 att38
att164 att189
att164 att56
att164 att44
att164 att152
att164 att8
att107 att83
att107 att49
att107 att59
att107 att47
att107 att191
att42 att138
att42 att54
att42 att114
att71 att83
att71 att167
att71 att35
att71 att179
att128 att92
att128 att112
att138 att18
att138 att150
att83 att167
att83 att35
att171 att87
att171 att159
att171 att63
att171 att51
att171 att39
att171 att75
att92 att163
att92 att145
att92 att56
att163 att49
att163 att175
att163 att87
att163 att79
att163 att151
att163 att139
att163 att187
att163 att127
att163 att103
att163 att91
att49 att37
att161 att173
att161 att113
att176 att145
att176 att172
att176 att68
att176 att80
att176 att32
att176 att175
att176 att98
att176 att110
att176 att205
att176 att21
att176 att134
att176 att213
att176 att56
att4 att196
att4 att88
att4 att136
att4 att100
att4 att148
att4 att208
att172 att112
att172 att184
att196 att88
att196 att136
att196 att100
att196 att148
att196 att208
att58 att142
att58 att46
att58 att34
att68 att32
att80 att38
att32 att110
att32 att21
att32 att44
att32 att200
att175 att87
att175 att159
att175 att79
att175 att187
att175 att115
att87 att159
att87 att63
att87 att51
att87 att75
att87 att15
att87 att99
att159 att75
att159 att15
att159 att195
att18 att90
att18 att102
att18 att78
att18 att198
att52 att124
att52 att64
att98 att86
att136 att100
att136 att208
att150 att90
att150 att174
att150 att66
att156 att205
att156 att96
att156 att216
att156 att204
att156 att24
att156 att84
att156 att36
att156 att12
att156 att108
att100 att148
att63 att51
att63 att39
att63 att3
att63 att183
att63 att147
att90 att102
att90 att78
att167 att35
att167 att179
att35 att179
att51 att39
att51 att3
att51 att183
att21 att134
att21 att213
att21 att38
att21 att189
att21 att129
att21 att81
att21 att153
att21 att117
att21 att9
att142 att46
att142 att130
att142 att118
att142 att70
att142 att10
att142 att202
att142 att190
att142 att106
att46 att130
att46 att118
att46 att70
att46 att34
att46 att166
att46 att82
att134 att2
att39 att3
att102 att78
att102 att174
att102 att54
att102 att198
att130 att118
att130 att10
att130 att202
att130 att190
att130 att106
att149 att125
att96 att216
att96 att204
att96 att24
att75 att15
att75 att99
att118 att70
att118 att10
att118 att202
att78 att198
att213 att189
att213 att129
att213 att69
att213 att81
att38 att50
att38 att26
att174 att54
att174 att66
att174 att30
att189 att86
att189 att129
att189 att69
att189 att81
att189 att153
att189 att117
att189 att9
att189 att45
att189 att141
att189 att105
att70 att34
att70 att154
att179 att59
att59 att47
att59 att191
att59 att119
att79 att86
att79 att151
att79 att139
att79 att187
att79 att127
att79 att103
att79 att43
att79 att193
att79 att91
att79 att19
att124 att64
att54 att114
att54 att30
att54 att6
att191 att119
att86 att194
att56 att44
att56 att152
att56 att50
att56 att188
att56 att26
att56 att200
att56 att104
att56 att140
att56 att146
att56 att194
att56 att8
att56 att2
att56 att133
att56 att1
att151 att139
att66 att30
att173 att125
att173 att113
att173 att185
att44 att152
att44 att50
att44 att188
att44 att200
att44 att104
att44 att140
att44 att194
att44 att212
att44 att1
att139 att26
att139 att99
att139 att103
att139 att43
att139 att91
att139 att31
att139 att199
att139 att7
att216 att204
att216 att24
att216 att84
att216 att36
att216 att12
att216 att180
att216 att108
att129 att69
att152 att188
att152 att140
att69 att153
att69 att9
att69 att177
att81 att45
att81 att105
att153 att117
att153 att141
att41 att53
att204 att12
att204 att180
att188 att146
att188 att212
att13 att157
att114 att6
att114 att186
att10 att190
att64 att184
att200 att104
att9 att45
att9 att146
att9 att141
att9 att177
att9 att37
att9 att133
att9 att109
att9 att181
att3 att183
att3 att147
att3 att123
att3 att135
att3 att111
att45 att105
att45 att177
att45 att93
att45 att201
att45 att165
att45 att193
att45 att33
att45 att37
att45 att133
att45 att97
att140 att8
att30 att6
att30 att186
att183 att147
att183 att123
att183 att135
att146 att2
att202 att166
att202 att106
att202 att82
att24 att84
att24 att36
att24 att132
att147 att123
att147 att135
att147 att111
att147 att207
att8 att212
att166 att82
att166 att22
att166 att94
att187 att127
att187 att115
att127 att115
att105 att184
att105 att93
att105 att201
att106 att154
att82 att154
att82 att22
att135 att111
att135 att207
att154 att22
att154 att94
att111 att207
att99 att195
att22 att94
att84 att48
att177 att93
att177 att165
att177 att181
att103 att195
att103 att97
att103 att109
att93 att201
att93 att165
att93 att193
att93 att33
att93 att57
att201 att33
att201 att57
att43 att31
att36 att180
att36 att48
att36 att72
att36 att132
att36 att144
att125 att113
att125 att185
att125 att65
att125 att29
att180 att48
att180 att72
att180 att192
att180 att108
att48 att72
att6 att186
att113 att185
att113 att53
att113 att65
att193 att97
att91 att31
att91 att199
att91 att19
att72 att132
att72 att144
att72 att192
att72 att120
att31 att199
att31 att7
att31 att67
att31 att55
att31 att1
att132 att144
att132 att120
att33 att57
att144 att192
att144 att120
att185 att53
att185 att65
att185 att29
att199 att19
att199 att7
att199 att67
att199 att55
att199 att109
att65 att29
att7 att67
att67 att55
att109 att181

BIN
diagrams/BayesNet.pdf Executable file

Binary file not shown.

View File

@ -1,580 +0,0 @@
@startuml
title clang-uml class diagram model
class "bayesnet::Node" as C_0010428199432536647474
class C_0010428199432536647474 #aliceblue;line:blue;line.dotted;text:blue {
+Node(const std::string &) : void
..
+addChild(Node *) : void
+addParent(Node *) : void
+clear() : void
+computeCPT(const torch::Tensor & dataset, const std::vector<std::string> & features, const double smoothing, const torch::Tensor & weights) : void
+getCPT() : torch::Tensor &
+getChildren() : std::vector<Node *> &
+getFactorValue(std::map<std::string,int> &) : double
+getName() const : std::string
+getNumStates() const : int
+getParents() : std::vector<Node *> &
+graph(const std::string & clasName) : std::vector<std::string>
+minFill() : unsigned int
+removeChild(Node *) : void
+removeParent(Node *) : void
+setNumStates(int) : void
__
}
enum "bayesnet::Smoothing_t" as C_0013393078277439680282
enum C_0013393078277439680282 {
NONE
ORIGINAL
LAPLACE
CESTNIK
}
class "bayesnet::Network" as C_0009493661199123436603
class C_0009493661199123436603 #aliceblue;line:blue;line.dotted;text:blue {
+Network() : void
+Network(const Network &) : void
+~Network() = default : void
..
+addEdge(const std::string &, const std::string &) : void
+addNode(const std::string &) : void
+dump_cpt() const : std::string
+fit(const torch::Tensor & samples, const torch::Tensor & weights, const std::vector<std::string> & featureNames, const std::string & className, const std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : void
+fit(const torch::Tensor & X, const torch::Tensor & y, const torch::Tensor & weights, const std::vector<std::string> & featureNames, const std::string & className, const std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : void
+fit(const std::vector<std::vector<int>> & input_data, const std::vector<int> & labels, const std::vector<double> & weights, const std::vector<std::string> & featureNames, const std::string & className, const std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : void
+getClassName() const : std::string
+getClassNumStates() const : int
+getEdges() const : std::vector<std::pair<std::string,std::string>>
+getFeatures() const : std::vector<std::string>
+getNodes() : std::map<std::string,std::unique_ptr<Node>> &
+getNumEdges() const : int
+getSamples() : torch::Tensor &
+getStates() const : int
+graph(const std::string & title) const : std::vector<std::string>
+initialize() : void
+predict(const std::vector<std::vector<int>> &) : std::vector<int>
+predict(const torch::Tensor &) : torch::Tensor
+predict_proba(const std::vector<std::vector<int>> &) : std::vector<std::vector<double>>
+predict_proba(const torch::Tensor &) : torch::Tensor
+predict_tensor(const torch::Tensor & samples, const bool proba) : torch::Tensor
+score(const std::vector<std::vector<int>> &, const std::vector<int> &) : double
+show() const : std::vector<std::string>
+topological_sort() : std::vector<std::string>
+version() : std::string
__
}
enum "bayesnet::status_t" as C_0005907365846270811004
enum C_0005907365846270811004 {
NORMAL
WARNING
ERROR
}
abstract "bayesnet::BaseClassifier" as C_0002617087915615796317
abstract C_0002617087915615796317 #aliceblue;line:blue;line.dotted;text:blue {
+~BaseClassifier() = default : void
..
{abstract} +dump_cpt() const = 0 : std::string
{abstract} +fit(torch::Tensor & X, torch::Tensor & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) = 0 : BaseClassifier &
{abstract} +fit(torch::Tensor & dataset, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) = 0 : BaseClassifier &
{abstract} +fit(torch::Tensor & dataset, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const torch::Tensor & weights, const Smoothing_t smoothing) = 0 : BaseClassifier &
{abstract} +fit(std::vector<std::vector<int>> & X, std::vector<int> & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) = 0 : BaseClassifier &
{abstract} +getClassNumStates() const = 0 : int
{abstract} +getNotes() const = 0 : std::vector<std::string>
{abstract} +getNumberOfEdges() const = 0 : int
{abstract} +getNumberOfNodes() const = 0 : int
{abstract} +getNumberOfStates() const = 0 : int
{abstract} +getStatus() const = 0 : status_t
+getValidHyperparameters() : std::vector<std::string> &
{abstract} +getVersion() = 0 : std::string
{abstract} +graph(const std::string & title = "") const = 0 : std::vector<std::string>
{abstract} +predict(std::vector<std::vector<int>> & X) = 0 : std::vector<int>
{abstract} +predict(torch::Tensor & X) = 0 : torch::Tensor
{abstract} +predict_proba(std::vector<std::vector<int>> & X) = 0 : std::vector<std::vector<double>>
{abstract} +predict_proba(torch::Tensor & X) = 0 : torch::Tensor
{abstract} +score(std::vector<std::vector<int>> & X, std::vector<int> & y) = 0 : float
{abstract} +score(torch::Tensor & X, torch::Tensor & y) = 0 : float
{abstract} +setHyperparameters(const nlohmann::json & hyperparameters) = 0 : void
{abstract} +show() const = 0 : std::vector<std::string>
{abstract} +topological_order() = 0 : std::vector<std::string>
{abstract} #trainModel(const torch::Tensor & weights, const Smoothing_t smoothing) = 0 : void
__
#validHyperparameters : std::vector<std::string>
}
class "bayesnet::Metrics" as C_0005895723015084986588
class C_0005895723015084986588 #aliceblue;line:blue;line.dotted;text:blue {
+Metrics() = default : void
+Metrics(const torch::Tensor & samples, const std::vector<std::string> & features, const std::string & className, const int classNumStates) : void
+Metrics(const std::vector<std::vector<int>> & vsamples, const std::vector<int> & labels, const std::vector<std::string> & features, const std::string & className, const int classNumStates) : void
..
+SelectKBestWeighted(const torch::Tensor & weights, bool ascending = false, unsigned int k = 0) : std::vector<int>
+SelectKPairs(const torch::Tensor & weights, std::vector<int> & featuresExcluded, bool ascending = false, unsigned int k = 0) : std::vector<std::pair<int,int>>
+conditionalEdge(const torch::Tensor & weights) : torch::Tensor
+conditionalEntropy(const torch::Tensor & firstFeature, const torch::Tensor & secondFeature, const torch::Tensor & labels, const torch::Tensor & weights) : double
+conditionalMutualInformation(const torch::Tensor & firstFeature, const torch::Tensor & secondFeature, const torch::Tensor & labels, const torch::Tensor & weights) : double
#doCombinations<T>(const std::vector<T> & source) : std::vector<std::pair<T, T> >
+entropy(const torch::Tensor & feature, const torch::Tensor & weights) : double
+getScoresKBest() const : std::vector<double>
+getScoresKPairs() const : std::vector<std::pair<std::pair<int,int>,double>>
+maximumSpanningTree(const std::vector<std::string> & features, const torch::Tensor & weights, const int root) : std::vector<std::pair<int,int>>
+mutualInformation(const torch::Tensor & firstFeature, const torch::Tensor & secondFeature, const torch::Tensor & weights) : double
#pop_first<T>(std::vector<T> & v) : T
__
#className : std::string
#features : std::vector<std::string>
#samples : torch::Tensor
}
abstract "bayesnet::Classifier" as C_0016351972983202413152
abstract C_0016351972983202413152 #aliceblue;line:blue;line.dotted;text:blue {
+Classifier(Network model) : void
+~Classifier() = default : void
..
+addNodes() : void
#buildDataset(torch::Tensor & y) : void
{abstract} #buildModel(const torch::Tensor & weights) = 0 : void
#checkFitParameters() : void
+dump_cpt() const : std::string
+fit(torch::Tensor & X, torch::Tensor & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : Classifier &
+fit(std::vector<std::vector<int>> & X, std::vector<int> & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : Classifier &
+fit(torch::Tensor & dataset, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : Classifier &
+fit(torch::Tensor & dataset, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const torch::Tensor & weights, const Smoothing_t smoothing) : Classifier &
+getClassNumStates() const : int
+getNotes() const : std::vector<std::string>
+getNumberOfEdges() const : int
+getNumberOfNodes() const : int
+getNumberOfStates() const : int
+getStatus() const : status_t
+getVersion() : std::string
+predict(std::vector<std::vector<int>> & X) : std::vector<int>
+predict(torch::Tensor & X) : torch::Tensor
+predict_proba(std::vector<std::vector<int>> & X) : std::vector<std::vector<double>>
+predict_proba(torch::Tensor & X) : torch::Tensor
+score(torch::Tensor & X, torch::Tensor & y) : float
+score(std::vector<std::vector<int>> & X, std::vector<int> & y) : float
+setHyperparameters(const nlohmann::json & hyperparameters) : void
+show() const : std::vector<std::string>
+topological_order() : std::vector<std::string>
#trainModel(const torch::Tensor & weights, const Smoothing_t smoothing) : void
__
#className : std::string
#dataset : torch::Tensor
#features : std::vector<std::string>
#fitted : bool
#m : unsigned int
#metrics : Metrics
#model : Network
#n : unsigned int
#notes : std::vector<std::string>
#states : std::map<std::string,std::vector<int>>
#status : status_t
}
class "bayesnet::KDB" as C_0008902920152122000044
class C_0008902920152122000044 #aliceblue;line:blue;line.dotted;text:blue {
+KDB(int k, float theta = 0.03) : void
+~KDB() = default : void
..
#buildModel(const torch::Tensor & weights) : void
+graph(const std::string & name = "KDB") const : std::vector<std::string>
+setHyperparameters(const nlohmann::json & hyperparameters_) : void
__
}
class "bayesnet::SPODE" as C_0004096182510460307610
class C_0004096182510460307610 #aliceblue;line:blue;line.dotted;text:blue {
+SPODE(int root) : void
+~SPODE() = default : void
..
#buildModel(const torch::Tensor & weights) : void
+graph(const std::string & name = "SPODE") const : std::vector<std::string>
__
}
class "bayesnet::SPnDE" as C_0016268916386101512883
class C_0016268916386101512883 #aliceblue;line:blue;line.dotted;text:blue {
+SPnDE(std::vector<int> parents) : void
+~SPnDE() = default : void
..
#buildModel(const torch::Tensor & weights) : void
+graph(const std::string & name = "SPnDE") const : std::vector<std::string>
__
}
class "bayesnet::TAN" as C_0014087955399074584137
class C_0014087955399074584137 #aliceblue;line:blue;line.dotted;text:blue {
+TAN() : void
+~TAN() = default : void
..
#buildModel(const torch::Tensor & weights) : void
+graph(const std::string & name = "TAN") const : std::vector<std::string>
__
}
class "bayesnet::Proposal" as C_0017759964713298103839
class C_0017759964713298103839 #aliceblue;line:blue;line.dotted;text:blue {
+Proposal(torch::Tensor & pDataset, std::vector<std::string> & features_, std::string & className_) : void
+~Proposal() : void
..
#checkInput(const torch::Tensor & X, const torch::Tensor & y) : void
#fit_local_discretization(const torch::Tensor & y) : std::map<std::string,std::vector<int>>
#localDiscretizationProposal(const std::map<std::string,std::vector<int>> & states, Network & model) : std::map<std::string,std::vector<int>>
#prepareX(torch::Tensor & X) : torch::Tensor
__
#Xf : torch::Tensor
#discretizers : map<std::string,mdlp::CPPFImdlp *>
#y : torch::Tensor
}
class "bayesnet::KDBLd" as C_0002756018222998454702
class C_0002756018222998454702 #aliceblue;line:blue;line.dotted;text:blue {
+KDBLd(int k) : void
+~KDBLd() = default : void
..
+fit(torch::Tensor & X, torch::Tensor & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : KDBLd &
+graph(const std::string & name = "KDB") const : std::vector<std::string>
+predict(torch::Tensor & X) : torch::Tensor
{static} +version() : std::string
__
}
class "bayesnet::SPODELd" as C_0010957245114062042836
class C_0010957245114062042836 #aliceblue;line:blue;line.dotted;text:blue {
+SPODELd(int root) : void
+~SPODELd() = default : void
..
+commonFit(const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : SPODELd &
+fit(torch::Tensor & X, torch::Tensor & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : SPODELd &
+fit(torch::Tensor & dataset, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : SPODELd &
+graph(const std::string & name = "SPODELd") const : std::vector<std::string>
+predict(torch::Tensor & X) : torch::Tensor
{static} +version() : std::string
__
}
class "bayesnet::TANLd" as C_0013350632773616302678
class C_0013350632773616302678 #aliceblue;line:blue;line.dotted;text:blue {
+TANLd() : void
+~TANLd() = default : void
..
+fit(torch::Tensor & X, torch::Tensor & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : TANLd &
+graph(const std::string & name = "TANLd") const : std::vector<std::string>
+predict(torch::Tensor & X) : torch::Tensor
__
}
class "bayesnet::Ensemble" as C_0015881931090842884611
class C_0015881931090842884611 #aliceblue;line:blue;line.dotted;text:blue {
+Ensemble(bool predict_voting = true) : void
+~Ensemble() = default : void
..
#compute_arg_max(std::vector<std::vector<double>> & X) : std::vector<int>
#compute_arg_max(torch::Tensor & X) : torch::Tensor
+dump_cpt() const : std::string
+getNumberOfEdges() const : int
+getNumberOfNodes() const : int
+getNumberOfStates() const : int
+graph(const std::string & title) const : std::vector<std::string>
+predict(std::vector<std::vector<int>> & X) : std::vector<int>
+predict(torch::Tensor & X) : torch::Tensor
#predict_average_proba(torch::Tensor & X) : torch::Tensor
#predict_average_proba(std::vector<std::vector<int>> & X) : std::vector<std::vector<double>>
#predict_average_voting(torch::Tensor & X) : torch::Tensor
#predict_average_voting(std::vector<std::vector<int>> & X) : std::vector<std::vector<double>>
+predict_proba(std::vector<std::vector<int>> & X) : std::vector<std::vector<double>>
+predict_proba(torch::Tensor & X) : torch::Tensor
+score(std::vector<std::vector<int>> & X, std::vector<int> & y) : float
+score(torch::Tensor & X, torch::Tensor & y) : float
+show() const : std::vector<std::string>
+topological_order() : std::vector<std::string>
#trainModel(const torch::Tensor & weights, const Smoothing_t smoothing) : void
#voting(torch::Tensor & votes) : torch::Tensor
__
#models : std::vector<std::unique_ptr<Classifier>>
#n_models : unsigned int
#predict_voting : bool
#significanceModels : std::vector<double>
}
class "bayesnet::A2DE" as C_0001410789567057647859
class C_0001410789567057647859 #aliceblue;line:blue;line.dotted;text:blue {
+A2DE(bool predict_voting = false) : void
+~A2DE() : void
..
#buildModel(const torch::Tensor & weights) : void
+graph(const std::string & title = "A2DE") const : std::vector<std::string>
+setHyperparameters(const nlohmann::json & hyperparameters) : void
__
}
class "bayesnet::AODE" as C_0006288892608974306258
class C_0006288892608974306258 #aliceblue;line:blue;line.dotted;text:blue {
+AODE(bool predict_voting = false) : void
+~AODE() : void
..
#buildModel(const torch::Tensor & weights) : void
+graph(const std::string & title = "AODE") const : std::vector<std::string>
+setHyperparameters(const nlohmann::json & hyperparameters) : void
__
}
abstract "bayesnet::FeatureSelect" as C_0013562609546004646591
abstract C_0013562609546004646591 #aliceblue;line:blue;line.dotted;text:blue {
+FeatureSelect(const torch::Tensor & samples, const std::vector<std::string> & features, const std::string & className, const int maxFeatures, const int classNumStates, const torch::Tensor & weights) : void
+~FeatureSelect() : void
..
#computeMeritCFS() : double
#computeSuFeatures(const int a, const int b) : double
#computeSuLabels() : void
{abstract} +fit() = 0 : void
+getFeatures() const : std::vector<int>
+getScores() const : std::vector<double>
#initialize() : void
#symmetricalUncertainty(int a, int b) : double
__
#fitted : bool
#maxFeatures : int
#selectedFeatures : std::vector<int>
#selectedScores : std::vector<double>
#suFeatures : std::map<std::pair<int,int>,double>
#suLabels : std::vector<double>
#weights : const torch::Tensor &
}
class "bayesnet::(anonymous_60342586)" as C_0005584545181746538542
class C_0005584545181746538542 #aliceblue;line:blue;line.dotted;text:blue {
__
+CFS : std::string
+FCBF : std::string
+IWSS : std::string
}
class "bayesnet::(anonymous_60343240)" as C_0016227156982041949444
class C_0016227156982041949444 #aliceblue;line:blue;line.dotted;text:blue {
__
+ASC : std::string
+DESC : std::string
+RAND : std::string
}
class "bayesnet::Boost" as C_0009819322948617116148
class C_0009819322948617116148 #aliceblue;line:blue;line.dotted;text:blue {
+Boost(bool predict_voting = false) : void
+~Boost() = default : void
..
#buildModel(const torch::Tensor & weights) : void
#featureSelection(torch::Tensor & weights_) : std::vector<int>
+setHyperparameters(const nlohmann::json & hyperparameters_) : void
#update_weights(torch::Tensor & ytrain, torch::Tensor & ypred, torch::Tensor & weights) : std::tuple<torch::Tensor &,double,bool>
#update_weights_block(int k, torch::Tensor & ytrain, torch::Tensor & weights) : std::tuple<torch::Tensor &,double,bool>
__
#X_test : torch::Tensor
#X_train : torch::Tensor
#bisection : bool
#block_update : bool
#convergence : bool
#convergence_best : bool
#featureSelector : FeatureSelect *
#maxTolerance : int
#order_algorithm : std::string
#selectFeatures : bool
#select_features_algorithm : std::string
#threshold : double
#y_test : torch::Tensor
#y_train : torch::Tensor
}
class "bayesnet::AODELd" as C_0003898187834670349177
class C_0003898187834670349177 #aliceblue;line:blue;line.dotted;text:blue {
+AODELd(bool predict_voting = true) : void
+~AODELd() = default : void
..
#buildModel(const torch::Tensor & weights) : void
+fit(torch::Tensor & X_, torch::Tensor & y_, const std::vector<std::string> & features_, const std::string & className_, std::map<std::string,std::vector<int>> & states_, const Smoothing_t smoothing) : AODELd &
+graph(const std::string & name = "AODELd") const : std::vector<std::string>
#trainModel(const torch::Tensor & weights, const Smoothing_t smoothing) : void
__
}
class "bayesnet::(anonymous_60275628)" as C_0009086919615463763584
class C_0009086919615463763584 #aliceblue;line:blue;line.dotted;text:blue {
__
+CFS : std::string
+FCBF : std::string
+IWSS : std::string
}
class "bayesnet::(anonymous_60276282)" as C_0015251985607563196159
class C_0015251985607563196159 #aliceblue;line:blue;line.dotted;text:blue {
__
+ASC : std::string
+DESC : std::string
+RAND : std::string
}
class "bayesnet::BoostA2DE" as C_0000272055465257861326
class C_0000272055465257861326 #aliceblue;line:blue;line.dotted;text:blue {
+BoostA2DE(bool predict_voting = false) : void
+~BoostA2DE() = default : void
..
+graph(const std::string & title = "BoostA2DE") const : std::vector<std::string>
#trainModel(const torch::Tensor & weights, const Smoothing_t smoothing) : void
__
}
class "bayesnet::(anonymous_60275502)" as C_0016033655851510053155
class C_0016033655851510053155 #aliceblue;line:blue;line.dotted;text:blue {
__
+CFS : std::string
+FCBF : std::string
+IWSS : std::string
}
class "bayesnet::(anonymous_60276156)" as C_0000379522761622473555
class C_0000379522761622473555 #aliceblue;line:blue;line.dotted;text:blue {
__
+ASC : std::string
+DESC : std::string
+RAND : std::string
}
class "bayesnet::BoostAODE" as C_0002867772739198819061
class C_0002867772739198819061 #aliceblue;line:blue;line.dotted;text:blue {
+BoostAODE(bool predict_voting = false) : void
+~BoostAODE() = default : void
..
+graph(const std::string & title = "BoostAODE") const : std::vector<std::string>
#trainModel(const torch::Tensor & weights, const Smoothing_t smoothing) : void
__
}
class "bayesnet::CFS" as C_0000093018845530739957
class C_0000093018845530739957 #aliceblue;line:blue;line.dotted;text:blue {
+CFS(const torch::Tensor & samples, const std::vector<std::string> & features, const std::string & className, const int maxFeatures, const int classNumStates, const torch::Tensor & weights) : void
+~CFS() : void
..
+fit() : void
__
}
class "bayesnet::FCBF" as C_0001157456122733975432
class C_0001157456122733975432 #aliceblue;line:blue;line.dotted;text:blue {
+FCBF(const torch::Tensor & samples, const std::vector<std::string> & features, const std::string & className, const int maxFeatures, const int classNumStates, const torch::Tensor & weights, const double threshold) : void
+~FCBF() : void
..
+fit() : void
__
}
class "bayesnet::IWSS" as C_0000066148117395428429
class C_0000066148117395428429 #aliceblue;line:blue;line.dotted;text:blue {
+IWSS(const torch::Tensor & samples, const std::vector<std::string> & features, const std::string & className, const int maxFeatures, const int classNumStates, const torch::Tensor & weights, const double threshold) : void
+~IWSS() : void
..
+fit() : void
__
}
class "bayesnet::(anonymous_60730495)" as C_0004857727320042830573
class C_0004857727320042830573 #aliceblue;line:blue;line.dotted;text:blue {
__
+CFS : std::string
+FCBF : std::string
+IWSS : std::string
}
class "bayesnet::(anonymous_60731150)" as C_0000076541533312623385
class C_0000076541533312623385 #aliceblue;line:blue;line.dotted;text:blue {
__
+ASC : std::string
+DESC : std::string
+RAND : std::string
}
class "bayesnet::(anonymous_60653004)" as C_0001444063444142949758
class C_0001444063444142949758 #aliceblue;line:blue;line.dotted;text:blue {
__
+CFS : std::string
+FCBF : std::string
+IWSS : std::string
}
class "bayesnet::(anonymous_60653658)" as C_0007139277546931322856
class C_0007139277546931322856 #aliceblue;line:blue;line.dotted;text:blue {
__
+ASC : std::string
+DESC : std::string
+RAND : std::string
}
class "bayesnet::(anonymous_60731375)" as C_0010493853592456211189
class C_0010493853592456211189 #aliceblue;line:blue;line.dotted;text:blue {
__
+CFS : std::string
+FCBF : std::string
+IWSS : std::string
}
class "bayesnet::(anonymous_60732030)" as C_0007011438637915849564
class C_0007011438637915849564 #aliceblue;line:blue;line.dotted;text:blue {
__
+ASC : std::string
+DESC : std::string
+RAND : std::string
}
class "bayesnet::MST" as C_0001054867409378333602
class C_0001054867409378333602 #aliceblue;line:blue;line.dotted;text:blue {
+MST() = default : void
+MST(const std::vector<std::string> & features, const torch::Tensor & weights, const int root) : void
..
+insertElement(std::list<int> & variables, int variable) : void
+maximumSpanningTree() : std::vector<std::pair<int,int>>
+reorder(std::vector<std::pair<float,std::pair<int,int>>> T, int root_original) : std::vector<std::pair<int,int>>
__
}
class "bayesnet::Graph" as C_0009576333456015187741
class C_0009576333456015187741 #aliceblue;line:blue;line.dotted;text:blue {
+Graph(int V) : void
..
+addEdge(int u, int v, float wt) : void
+find_set(int i) : int
+get_mst() : std::vector<std::pair<float,std::pair<int,int>>>
+kruskal_algorithm() : void
+union_set(int u, int v) : void
__
}
C_0010428199432536647474 --> C_0010428199432536647474 : -parents
C_0010428199432536647474 --> C_0010428199432536647474 : -children
C_0009493661199123436603 ..> C_0013393078277439680282
C_0009493661199123436603 o-- C_0010428199432536647474 : -nodes
C_0002617087915615796317 ..> C_0013393078277439680282
C_0002617087915615796317 ..> C_0005907365846270811004
C_0016351972983202413152 ..> C_0013393078277439680282
C_0016351972983202413152 o-- C_0009493661199123436603 : #model
C_0016351972983202413152 o-- C_0005895723015084986588 : #metrics
C_0016351972983202413152 o-- C_0005907365846270811004 : #status
C_0002617087915615796317 <|-- C_0016351972983202413152
C_0016351972983202413152 <|-- C_0008902920152122000044
C_0016351972983202413152 <|-- C_0004096182510460307610
C_0016351972983202413152 <|-- C_0016268916386101512883
C_0016351972983202413152 <|-- C_0014087955399074584137
C_0017759964713298103839 ..> C_0009493661199123436603
C_0002756018222998454702 ..> C_0013393078277439680282
C_0008902920152122000044 <|-- C_0002756018222998454702
C_0017759964713298103839 <|-- C_0002756018222998454702
C_0010957245114062042836 ..> C_0013393078277439680282
C_0004096182510460307610 <|-- C_0010957245114062042836
C_0017759964713298103839 <|-- C_0010957245114062042836
C_0013350632773616302678 ..> C_0013393078277439680282
C_0014087955399074584137 <|-- C_0013350632773616302678
C_0017759964713298103839 <|-- C_0013350632773616302678
C_0015881931090842884611 ..> C_0013393078277439680282
C_0015881931090842884611 o-- C_0016351972983202413152 : #models
C_0016351972983202413152 <|-- C_0015881931090842884611
C_0015881931090842884611 <|-- C_0001410789567057647859
C_0015881931090842884611 <|-- C_0006288892608974306258
C_0005895723015084986588 <|-- C_0013562609546004646591
C_0009819322948617116148 --> C_0013562609546004646591 : #featureSelector
C_0015881931090842884611 <|-- C_0009819322948617116148
C_0003898187834670349177 ..> C_0013393078277439680282
C_0015881931090842884611 <|-- C_0003898187834670349177
C_0017759964713298103839 <|-- C_0003898187834670349177
C_0000272055465257861326 ..> C_0013393078277439680282
C_0009819322948617116148 <|-- C_0000272055465257861326
C_0002867772739198819061 ..> C_0013393078277439680282
C_0009819322948617116148 <|-- C_0002867772739198819061
C_0013562609546004646591 <|-- C_0000093018845530739957
C_0013562609546004646591 <|-- C_0001157456122733975432
C_0013562609546004646591 <|-- C_0000066148117395428429
'Generated with clang-uml, version 0.5.5
'LLVM version clang version 18.1.8 (Fedora 18.1.8-5.fc41)
@enduml
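
As a complement to the diagram, here is a minimal usage sketch of `bayesnet::Network` built only from the method signatures shown above; the include path, the row-per-feature layout of the input data, and the data values themselves are assumptions for illustration:

```cpp
#include <map>
#include <string>
#include <vector>
#include <bayesnet/network/Network.h>  // assumed include path

int main()
{
    bayesnet::Network net;
    net.addNode("F");       // a single feature
    net.addNode("C");       // the class variable
    net.addEdge("C", "F");  // class is the parent of the feature
    // Assumed layout: one row per feature, one column per instance.
    std::vector<std::vector<int>> X = { { 0, 1, 0, 1 } };
    std::vector<int> y = { 0, 1, 0, 1 };
    std::vector<double> w(4, 1.0);  // uniform instance weights
    std::map<std::string, std::vector<int>> states = { { "F", { 0, 1 } }, { "C", { 0, 1 } } };
    net.fit(X, y, w, { "F" }, "C", states, bayesnet::Smoothing_t::LAPLACE);
    auto y_hat = net.predict(X);  // returns std::vector<int>
    return 0;
}
```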

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 196 KiB

View File

@ -1,314 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<!-- Generated by graphviz version 12.1.0 (20240811.2233)
-->
<!-- Title: BayesNet Pages: 1 -->
<svg width="3725pt" height="432pt"
viewBox="0.00 0.00 3724.84 431.80" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g id="graph0" class="graph" transform="scale(1 1) rotate(0) translate(4 427.8)">
<title>BayesNet</title>
<polygon fill="white" stroke="none" points="-4,4 -4,-427.8 3720.84,-427.8 3720.84,4 -4,4"/>
<!-- node0 -->
<g id="node1" class="node">
<title>node0</title>
<polygon fill="none" stroke="black" points="1655.43,-398.35 1655.43,-413.26 1625.69,-423.8 1583.63,-423.8 1553.89,-413.26 1553.89,-398.35 1583.63,-387.8 1625.69,-387.8 1655.43,-398.35"/>
<text text-anchor="middle" x="1604.66" y="-401.53" font-family="Times,serif" font-size="12.00">BayesNet</text>
</g>
<!-- node1 -->
<g id="node2" class="node">
<title>node1</title>
<polygon fill="none" stroke="black" points="413.32,-257.8 372.39,-273.03 206.66,-279.8 40.93,-273.03 0,-257.8 114.69,-245.59 298.64,-245.59 413.32,-257.8"/>
<text text-anchor="middle" x="206.66" y="-257.53" font-family="Times,serif" font-size="12.00">/home/rmontanana/Code/libtorch/lib/libc10.so</text>
</g>
<!-- node0&#45;&gt;node1 -->
<g id="edge1" class="edge">
<title>node0&#45;&gt;node1</title>
<path fill="none" stroke="black" d="M1553.59,-400.53C1451.65,-391.91 1215.69,-371.61 1017.66,-351.8 773.36,-327.37 488.07,-295.22 329.31,-277.01"/>
<polygon fill="black" stroke="black" points="329.93,-273.56 319.6,-275.89 329.14,-280.51 329.93,-273.56"/>
</g>
<!-- node2 -->
<g id="node3" class="node">
<title>node2</title>
<polygon fill="none" stroke="black" points="894.21,-257.8 848.35,-273.03 662.66,-279.8 476.98,-273.03 431.12,-257.8 559.61,-245.59 765.71,-245.59 894.21,-257.8"/>
<text text-anchor="middle" x="662.66" y="-257.53" font-family="Times,serif" font-size="12.00">/home/rmontanana/Code/libtorch/lib/libc10_cuda.so</text>
</g>
<!-- node0&#45;&gt;node2 -->
<g id="edge2" class="edge">
<title>node0&#45;&gt;node2</title>
<path fill="none" stroke="black" d="M1555.34,-397.37C1408.12,-375.18 969.52,-309.06 767.13,-278.55"/>
<polygon fill="black" stroke="black" points="767.81,-275.12 757.4,-277.09 766.77,-282.04 767.81,-275.12"/>
</g>
<!-- node3 -->
<g id="node4" class="node">
<title>node3</title>
<polygon fill="none" stroke="black" points="1338.68,-257.8 1296.49,-273.03 1125.66,-279.8 954.84,-273.03 912.65,-257.8 1030.86,-245.59 1220.46,-245.59 1338.68,-257.8"/>
<text text-anchor="middle" x="1125.66" y="-257.53" font-family="Times,serif" font-size="12.00">/home/rmontanana/Code/libtorch/lib/libkineto.a</text>
</g>
<!-- node0&#45;&gt;node3 -->
<g id="edge3" class="edge">
<title>node0&#45;&gt;node3</title>
<path fill="none" stroke="black" d="M1566.68,-393.54C1484.46,-369.17 1289.3,-311.32 1188.44,-281.41"/>
<polygon fill="black" stroke="black" points="1189.53,-278.09 1178.95,-278.6 1187.54,-284.8 1189.53,-278.09"/>
</g>
<!-- node4 -->
<g id="node5" class="node">
<title>node4</title>
<polygon fill="none" stroke="black" points="1552.26,-257.8 1532.93,-273.03 1454.66,-279.8 1376.4,-273.03 1357.07,-257.8 1411.23,-245.59 1498.1,-245.59 1552.26,-257.8"/>
<text text-anchor="middle" x="1454.66" y="-257.53" font-family="Times,serif" font-size="12.00">/usr/lib64/libcuda.so</text>
</g>
<!-- node0&#45;&gt;node4 -->
<g id="edge4" class="edge">
<title>node0&#45;&gt;node4</title>
<path fill="none" stroke="black" d="M1586.27,-387.39C1559.5,-362.05 1509.72,-314.92 1479.65,-286.46"/>
<polygon fill="black" stroke="black" points="1482.13,-283.99 1472.46,-279.65 1477.31,-289.07 1482.13,-283.99"/>
</g>
<!-- node5 -->
<g id="node6" class="node">
<title>node5</title>
<polygon fill="none" stroke="black" points="1873.26,-257.8 1843.23,-273.03 1721.66,-279.8 1600.09,-273.03 1570.06,-257.8 1654.19,-245.59 1789.13,-245.59 1873.26,-257.8"/>
<text text-anchor="middle" x="1721.66" y="-257.53" font-family="Times,serif" font-size="12.00">/usr/local/cuda/lib64/libcudart.so</text>
</g>
<!-- node0&#45;&gt;node5 -->
<g id="edge5" class="edge">
<title>node0&#45;&gt;node5</title>
<path fill="none" stroke="black" d="M1619.76,-387.77C1628.83,-377.46 1640.53,-363.98 1650.66,-351.8 1668.32,-330.59 1687.84,-306.03 1701.94,-288.1"/>
<polygon fill="black" stroke="black" points="1704.43,-290.59 1707.84,-280.56 1698.92,-286.27 1704.43,-290.59"/>
</g>
<!-- node6 -->
<g id="node7" class="node">
<title>node6</title>
<polygon fill="none" stroke="black" points="2231.79,-257.8 2198.1,-273.03 2061.66,-279.8 1925.23,-273.03 1891.53,-257.8 1985.95,-245.59 2137.38,-245.59 2231.79,-257.8"/>
<text text-anchor="middle" x="2061.66" y="-257.53" font-family="Times,serif" font-size="12.00">/usr/local/cuda/lib64/libnvToolsExt.so</text>
</g>
<!-- node0&#45;&gt;node6 -->
<g id="edge6" class="edge">
<title>node0&#45;&gt;node6</title>
<path fill="none" stroke="black" d="M1642.06,-393.18C1721.31,-368.56 1906.71,-310.95 2002.32,-281.24"/>
<polygon fill="black" stroke="black" points="2003.28,-284.61 2011.79,-278.3 2001.21,-277.92 2003.28,-284.61"/>
</g>
<!-- node7 -->
<g id="node8" class="node">
<title>node7</title>
<polygon fill="none" stroke="black" points="2541.44,-257.8 2512.56,-273.03 2395.66,-279.8 2278.76,-273.03 2249.89,-257.8 2330.79,-245.59 2460.54,-245.59 2541.44,-257.8"/>
<text text-anchor="middle" x="2395.66" y="-257.53" font-family="Times,serif" font-size="12.00">/usr/local/cuda/lib64/libnvrtc.so</text>
</g>
<!-- node0&#45;&gt;node7 -->
<g id="edge7" class="edge">
<title>node0&#45;&gt;node7</title>
<path fill="none" stroke="black" d="M1651.19,-396.45C1780.36,-373.26 2144.76,-307.85 2311.05,-277.99"/>
<polygon fill="black" stroke="black" points="2311.47,-281.47 2320.7,-276.26 2310.24,-274.58 2311.47,-281.47"/>
</g>
<!-- node8 -->
<g id="node9" class="node">
<title>node8</title>
<polygon fill="none" stroke="black" points="1642.01,-326.35 1642.01,-341.26 1620.13,-351.8 1589.19,-351.8 1567.31,-341.26 1567.31,-326.35 1589.19,-315.8 1620.13,-315.8 1642.01,-326.35"/>
<text text-anchor="middle" x="1604.66" y="-329.53" font-family="Times,serif" font-size="12.00">fimdlp</text>
</g>
<!-- node0&#45;&gt;node8 -->
<g id="edge8" class="edge">
<title>node0&#45;&gt;node8</title>
<path fill="none" stroke="black" d="M1604.66,-387.5C1604.66,-380.21 1604.66,-371.53 1604.66,-363.34"/>
<polygon fill="black" stroke="black" points="1608.16,-363.42 1604.66,-353.42 1601.16,-363.42 1608.16,-363.42"/>
</g>
<!-- node19 -->
<g id="node10" class="node">
<title>node19</title>
<polygon fill="none" stroke="black" points="2709.74,-267.37 2634.66,-279.8 2559.58,-267.37 2588.26,-247.24 2681.06,-247.24 2709.74,-267.37"/>
<text text-anchor="middle" x="2634.66" y="-257.53" font-family="Times,serif" font-size="12.00">torch_library</text>
</g>
<!-- node0&#45;&gt;node19 -->
<g id="edge29" class="edge">
<title>node0&#45;&gt;node19</title>
<path fill="none" stroke="black" d="M1655.87,-399.32C1798.23,-383.79 2210.64,-336.94 2550.66,-279.8 2559.43,-278.33 2568.68,-276.62 2577.72,-274.86"/>
<polygon fill="black" stroke="black" points="2578.38,-278.3 2587.5,-272.92 2577.01,-271.43 2578.38,-278.3"/>
</g>
<!-- node8&#45;&gt;node1 -->
<g id="edge9" class="edge">
<title>node8&#45;&gt;node1</title>
<path fill="none" stroke="black" d="M1566.84,-331.58C1419.81,-326.72 872.06,-307.69 421.66,-279.8 401.07,-278.53 379.38,-277.02 358.03,-275.43"/>
<polygon fill="black" stroke="black" points="358.3,-271.94 348.06,-274.67 357.77,-278.92 358.3,-271.94"/>
</g>
<!-- node8&#45;&gt;node2 -->
<g id="edge10" class="edge">
<title>node8&#45;&gt;node2</title>
<path fill="none" stroke="black" d="M1566.86,-330C1445.11,-320.95 1057.97,-292.18 831.67,-275.36"/>
<polygon fill="black" stroke="black" points="832.09,-271.89 821.86,-274.63 831.57,-278.87 832.09,-271.89"/>
</g>
<!-- node8&#45;&gt;node3 -->
<g id="edge11" class="edge">
<title>node8&#45;&gt;node3</title>
<path fill="none" stroke="black" d="M1567.08,-327.31C1495.4,-316.84 1336.86,-293.67 1230.62,-278.14"/>
<polygon fill="black" stroke="black" points="1231.44,-274.72 1221.04,-276.74 1230.42,-281.65 1231.44,-274.72"/>
</g>
<!-- node8&#45;&gt;node4 -->
<g id="edge12" class="edge">
<title>node8&#45;&gt;node4</title>
<path fill="none" stroke="black" d="M1578.53,-320.61C1555.96,-310.08 1522.92,-294.66 1496.64,-282.4"/>
<polygon fill="black" stroke="black" points="1498.12,-279.22 1487.58,-278.17 1495.16,-285.57 1498.12,-279.22"/>
</g>
<!-- node8&#45;&gt;node5 -->
<g id="edge13" class="edge">
<title>node8&#45;&gt;node5</title>
<path fill="none" stroke="black" d="M1627.78,-318.97C1644.15,-309.18 1666.44,-295.84 1685.2,-284.62"/>
<polygon fill="black" stroke="black" points="1686.83,-287.73 1693.61,-279.59 1683.23,-281.72 1686.83,-287.73"/>
</g>
<!-- node8&#45;&gt;node6 -->
<g id="edge14" class="edge">
<title>node8&#45;&gt;node6</title>
<path fill="none" stroke="black" d="M1642.45,-327.02C1712.36,-316.31 1863.89,-293.1 1964.32,-277.71"/>
<polygon fill="black" stroke="black" points="1964.84,-281.18 1974.2,-276.2 1963.78,-274.26 1964.84,-281.18"/>
</g>
<!-- node8&#45;&gt;node7 -->
<g id="edge15" class="edge">
<title>node8&#45;&gt;node7</title>
<path fill="none" stroke="black" d="M1642.33,-330.01C1740.75,-322.64 2013.75,-301.7 2240.66,-279.8 2254.16,-278.5 2268.32,-277.06 2282.35,-275.58"/>
<polygon fill="black" stroke="black" points="2282.49,-279.08 2292.06,-274.54 2281.75,-272.12 2282.49,-279.08"/>
</g>
<!-- node8&#45;&gt;node19 -->
<g id="edge16" class="edge">
<title>node8&#45;&gt;node19</title>
<path fill="none" stroke="black" d="M1642.25,-332.63C1770.06,-331.64 2199.48,-324.94 2550.66,-279.8 2560.1,-278.59 2570.07,-276.92 2579.71,-275.1"/>
<polygon fill="black" stroke="black" points="2580.21,-278.57 2589.34,-273.21 2578.86,-271.7 2580.21,-278.57"/>
</g>
<!-- node20 -->
<g id="node11" class="node">
<title>node20</title>
<polygon fill="none" stroke="black" points="2606.81,-185.8 2533.89,-201.03 2238.66,-207.8 1943.43,-201.03 1870.52,-185.8 2074.82,-173.59 2402.5,-173.59 2606.81,-185.8"/>
<text text-anchor="middle" x="2238.66" y="-185.53" font-family="Times,serif" font-size="12.00">&#45;Wl,&#45;&#45;no&#45;as&#45;needed,&quot;/home/rmontanana/Code/libtorch/lib/libtorch.so&quot; &#45;Wl,&#45;&#45;as&#45;needed</text>
</g>
<!-- node19&#45;&gt;node20 -->
<g id="edge17" class="edge">
<title>node19&#45;&gt;node20</title>
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M2583.63,-250.21C2572.76,-248.03 2561.34,-245.79 2550.66,-243.8 2482.14,-231.05 2404.92,-217.93 2344.44,-207.93"/>
<polygon fill="black" stroke="black" points="2345.28,-204.52 2334.84,-206.34 2344.14,-211.42 2345.28,-204.52"/>
</g>
<!-- node9 -->
<g id="node12" class="node">
<title>node9</title>
<polygon fill="none" stroke="black" points="2542.56,-123.37 2445.66,-135.8 2348.77,-123.37 2385.78,-103.24 2505.55,-103.24 2542.56,-123.37"/>
<text text-anchor="middle" x="2445.66" y="-113.53" font-family="Times,serif" font-size="12.00">torch_cpu_library</text>
</g>
<!-- node19&#45;&gt;node9 -->
<g id="edge18" class="edge">
<title>node19&#45;&gt;node9</title>
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M2635.72,-246.84C2636.4,-227.49 2634.61,-192.58 2615.66,-171.8 2601.13,-155.87 2551.93,-141.56 2510.18,-131.84"/>
<polygon fill="black" stroke="black" points="2511.2,-128.48 2500.67,-129.68 2509.65,-135.31 2511.2,-128.48"/>
</g>
<!-- node13 -->
<g id="node16" class="node">
<title>node13</title>
<polygon fill="none" stroke="black" points="3056.45,-195.37 2953.66,-207.8 2850.87,-195.37 2890.13,-175.24 3017.19,-175.24 3056.45,-195.37"/>
<text text-anchor="middle" x="2953.66" y="-185.53" font-family="Times,serif" font-size="12.00">torch_cuda_library</text>
</g>
<!-- node19&#45;&gt;node13 -->
<g id="edge22" class="edge">
<title>node19&#45;&gt;node13</title>
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M2685.21,-249.71C2741.11,-237.45 2831.21,-217.67 2891.42,-204.46"/>
<polygon fill="black" stroke="black" points="2891.8,-207.96 2900.82,-202.4 2890.3,-201.13 2891.8,-207.96"/>
</g>
<!-- node10 -->
<g id="node13" class="node">
<title>node10</title>
<polygon fill="none" stroke="black" points="2362.4,-27.9 2285.6,-43.12 1974.66,-49.9 1663.72,-43.12 1586.93,-27.9 1802.1,-15.68 2147.22,-15.68 2362.4,-27.9"/>
<text text-anchor="middle" x="1974.66" y="-27.63" font-family="Times,serif" font-size="12.00">&#45;Wl,&#45;&#45;no&#45;as&#45;needed,&quot;/home/rmontanana/Code/libtorch/lib/libtorch_cpu.so&quot; &#45;Wl,&#45;&#45;as&#45;needed</text>
</g>
<!-- node9&#45;&gt;node10 -->
<g id="edge19" class="edge">
<title>node9&#45;&gt;node10</title>
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M2381.16,-105.31C2301.63,-91.15 2165.65,-66.92 2073.05,-50.43"/>
<polygon fill="black" stroke="black" points="2073.93,-47.03 2063.48,-48.72 2072.71,-53.92 2073.93,-47.03"/>
</g>
<!-- node11 -->
<g id="node14" class="node">
<title>node11</title>
<polygon fill="none" stroke="black" points="2510.72,-37.46 2445.66,-49.9 2380.61,-37.46 2405.46,-17.34 2485.87,-17.34 2510.72,-37.46"/>
<text text-anchor="middle" x="2445.66" y="-27.63" font-family="Times,serif" font-size="12.00">caffe2::mkl</text>
</g>
<!-- node9&#45;&gt;node11 -->
<g id="edge20" class="edge">
<title>node9&#45;&gt;node11</title>
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M2445.66,-102.95C2445.66,-91.68 2445.66,-75.4 2445.66,-61.37"/>
<polygon fill="black" stroke="black" points="2449.16,-61.78 2445.66,-51.78 2442.16,-61.78 2449.16,-61.78"/>
</g>
<!-- node12 -->
<g id="node15" class="node">
<title>node12</title>
<polygon fill="none" stroke="black" points="2794.95,-41.76 2661.66,-63.8 2528.37,-41.76 2579.28,-6.09 2744.04,-6.09 2794.95,-41.76"/>
<text text-anchor="middle" x="2661.66" y="-34.75" font-family="Times,serif" font-size="12.00">dummy</text>
<text text-anchor="middle" x="2661.66" y="-20.5" font-family="Times,serif" font-size="12.00">(protobuf::libprotobuf)</text>
</g>
<!-- node9&#45;&gt;node12 -->
<g id="edge21" class="edge">
<title>node9&#45;&gt;node12</title>
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M2481.82,-102.76C2512.55,-90.82 2557.5,-73.36 2594.77,-58.89"/>
<polygon fill="black" stroke="black" points="2595.6,-62.32 2603.65,-55.44 2593.06,-55.79 2595.6,-62.32"/>
</g>
<!-- node13&#45;&gt;node9 -->
<g id="edge28" class="edge">
<title>node13&#45;&gt;node9</title>
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M2880.59,-179.79C2799.97,-169.71 2666.42,-152.57 2551.66,-135.8 2540.2,-134.13 2528.06,-132.27 2516.24,-130.41"/>
<polygon fill="black" stroke="black" points="2516.96,-126.98 2506.54,-128.86 2515.87,-133.89 2516.96,-126.98"/>
</g>
<!-- node14 -->
<g id="node17" class="node">
<title>node14</title>
<polygon fill="none" stroke="black" points="3346.69,-113.8 3268.85,-129.03 2953.66,-135.8 2638.48,-129.03 2560.63,-113.8 2778.75,-101.59 3128.58,-101.59 3346.69,-113.8"/>
<text text-anchor="middle" x="2953.66" y="-113.53" font-family="Times,serif" font-size="12.00">&#45;Wl,&#45;&#45;no&#45;as&#45;needed,&quot;/home/rmontanana/Code/libtorch/lib/libtorch_cuda.so&quot; &#45;Wl,&#45;&#45;as&#45;needed</text>
</g>
<!-- node13&#45;&gt;node14 -->
<g id="edge23" class="edge">
<title>node13&#45;&gt;node14</title>
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M2953.66,-174.97C2953.66,-167.13 2953.66,-157.01 2953.66,-147.53"/>
<polygon fill="black" stroke="black" points="2957.16,-147.59 2953.66,-137.59 2950.16,-147.59 2957.16,-147.59"/>
</g>
<!-- node15 -->
<g id="node18" class="node">
<title>node15</title>
<polygon fill="none" stroke="black" points="3514.74,-123.37 3439.66,-135.8 3364.58,-123.37 3393.26,-103.24 3486.06,-103.24 3514.74,-123.37"/>
<text text-anchor="middle" x="3439.66" y="-113.53" font-family="Times,serif" font-size="12.00">torch::cudart</text>
</g>
<!-- node13&#45;&gt;node15 -->
<g id="edge24" class="edge">
<title>node13&#45;&gt;node15</title>
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M3028.35,-180.51C3109.24,-171.17 3241.96,-154.78 3355.66,-135.8 3364.43,-134.34 3373.69,-132.63 3382.72,-130.88"/>
<polygon fill="black" stroke="black" points="3383.38,-134.31 3392.51,-128.93 3382.02,-127.45 3383.38,-134.31"/>
</g>
<!-- node17 -->
<g id="node20" class="node">
<title>node17</title>
<polygon fill="none" stroke="black" points="3716.84,-123.37 3624.66,-135.8 3532.48,-123.37 3567.69,-103.24 3681.63,-103.24 3716.84,-123.37"/>
<text text-anchor="middle" x="3624.66" y="-113.53" font-family="Times,serif" font-size="12.00">torch::nvtoolsext</text>
</g>
<!-- node13&#45;&gt;node17 -->
<g id="edge26" class="edge">
<title>node13&#45;&gt;node17</title>
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M3033.64,-183.25C3144.1,-175.14 3349.47,-158.53 3523.66,-135.8 3534.84,-134.35 3546.67,-132.57 3558.15,-130.72"/>
<polygon fill="black" stroke="black" points="3558.68,-134.18 3567.98,-129.1 3557.54,-127.27 3558.68,-134.18"/>
</g>
<!-- node16 -->
<g id="node19" class="node">
<title>node16</title>
<polygon fill="none" stroke="black" points="3510.78,-27.9 3496.7,-43.12 3439.66,-49.9 3382.63,-43.12 3368.54,-27.9 3408.01,-15.68 3471.31,-15.68 3510.78,-27.9"/>
<text text-anchor="middle" x="3439.66" y="-27.63" font-family="Times,serif" font-size="12.00">CUDA::cudart</text>
</g>
<!-- node15&#45;&gt;node16 -->
<g id="edge25" class="edge">
<title>node15&#45;&gt;node16</title>
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M3439.66,-102.95C3439.66,-91.68 3439.66,-75.4 3439.66,-61.37"/>
<polygon fill="black" stroke="black" points="3443.16,-61.78 3439.66,-51.78 3436.16,-61.78 3443.16,-61.78"/>
</g>
<!-- node18 -->
<g id="node21" class="node">
<title>node18</title>
<polygon fill="none" stroke="black" points="3714.32,-27.9 3696.56,-43.12 3624.66,-49.9 3552.77,-43.12 3535.01,-27.9 3584.76,-15.68 3664.56,-15.68 3714.32,-27.9"/>
<text text-anchor="middle" x="3624.66" y="-27.63" font-family="Times,serif" font-size="12.00">CUDA::nvToolsExt</text>
</g>
<!-- node17&#45;&gt;node18 -->
<g id="edge27" class="edge">
<title>node17&#45;&gt;node18</title>
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M3624.66,-102.95C3624.66,-91.68 3624.66,-75.4 3624.66,-61.37"/>
<polygon fill="black" stroke="black" points="3628.16,-61.78 3624.66,-51.78 3621.16,-61.78 3628.16,-61.78"/>
</g>
</g>
</svg>

Before

Width:  |  Height:  |  Size: 18 KiB

View File

@ -1,30 +0,0 @@
# BoostAODE Algorithm Operation
## Hyperparameters
The hyperparameters defined in the algorithm are:
- ***bisection*** (*boolean*): If set to *true*, allows the algorithm to add *k* models at once (as specified in the algorithm) to the ensemble. Default value: *true*.
- ***bisection_best*** (*boolean*): If set to *true*, the algorithm will take as *priorAccuracy* the best accuracy computed. If set to *false*, it will take the last accuracy as *priorAccuracy*. Default value: *false*.
- ***order*** (*{"asc", "desc", "rand"}*): Sets the order (ascending/descending/random) in which dataset variables will be processed to choose the parents of the *SPODEs*. Default value: *"desc"*.
- ***block_update*** (*boolean*): Sets whether the algorithm will update the weights of the models in blocks. If set to false, the algorithm will update the weights of the models one by one. Default value: *false*.
- ***convergence*** (*boolean*): Sets whether convergence of the result is used as a termination condition. If set to *true*, the training dataset passed to the model is divided in two: one part serves as training data and the other as a test set (so the original test partition becomes a validation partition in this case). The partition is the first fold of a stratified 5-fold split generated with a predetermined seed. Under this *convergence* criterion, training continues while the accuracy obtained by the current model exceeds that of the previous model by more than *1e-4*; otherwise, one is added to the number of models that worsen the result (see the next hyperparameter). Default value: *true*.
- ***maxTolerance*** (*int*): Sets the maximum number of models that can worsen the result without triggering the termination condition. If ***bisection*** is set to *true*, the value of this hyperparameter is the exponent of base 2 used to compute the number of models to insert at once. Default value: *3*.
- ***select_features*** (*{"IWSS", "FCBF", "CFS", ""}*): Selects the variable selection method used to build the initial models of the ensemble, which are included without considering any of the other exit conditions. Once the models for the selected variables are built, the algorithm updates the weights using the ensemble and assigns the same &alpha;<sub>t</sub> as significance to all of them. Default value: *""*.
- ***threshold*** (*double*): Sets the threshold required by the IWSS and FCBF algorithms. Accepted values are:
- IWSS: $threshold \in [0, 0.5]$
- FCBF: $threshold \in [10^{-7}, 1]$
Default value is *-1*, so every time either of those algorithms is called the threshold has to be set to the desired value.
- ***predict_voting*** (*boolean*): Sets whether the algorithm will use *model voting* to predict the result. If set to false, the weighted average of the probabilities of each model's prediction will be used. Default value: *false*.
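
As a sketch of how these hyperparameters might be passed (the `setHyperparameters` method takes an `nlohmann::json` object per the class interface; the keys are assumed to match the names above and the values are illustrative only):

```cpp
#include <nlohmann/json.hpp>

// `model` is assumed to be a bayesnet::BoostAODE instance.
nlohmann::json hyperparameters = {
    { "bisection", true },
    { "order", "desc" },
    { "convergence", true },
    { "maxTolerance", 3 },
    { "select_features", "IWSS" },
    { "threshold", 0.25 },  // required whenever IWSS or FCBF is selected
    { "predict_voting", false }
};
model.setHyperparameters(hyperparameters);
```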
## Operation
### [Base Algorithm](./algorithm.md)

Binary file not shown.

Binary file not shown.

File diff suppressed because it is too large Load Diff

View File

@ -1,117 +0,0 @@
# Algorithm
- // notation
- $n$ features ${\cal{X}} = \{X_1, \dots, X_n\}$ and the class $Y$
- $m$ instances.
- $D = \{ (x_1^i, \dots, x_n^i, y^i) \}_{i=1}^{m}$
- $W$ a weights vector. $W_0$ are the initial weights.
- $D[W]$ dataset with weights $W$ for the instances.
1. // initialization
2. $W_0 \leftarrow (w_1, \dots, w_m) \leftarrow 1/m$
3. $W \leftarrow W_0$
4. $Vars \leftarrow {\cal{X}}$
5. $\delta \leftarrow 10^{-4}$
6. $convergence \leftarrow True$ // hyperparameter
7. $maxTolerance \leftarrow 3$ // hyperparameter
8. $bisection \leftarrow False$ // hyperparameter
9. $finished \leftarrow False$
10. $AODE \leftarrow \emptyset$ // the ensemble
11. $tolerance \leftarrow 0$
12. $numItemsPack \leftarrow 0$
13. $maxAccuracy \leftarrow -1$
14.
15. // main loop
16. While $(\lnot finished)$
1. $\pi \leftarrow SortFeatures(Vars, criterion, D[W])$
2. $k \leftarrow 2^{tolerance}$
3. if ($tolerance == 0$) $numItemsPack \leftarrow 0$
4. $P \leftarrow Head(\pi,k)$ // first k features in order
5. $spodes \leftarrow \emptyset$
6. $i \leftarrow 0$
7. While ($i < size(P)$)
1. $X \leftarrow P[i]$
2. $i \leftarrow i + 1$
3. $numItemsPack \leftarrow numItemsPack + 1$
4. $Vars.remove(X)$
5. $spode \leftarrow BuildSpode(X, {\cal{X}}, D[W])$
6. $\hat{y}[] \leftarrow spode.Predict(D)$
7. $\epsilon \leftarrow error(\hat{y}[], y[])$
8. $\alpha \leftarrow \frac{1}{2} ln \left ( \frac{1-\epsilon}{\epsilon} \right )$
9. if ($\epsilon > 0.5$)
1. $finished \leftarrow True$
2. break
10. $spodes.add( (spode,\alpha) )$
11. $W \leftarrow UpdateWeights(W,\alpha,y[],\hat{y}[])$
8. $AODE.add( spodes )$
9. if ($convergence \land \lnot finished$)
1. $\hat{y}[] \leftarrow AODE.Predict(D)$
2. $actualAccuracy \leftarrow accuracy(\hat{y}[], y[])$
3. if ($maxAccuracy == -1$) $maxAccuracy \leftarrow actualAccuracy$
4. if $((actualAccuracy - maxAccuracy) < \delta)$ // result doesn't improve enough
1. $tolerance \leftarrow tolerance + 1$
5. else
1. $tolerance \leftarrow 0$
2. $numItemsPack \leftarrow 0$
10. If $(Vars == \emptyset \lor tolerance > maxTolerance) \; finished \leftarrow True$
11. $maxAccuracy \leftarrow max(maxAccuracy, actualAccuracy)$
17. if ($tolerance > maxTolerance$) // algorithm finished because of lack of convergence
1. $removeModels(AODE, numItemsPack)$
18. Return $AODE$
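
Given the definition of $\alpha$ above, the $UpdateWeights$ step corresponds to the standard AdaBoost rule (a sketch; the exact normalization used by the implementation may differ): each misclassified instance is reweighted as $w_i \leftarrow w_i \cdot e^{\alpha}$ while correctly classified instances keep their weight, and the vector is then renormalized so that $\sum_{i=1}^{m} w_i = 1$. Misclassified instances thus gain influence in the next round.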

View File

@ -1,80 +0,0 @@
\section{Algorithm}
\begin{itemize}
\item[] // notation
\item $n$ features ${\cal{X}} = \{X_1, \dots, X_n\}$ and the class $Y$
\item $m$ instances.
\item $D = \{ (x_1^i, \dots, x_n^i, y^i) \}_{i=1}^{m}$
\item $W$ a weights vector. $W_0$ are the initial weights.
\item $D[W]$ dataset with weights $W$ for the instances.
\end{itemize}
\bigskip
\begin{enumerate}
\item[] // initialization
\item $W_0 \leftarrow (w_1, \dots, w_m) \leftarrow 1/m$
\item $W \leftarrow W_0$
\item $Vars \leftarrow {\cal{X}}$
\item $\delta \leftarrow 10^{-4}$
\item $convergence \leftarrow True$ // hyperparameter
\item $maxTolerance \leftarrow 3$ // hyperparameter
\item $bisection \leftarrow False$ // hyperparameter
\item $finished \leftarrow False$
\item $AODE \leftarrow \emptyset$ \hspace*{2cm} // the ensemble
\item $tolerance \leftarrow 0$
\item $numItemsPack \leftarrow 0$
\item $maxAccuracy \leftarrow -1$
\item[]
\newpage
\item[] // main loop
\item While $(\lnot finished)$
\begin{enumerate}
\item $\pi \leftarrow SortFeatures(Vars, criterion, D[W])$
\item $k \leftarrow 2^{tolerance}$
\item if ($tolerance == 0$) $numItemsPack \leftarrow 0$
\item $P \leftarrow Head(\pi,k)$ \hspace*{2cm} // first k features in order
\item $spodes \leftarrow \emptyset$
\item $i \leftarrow 0$
\item While ($i < size(P)$)
\begin{enumerate}
\item $X \leftarrow P[i]$
\item $i \leftarrow i + 1$
\item $numItemsPack \leftarrow numItemsPack + 1$
\item $Vars.remove(X)$
\item $spode \leftarrow BuildSpode(X, {\cal{X}}, D[W])$
\item $\hat{y}[] \leftarrow spode.Predict(D)$
\item $\epsilon \leftarrow error(\hat{y}[], y[])$
\item $\alpha \leftarrow \frac{1}{2} ln \left ( \frac{1-\epsilon}{\epsilon} \right )$
\item if ($\epsilon > 0.5$)
\begin{enumerate}
\item $finished \leftarrow True$
\item break
\end{enumerate}
\item $spodes.add( (spode,\alpha) )$
\item $W \leftarrow UpdateWeights(W,\alpha,y[],\hat{y}[])$
\end{enumerate}
\item $AODE.add( spodes )$
\item if ($convergence \land \lnot finished$)
\begin{enumerate}
\item $\hat{y}[] \leftarrow AODE.Predict(D)$
\item $actualAccuracy \leftarrow accuracy(\hat{y}[], y[])$
\item if ($maxAccuracy == -1$) $maxAccuracy \leftarrow actualAccuracy$
\item if $((actualAccuracy - maxAccuracy) < \delta)$\hspace*{2cm} // result doesn't improve enough
\begin{enumerate}
\item $tolerance \leftarrow tolerance + 1$
\end{enumerate}
\item else
\begin{enumerate}
\item $tolerance \leftarrow 0$
\item $numItemsPack \leftarrow 0$
\end{enumerate}
\end{enumerate}
\item if $(Vars == \emptyset \lor tolerance > maxTolerance) \; finished \leftarrow True$
\item $maxAccuracy \leftarrow max(maxAccuracy, actualAccuracy)$
\end{enumerate}
\item if ($tolerance > maxTolerance$) \hspace*{1cm} // algorithm finished because of lack of convergence
\begin{enumerate}
\item $removeModels(AODE, numItemsPack)$
\end{enumerate}
\item Return $AODE$
\end{enumerate}

Binary file not shown.


4
gcovr.cfg Normal file
View File

@ -0,0 +1,4 @@
filter = src/
exclude-directories = build/lib/
print-summary = yes
sort-percentage = yes
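For reference, gcovr has picked up a gcovr.cfg from its starting directory automatically since release 4.2, so running plain gcovr from the project root after a coverage-enabled build should apply these filters; gcovr --config gcovr.cfg is the explicit equivalent.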

170
lib/Files/ArffFiles.cc Normal file
View File

@ -0,0 +1,170 @@
#include "ArffFiles.h"
#include <fstream>
#include <sstream>
#include <map>
#include <algorithm> // sort/unique used when dropping rows with missing values
#include <iostream>
using namespace std;
ArffFiles::ArffFiles() = default;
vector<string> ArffFiles::getLines() const
{
return lines;
}
unsigned long int ArffFiles::getSize() const
{
return lines.size();
}
vector<pair<string, string>> ArffFiles::getAttributes() const
{
return attributes;
}
string ArffFiles::getClassName() const
{
return className;
}
string ArffFiles::getClassType() const
{
return classType;
}
vector<vector<float>>& ArffFiles::getX()
{
return X;
}
vector<int>& ArffFiles::getY()
{
return y;
}
void ArffFiles::loadCommon(string fileName)
{
ifstream file(fileName);
if (!file.is_open()) {
throw invalid_argument("Unable to open file");
}
string line;
string keyword;
string attribute;
string type;
string type_w;
while (getline(file, line)) {
if (line.empty() || line[0] == '%' || line == "\r" || line == " ") {
continue;
}
if (line.find("@attribute") != string::npos || line.find("@ATTRIBUTE") != string::npos) {
stringstream ss(line);
ss >> keyword >> attribute;
type = "";
while (ss >> type_w)
type += type_w + " ";
attributes.emplace_back(trim(attribute), trim(type));
continue;
}
if (line[0] == '@') {
continue;
}
lines.push_back(line);
}
file.close();
if (attributes.empty())
throw invalid_argument("No attributes found");
}
void ArffFiles::load(const string& fileName, bool classLast)
{
int labelIndex;
loadCommon(fileName);
if (classLast) {
className = get<0>(attributes.back());
classType = get<1>(attributes.back());
attributes.pop_back();
labelIndex = static_cast<int>(attributes.size());
} else {
className = get<0>(attributes.front());
classType = get<1>(attributes.front());
attributes.erase(attributes.begin());
labelIndex = 0;
}
generateDataset(labelIndex);
}
void ArffFiles::load(const string& fileName, const string& name)
{
int labelIndex;
loadCommon(fileName);
bool found = false;
for (int i = 0; i < attributes.size(); ++i) {
if (attributes[i].first == name) {
className = get<0>(attributes[i]);
classType = get<1>(attributes[i]);
attributes.erase(attributes.begin() + i);
labelIndex = i;
found = true;
break;
}
}
if (!found) {
throw invalid_argument("Class name not found");
}
generateDataset(labelIndex);
}
void ArffFiles::generateDataset(int labelIndex)
{
X = vector<vector<float>>(attributes.size(), vector<float>(lines.size()));
auto yy = vector<string>(lines.size(), "");
auto removeLines = vector<int>(); // Lines with missing values
for (size_t i = 0; i < lines.size(); i++) {
stringstream ss(lines[i]);
string value;
int pos = 0;
int xIndex = 0;
while (getline(ss, value, ',')) {
if (pos++ == labelIndex) {
yy[i] = value;
} else {
if (value == "?") {
X[xIndex++][i] = -1;
removeLines.push_back(i);
} else
X[xIndex++][i] = stof(value);
}
}
}
// Deduplicate indices and erase from the back so earlier erasures
// do not shift the positions of rows still pending removal
sort(removeLines.begin(), removeLines.end());
removeLines.erase(unique(removeLines.begin(), removeLines.end()), removeLines.end());
for (auto it = removeLines.rbegin(); it != removeLines.rend(); ++it) {
yy.erase(yy.begin() + *it);
for (auto& x : X) {
x.erase(x.begin() + *it);
}
}
y = factorize(yy);
}
string ArffFiles::trim(const string& source)
{
string s(source);
s.erase(0, s.find_first_not_of(" '\n\r\t"));
s.erase(s.find_last_not_of(" '\n\r\t") + 1);
return s;
}
vector<int> ArffFiles::factorize(const vector<string>& labels_t)
{
vector<int> yy;
yy.reserve(labels_t.size());
map<string, int> labelMap;
int i = 0;
for (const string& label : labels_t) {
if (labelMap.find(label) == labelMap.end()) {
labelMap[label] = i++;
}
yy.push_back(labelMap[label]);
}
return yy;
}

34
lib/Files/ArffFiles.h Normal file
View File

@ -0,0 +1,34 @@
#ifndef ARFFFILES_H
#define ARFFFILES_H
#include <string>
#include <vector>
using namespace std;
class ArffFiles {
private:
vector<string> lines;
vector<pair<string, string>> attributes;
string className;
string classType;
vector<vector<float>> X;
vector<int> y;
void generateDataset(int);
void loadCommon(string);
public:
ArffFiles();
void load(const string&, bool = true);
void load(const string&, const string&);
vector<string> getLines() const;
unsigned long int getSize() const;
string getClassName() const;
string getClassType() const;
static string trim(const string&);
vector<vector<float>>& getX();
vector<int>& getY();
vector<pair<string, string>> getAttributes() const;
static vector<int> factorize(const vector<string>& labels_t);
};
#endif
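
Putting the interface together, a minimal usage sketch (the dataset path is hypothetical):

    #include <iostream>
    #include "ArffFiles.h"

    int main()
    {
        ArffFiles arff;
        arff.load("datasets/iris.arff"); // class defaults to the last attribute
        std::cout << arff.getClassName() << " (" << arff.getClassType() << "), "
                  << arff.getSize() << " instances, "
                  << arff.getAttributes().size() << " features\n";
        auto& X = arff.getX(); // X[feature][instance], float values
        auto& y = arff.getY(); // class labels factorized to 0..k-1
        std::cout << "X[0][0] = " << X[0][0] << ", y[0] = " << y[0] << "\n";
    }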

1
lib/Files/CMakeLists.txt Normal file
View File

@ -0,0 +1 @@
add_library(ArffFiles ArffFiles.cc)

1
lib/argparse Submodule

@ -0,0 +1 @@
Subproject commit b0930ab0288185815d6dc67af59de7014a6272f7

@ -1 +1 @@
Subproject commit 029fe3b4609dd84cd939b73357f37bbb75bcf82f
Subproject commit 9c541ca72e7857dec71d8a41b97e42c2f1c92602

@ -1 +0,0 @@
Subproject commit 2ac43e32ac1eac0c986702ec526cf5367a565ef0

@ -1 +1 @@
Subproject commit 378e091795a70fced276cd882bd8a6a428668fe5
Subproject commit 5d2754306d67d1e654a1a34e1d2e74439a9d53b3

1
lib/libxlsxwriter Submodule

@ -0,0 +1 @@
Subproject commit 29355a0887475488c7cc470ad43cc867fcfa92e2

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -1 +1 @@
Subproject commit 7d62d6af4a6ca944a3bbde0b61f651fd4b2d3f57
Subproject commit 5708dc3de944fc22d61a2dd071b63aa338e04db3

BIN
logo.png

Binary file not shown.


33
mac_mst.txt Normal file
View File

@ -0,0 +1,33 @@
Weights matrix:
0.0000000, 0.0384968, 0.0795434, 0.1546867, -0.0000000, 0.1788104, 0.2214721, 0.0323837, 0.0366549,
0.0384968, 0.0000000, 0.0200662, 0.0200937, -0.0000000, 0.0637224, 0.0183005, 0.0127657, 0.0136054,
0.0795434, 0.0200662, 0.0000000, 0.0605489, -0.0000000, 0.0894469, 0.1689408, 0.0321602, 0.0223184,
0.1546867, 0.0200937, 0.0605489, 0.0000000, -0.0000000, 0.1150757, 0.1332292, 0.0422865, 0.0191138,
-0.0000000, -0.0000000, -0.0000000, -0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.1788104, 0.0637224, 0.0894469, 0.1150757, 0.0000000, 0.0000000, 0.1407102, 0.0406590, 0.0366986,
0.2214721, 0.0183005, 0.1689408, 0.1332292, 0.0000000, 0.1407102, 0.0000000, 0.0427515, 0.0349965,
0.0323837, 0.0127657, 0.0321602, 0.0422865, 0.0000000, 0.0406590, 0.0427515, 0.0000000, 0.0343376,
0.0366549, 0.0136054, 0.0223184, 0.0191138, 0.0000000, 0.0366986, 0.0349965, 0.0343376, 0.0000000,
Edge : Weight
0 - 6 : 0.2214721
0 - 5 : 0.1788104
2 - 6 : 0.1689408
0 - 3 : 0.1546867
1 - 5 : 0.0637224
6 - 7 : 0.0427515
5 - 8 : 0.0366986
4 - 5 : 0.0000000
-------------------------------------------------------------------------------
Metrics Test
Test Maximum Spanning Tree
-------------------------------------------------------------------------------
/Users/rmontanana/Code/BayesNet/tests/TestBayesMetrics.cc:58
...............................................................................
/Users/rmontanana/Code/BayesNet/tests/TestBayesMetrics.cc:69: PASSED:
REQUIRE( result == resultsMST.at(file_name) )
with expansion:
(0, 6) (0, 5) (0, 3) (5, 1) (5, 8) (5, 4) (6, 2) (6, 7)
==
(0, 6) (0, 5) (0, 3) (5, 1) (5, 8) (5, 4) (6, 2) (6, 7)
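
The edge list above is exactly what a maximum spanning tree over the printed weight matrix should produce. For illustration, a self-contained Kruskal-style sketch (sort edges by descending weight, accept an edge only when it joins two components), independent of the library's own MST implementation:

    #include <algorithm>
    #include <numeric>
    #include <vector>

    struct Edge { int u, v; double w; };

    // Union-find root lookup with path halving
    static int findRoot(std::vector<int>& parent, int x)
    {
        while (parent[x] != x) {
            parent[x] = parent[parent[x]];
            x = parent[x];
        }
        return x;
    }

    // Kruskal in descending weight order yields a *maximum* spanning tree
    std::vector<Edge> maximumSpanningTree(const std::vector<std::vector<double>>& W)
    {
        int n = static_cast<int>(W.size());
        std::vector<Edge> edges;
        for (int i = 0; i < n; ++i)
            for (int j = i + 1; j < n; ++j)
                edges.push_back({ i, j, W[i][j] });
        std::sort(edges.begin(), edges.end(),
                  [](const Edge& a, const Edge& b) { return a.w > b.w; });
        std::vector<int> parent(n);
        std::iota(parent.begin(), parent.end(), 0);
        std::vector<Edge> tree;
        for (const auto& e : edges) {
            int ru = findRoot(parent, e.u);
            int rv = findRoot(parent, e.v);
            if (ru != rv) { // joins two components: keep it
                parent[ru] = rv;
                tree.push_back(e);
                if (static_cast<int>(tree.size()) == n - 1)
                    break;
            }
        }
        // For the matrix above this reproduces the printed edge list
        // (ties among the zero-weight edges to node 4 aside)
        return tree;
    }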

View File

@ -1,25 +1,8 @@
cmake_minimum_required(VERSION 3.20)
project(bayesnet_sample)
set(CMAKE_CXX_STANDARD 17)
find_package(Torch REQUIRED)
find_library(BayesNet NAMES libBayesNet BayesNet libBayesNet.a REQUIRED)
find_path(Bayesnet_INCLUDE_DIRS REQUIRED NAMES bayesnet)
find_library(FImdlp NAMES libfimdlp.a PATHS REQUIRED)
message(STATUS "FImdlp=${FImdlp}")
message(STATUS "FImdlp_INCLUDE_DIRS=${FImdlp_INCLUDE_DIRS}")
message(STATUS "BayesNet=${BayesNet}")
message(STATUS "Bayesnet_INCLUDE_DIRS=${Bayesnet_INCLUDE_DIRS}")
include_directories(
../tests/lib/Files
lib/json/include
/usr/local/include
${FImdlp_INCLUDE_DIRS}
)
add_executable(bayesnet_sample sample.cc)
target_link_libraries(bayesnet_sample fimdlp "${TORCH_LIBRARIES}" "${BayesNet}")
include_directories(${BayesNet_SOURCE_DIR}/src/Platform)
include_directories(${BayesNet_SOURCE_DIR}/src/BayesNet)
include_directories(${BayesNet_SOURCE_DIR}/lib/Files)
include_directories(${BayesNet_SOURCE_DIR}/lib/mdlp)
include_directories(${BayesNet_SOURCE_DIR}/lib/argparse/include)
include_directories(${BayesNet_SOURCE_DIR}/lib/json/include)
add_executable(BayesNetSample sample.cc ${BayesNet_SOURCE_DIR}/src/Platform/Folding.cc ${BayesNet_SOURCE_DIR}/src/Platform/Models.cc)
target_link_libraries(BayesNetSample BayesNet ArffFiles mdlp "${TORCH_LIBRARIES}")
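
For context, building the updated sample is expected to follow the usual CMake flow, assuming libtorch and the installed BayesNet library are discoverable (paths hypothetical): cmake -B build -D CMAKE_PREFIX_PATH=/opt/libtorch, then cmake --build build.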

Some files were not shown because too many files have changed in this diff