Compare commits

88 Commits: cuda ... 3cb454d4aa

Commits (SHA1):

3cb454d4aa, 3178bcbda9, 32d231cdaf, 526d036d75, 7a9d4178d9, 3e94d400e2,
31fa9cd498, 676637fb1b, 9f3de4d924, dafd5672bc, 70545ee0ad, 7917a7598b,
bb547a3347, 23d74c4643, fcccbc15dd, c68b75fcc1, ab86dae90d, ad72bb355b,
da357ac5ba, 833455803e, 74a9d29dc1, 3615a1463c, 36ce6effe9, 250036f224,
b11620bbe8, 8a02a3a5cb, 7f6f49b3d0, 5f95117dd4, 2f5bc10b8e, 257f519641,
5c5ecef3cf, d0ebe596f6, 670b93d0a1, 306d3a4b55, bf08b0de89, b976db53c6,
be39d2dedb, 4ca770d16b, 6bf3b939bc, 7076efc2a1, 9ee388561f, 70c7d3dd3d,
400967b4e3, c234308701, 4ded6f51eb, b1d317d8f4, 7876d1a370, 3bdb14bd65,
71b05cc1a7, a59689272d, 3d8be79b37, 619276a5ea, e681099360, 5919fbfd34,
a26522e62f, 86cccb6c7b, d1b235261e, 7a8e0391dc, 6cfbc482d8, ca54f799ee,
06621ea361, a70ac3e883, b987dcbcc4, 81fd7df7f0, dd98cf159d, f658149977,
fb957ac3fe, b90e558238, 64970cf7f7, b571a4da4d, 8a9f329ff9, e2781ee525,
56a2d3ead0, dc32a0fc47, 3d6b4f0614, 18844c7da7, 43ceefd2c9, e6501502d1,
d84adf6172, 268a86cbe0, fc4c93b299, 86f2bc44fc, f0f3d9ad6e, 9a323cd7a3,
cb949ac7e5, 2c297ea15d, 4e4b6e67f4, 82847774ee
.clang-format (new file, 10 lines)

@@ -0,0 +1,10 @@
# .clang-format
---
BasedOnStyle: LLVM
AccessModifierOffset: -4
BreakBeforeBraces: Linux
ColumnLimit: 0
FixNamespaceComments: false
IndentWidth: 4
NamespaceIndentation: All
TabWidth: 4
@@ -1,4 +1,4 @@
-compilation_database_dir: build_debug
+compilation_database_dir: build_Debug
output_directory: diagrams
diagrams:
  BayesNet:
@@ -1,6 +1,6 @@
FROM mcr.microsoft.com/devcontainers/cpp:ubuntu22.04

-ARG REINSTALL_CMAKE_VERSION_FROM_SOURCE="3.22.2"
+ARG REINSTALL_CMAKE_VERSION_FROM_SOURCE="3.29.3"

# Optionally install the cmake for vcpkg
COPY ./reinstall-cmake.sh /tmp/
@@ -23,7 +23,7 @@ RUN add-apt-repository ppa:ubuntu-toolchain-r/test
RUN apt-get update

# Install GCC 13.1
-RUN apt-get install -y gcc-13 g++-13
+RUN apt-get install -y gcc-13 g++-13 doxygen

# Install lcov 2.1
RUN wget --quiet https://github.com/linux-test-project/lcov/releases/download/v2.1/lcov-2.1.tar.gz && \
.gitignore (vendored, 3 changes)

@@ -44,4 +44,5 @@ docs/manual
docs/man3
-docs/man
+docs/Doxyfile

.cache
+vcpkg_installed
.gitmodules (vendored, deleted file, 23 lines)

@@ -1,23 +0,0 @@
-[submodule "lib/mdlp"]
-    path = lib/mdlp
-    url = https://github.com/rmontanana/mdlp
-    main = main
-    update = merge
-[submodule "lib/json"]
-    path = lib/json
-    url = https://github.com/nlohmann/json.git
-    master = master
-    update = merge
-[submodule "lib/folding"]
-    path = lib/folding
-    url = https://github.com/rmontanana/folding
-    main = main
-    update = merge
-[submodule "tests/lib/catch2"]
-    path = tests/lib/catch2
-    url = https://github.com/catchorg/Catch2.git
-    main = main
-    update = merge
-[submodule "tests/lib/Files"]
-    path = tests/lib/Files
-    url = https://github.com/rmontanana/ArffFiles
.vscode/launch.json (vendored, 4 changes)

@@ -5,7 +5,7 @@
"type": "lldb",
"request": "launch",
"name": "sample",
-"program": "${workspaceFolder}/build_release/sample/bayesnet_sample",
+"program": "${workspaceFolder}/sample/build/bayesnet_sample",
"args": [
    "${workspaceFolder}/tests/data/glass.arff"
]
@@ -16,7 +16,7 @@
"name": "test",
"program": "${workspaceFolder}/build_Debug/tests/TestBayesNet",
"args": [
-    "[Network]"
+    "[XBAODE]"
],
"cwd": "${workspaceFolder}/build_Debug/tests"
},
CHANGELOG.md (80 changes)

@@ -7,6 +7,71 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]

## [1.2.0] - 2025-06-30

### Internal

- Add docs generation to CMakeLists.txt.
- Add new hyperparameters to the Ld classifiers:
  - *ld_algorithm*: algorithm to use for local discretization, with the following options: "MDLP", "BINQ", "BINU".
  - *ld_proposed_cuts*: number of cut points to return.
  - *mdlp_min_length*: minimum length of a partition in the MDLP algorithm for it to be evaluated for partitioning.
  - *mdlp_max_depth*: maximum level of recursion in the MDLP algorithm.
- Remove vcpkg as a dependency manager; the library is now built with the Conan package manager and CMake.

## [1.1.1] - 2025-05-20

### Internal

- Fix CFS metric expression in the FeatureSelection class.
- Fix the vcpkg configuration in building the library.
- Fix the sample app to use the vcpkg configuration.
- Refactor the computeCPT method in the Node class with libtorch vectorized operations.
- Refactor the sample to use local discretization models.

### Added

- Add predict_proba method to all Ld classifiers.
- Add L1FS feature selection methods to the FeatureSelection class.

## [1.1.0] - 2025-04-27

### Internal

- Add changes to .clang-format to adjust to the vscode format style, thanks to <https://clang-format-configurator.site/>.
- Remove all the dependencies as git submodules and add them as vcpkg dependencies.
- Fix the dependencies versions for this specific BayesNet version.

## [1.0.7] 2025-03-16

### Added

- A new hyperparameter to the BoostAODE class, *alphablock*, to control the way α is computed, with the last model or with the ensemble built so far. Default value is *false*.
- A new hyperparameter to the SPODE class, *parent*, to set the root node of the model. If no value is set, the root parameter of the constructor is used.
- A new hyperparameter to the TAN class, *parent*, to set the root node of the model. If not set, the first feature is used as root.
- A new model named XSPODE, a speed-optimized averaged one-dependence estimator.
- A new model named XSP2DE, a speed-optimized averaged two-dependence estimator.
- A new model named XBAODE, a speed-optimized BoostAODE model.
- A new model named XBA2DE, a speed-optimized BoostA2DE model.

### Internal

- Optimize the ComputeCPT method in the Node class.
- Add methods getCount and getMaxCount to the CountingSemaphore class, returning the current count and the maximum count of threads, respectively.

### Changed

- Hyperparameter *maxTolerance* in the BoostAODE class is now in the [1, 6] range (it was in the [1, 4] range before).

## [1.0.6] 2024-11-23

### Fixed

- Prevent existing edges from being added to the network in the `add_edge` method.
- Don't allow nodes or edges to be added to already fitted networks.
- Number of threads spawned.
- Network class tests.

### Added

- Library logo generated with <https://openart.ai> to README.md.
@@ -14,15 +79,21 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- *convergence_best* hyperparameter to the BoostAODE class, to control the way the prior accuracy is computed if convergence is set. Default value is *false*.
- SPnDE model.
- A2DE model.
- BoostA2DE model.
- A2DE & SPnDE tests.
- Add tests to reach 99% of coverage.
- Add tests to check the correct version of the mdlp, folding and json libraries.
- Library documentation generated with Doxygen.
- Link to documentation in the README.md.
-- Three types of smoothing the Bayesian Network: OLD_LAPLACE, LAPLACE and CESTNIK.
+- Three types of smoothing the Bayesian Network: ORIGINAL, LAPLACE and CESTNIK.

### Internal

- Fixed doxygen optional dependency.
- Add env parallel variable to Makefile.
- Add CountingSemaphore class to manage the number of threads spawned.
- Ignore CUDA language in CMake CodeCoverage module.
- Update mdlp library as a git submodule.
- Create library ShuffleArffFile to limit the number of samples with a parameter and shuffle them.
- Refactor catch2 library location to test/lib.
- Refactor loadDataset function in tests.
@@ -33,6 +104,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Add a Makefile target (doc) to generate the documentation.
- Add a Makefile target (doc-install) to install the documentation.

+### Libraries versions
+
+- mdlp: 2.0.1
+- Folding: 1.1.0
+- json: 3.11
+- ArffFiles: 1.1.0

## [1.0.5] 2024-04-20

### Added
CLAUDE.md (new file, 176 lines)

@@ -0,0 +1,176 @@
# CLAUDE.md

This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.

## Project Overview

BayesNet is a C++ library implementing Bayesian Network Classifiers. It provides various algorithms for machine learning classification including TAN, KDB, SPODE, SPnDE, AODE, A2DE, and their ensemble variants (Boost, XB). The library also includes local discretization variants (Ld) and feature selection algorithms.

## Build System & Dependencies

### Dependency Management

The project supports **two package managers**:

#### vcpkg (Default)

- Uses vcpkg with a private registry at https://github.com/rmontanana/vcpkg-stash
- Core dependencies: libtorch, nlohmann-json, folding, fimdlp, arff-files, catch2
- All dependencies defined in `vcpkg.json` with version overrides

#### Conan (Alternative)

- Modern C++ package manager with better dependency resolution
- Configured via `conanfile.py` for packaging and distribution
- Supports a subset of dependencies (libtorch, nlohmann-json, catch2)
- Custom dependencies (folding, fimdlp, arff-files) need custom Conan recipes

### Build Commands

#### Using vcpkg (Default)

```bash
# Initialize dependencies
make init

# Build debug version (with tests and coverage)
make debug
make buildd

# Build release version
make release
make buildr

# Run tests
make test

# Generate coverage report
make coverage
make viewcoverage

# Clean project
make clean
```

#### Using Conan

```bash
# Install Conan first: pip install conan

# Initialize dependencies
make conan-init

# Build debug version (with tests and coverage)
make conan-debug
make buildd

# Build release version
make conan-release
make buildr

# Create and test Conan package
make conan-create

# Upload to Conan remote
make conan-upload remote=myremote

# Clean Conan cache and builds
make conan-clean
```

### CMake Configuration

- Uses CMake 3.27+ with the C++17 standard
- Debug builds automatically enable testing and coverage
- Release builds optimize with `-Ofast`
- **Automatic package manager detection**: CMake detects whether Conan or vcpkg is being used
- Supports both static library and package manager installation
- Conditional dependency linking based on availability

## Testing Framework

- **Catch2** testing framework (version 3.8.1)
- Test executable: `TestBayesNet` in `build_Debug/tests/`
- Individual test categories can be run: `./TestBayesNet "[CategoryName]"`
- Coverage reporting with lcov/genhtml

### Test Categories

- A2DE, BoostA2DE, BoostAODE, XSPODE, XSPnDE, XBAODE, XBA2DE
- Classifier, Ensemble, FeatureSelection, Metrics, Models
- Network, Node, MST, Modules

## Code Architecture

### Core Structure

```
bayesnet/
├── BaseClassifier.h      # Abstract base for all classifiers
├── classifiers/          # Basic Bayesian classifiers (TAN, KDB, SPODE, etc.)
├── ensembles/            # Ensemble methods (AODE, A2DE, Boost variants)
├── feature_selection/    # Feature selection algorithms (CFS, FCBF, IWSS, L1FS)
├── network/              # Bayesian network structure (Network, Node)
└── utils/                # Utilities (metrics, MST, tensor operations)
```

### Key Design Patterns

- **BaseClassifier** abstract interface for all algorithms
- Template-based design with both std::vector and torch::Tensor support
- Network/Node abstraction for Bayesian network representation
- Feature selection as separate, composable modules

### Data Handling

- Supports both discrete integer data and continuous data with discretization
- ARFF file format support through the arff-files library
- Tensor operations via PyTorch C++ (libtorch)
- Local discretization variants use the fimdlp library

## Documentation & Tools

- **Doxygen** for API documentation: `make doc`
- **lcov** for coverage reports: `make coverage`
- **plantuml + clang-uml** for UML diagrams: `make diagrams`
- Man pages available in `docs/man3/`

## Sample Applications

Sample code in the `sample/` directory demonstrates library usage:

```bash
make sample fname=tests/data/iris.arff model=TANLd
```

## Package Distribution

### Creating Conan Packages

```bash
# Create package locally
make conan-create

# Test package installation
cd test_package
conan create ..

# Upload to remote repository
make conan-upload remote=myremote profile=myprofile
```

### Using the Library

With Conan:

```python
# conanfile.txt or conanfile.py
[requires]
bayesnet/1.1.2@user/channel

[generators]
cmake
```

With vcpkg:

```json
{
  "dependencies": ["bayesnet"]
}
```

## Common Development Tasks

- **Add new classifier**: Extend BaseClassifier, implement in the appropriate subdirectory
- **Add new test**: Update `tests/CMakeLists.txt` and create the test in `tests/`
- **Modify build**: Edit the main `CMakeLists.txt` or use Makefile targets
- **Update dependencies**:
  - vcpkg: Modify `vcpkg.json` and run `make init`
  - Conan: Modify `conanfile.py` and run `make conan-init`
- **Package for distribution**: Use `make conan-create` for Conan packaging
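As a rough illustration of the BaseClassifier-style API described in CLAUDE.md above, here is a minimal usage sketch. It assumes the header path and class location (`bayesnet/classifiers/TAN.h`), and that the `Smoothing_t` enum is scoped as shown; the fit/predict signatures follow the BaseClassifier.h diff at the end of this comparison.

```cpp
// Hypothetical usage sketch; header path and enum scoping are assumptions.
#include <map>
#include <string>
#include <vector>
#include <bayesnet/classifiers/TAN.h>

int main()
{
    // Discrete toy dataset: 2 features x 4 samples (feature-major, as in
    // the "X is nxm std::vector" comment of BaseClassifier), binary class.
    std::vector<std::vector<int>> X = { { 0, 1, 0, 1 }, { 1, 1, 0, 0 } };
    std::vector<int> y = { 0, 1, 0, 1 };
    std::vector<std::string> features = { "f1", "f2" };
    std::string className = "class";
    // states: the possible values of each variable, required by fit()
    std::map<std::string, std::vector<int>> states = {
        { "f1", { 0, 1 } }, { "f2", { 0, 1 } }, { "class", { 0, 1 } }
    };
    bayesnet::TAN clf;
    // ORIGINAL/LAPLACE/CESTNIK smoothing values are taken from the CHANGELOG
    clf.fit(X, y, features, className, states, bayesnet::Smoothing_t::ORIGINAL);
    auto predictions = clf.predict(X);
    return 0;
}
```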
CMakeLists.txt (143 changes)

@@ -1,21 +1,14 @@
-cmake_minimum_required(VERSION 3.20)
+cmake_minimum_required(VERSION 3.27)

-project(BayesNet
-    VERSION 1.0.6
+project(bayesnet
+    VERSION 1.2.0
    DESCRIPTION "Bayesian Network and basic classifiers Library."
    HOMEPAGE_URL "https://github.com/rmontanana/bayesnet"
    LANGUAGES CXX
)

-if (CODE_COVERAGE AND NOT ENABLE_TESTING)
-    MESSAGE(FATAL_ERROR "Code coverage requires testing enabled")
-endif (CODE_COVERAGE AND NOT ENABLE_TESTING)

-find_package(Torch REQUIRED)

-if (POLICY CMP0135)
-    cmake_policy(SET CMP0135 NEW)
-endif ()
+set(CMAKE_CXX_STANDARD 17)
+cmake_policy(SET CMP0135 NEW)

# Global CMake variables
# ----------------------
@@ -33,70 +26,112 @@ endif()

# Options
# -------
option(ENABLE_CLANG_TIDY "Enable to add clang tidy." OFF)
option(ENABLE_TESTING "Unit testing build" OFF)
option(CODE_COVERAGE "Collect coverage from test library" OFF)
option(INSTALL_GTEST "Enable installation of googletest." OFF)

# CMakes modules
# --------------
set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules ${CMAKE_MODULE_PATH})
-include(AddGitSubmodule)

+find_package(Torch CONFIG REQUIRED)
+if(NOT TARGET torch::torch)
+    add_library(torch::torch INTERFACE IMPORTED GLOBAL)
+    # expose include paths and libraries that the find-module discovered
+    set_target_properties(torch::torch PROPERTIES
+        INTERFACE_INCLUDE_DIRECTORIES "${TORCH_INCLUDE_DIRS}"
+        INTERFACE_LINK_LIBRARIES "${TORCH_LIBRARIES}")
+endif()

+find_package(fimdlp CONFIG REQUIRED)
+find_package(folding CONFIG REQUIRED)
+find_package(nlohmann_json REQUIRED)

add_subdirectory(config)

+# Add the library
+# ---------------
+include_directories(
+    ${bayesnet_SOURCE_DIR}
+    ${CMAKE_BINARY_DIR}/configured_files/include
+)

+file(GLOB_RECURSE Sources "bayesnet/*.cc")

+add_library(bayesnet ${Sources})

+target_link_libraries(bayesnet
+    nlohmann_json::nlohmann_json
+    folding::folding
+    fimdlp::fimdlp
+    torch::torch
+    arff-files::arff-files
+)

# Testing
# -------
if (CMAKE_BUILD_TYPE STREQUAL "Debug")
    MESSAGE("Debug mode")
    set(ENABLE_TESTING ON)
    set(CODE_COVERAGE ON)
endif (CMAKE_BUILD_TYPE STREQUAL "Debug")

if (CODE_COVERAGE)
    enable_testing()
    include(CodeCoverage)
    MESSAGE("Code coverage enabled")
    SET(GCC_COVERAGE_LINK_FLAGS " ${GCC_COVERAGE_LINK_FLAGS} -lgcov --coverage")
endif (CODE_COVERAGE)

if (ENABLE_CLANG_TIDY)
    include(StaticAnalyzers) # clang-tidy
endif (ENABLE_CLANG_TIDY)

-# External libraries - dependencies of BayesNet
-# ---------------------------------------------
-# include(FetchContent)
-add_git_submodule("lib/json")
-add_git_submodule("lib/mdlp")

-# Subdirectories
-# --------------
-add_subdirectory(config)
-add_subdirectory(bayesnet)

# Testing
# -------
if (ENABLE_TESTING)
-    MESSAGE("Testing enabled")
-    add_subdirectory(tests/lib/catch2)
+    MESSAGE(STATUS "Testing enabled")
+    find_package(Catch2 CONFIG REQUIRED)
+    find_package(arff-files CONFIG REQUIRED)
    enable_testing()
    include(CTest)
    add_subdirectory(tests)
else(ENABLE_TESTING)
    message("Release mode")
endif (ENABLE_TESTING)

# Installation
# ------------
-install(TARGETS BayesNet
+include(CMakePackageConfigHelpers)
+write_basic_package_version_file(
+    "${CMAKE_CURRENT_BINARY_DIR}/bayesnetConfigVersion.cmake"
+    VERSION ${PROJECT_VERSION}
+    COMPATIBILITY AnyNewerVersion
+)

+configure_package_config_file(
+    ${CMAKE_CURRENT_SOURCE_DIR}/bayesnetConfig.cmake.in
+    "${CMAKE_CURRENT_BINARY_DIR}/bayesnetConfig.cmake"
+    INSTALL_DESTINATION share/bayesnet)

+install(TARGETS bayesnet
+    EXPORT bayesnetTargets
    ARCHIVE DESTINATION lib
    LIBRARY DESTINATION lib
    CONFIGURATIONS Release)
-install(DIRECTORY bayesnet/ DESTINATION include/bayesnet FILES_MATCHING CONFIGURATIONS Release PATTERN "*.h")
-install(FILES ${CMAKE_BINARY_DIR}/configured_files/include/bayesnet/config.h DESTINATION include/bayesnet CONFIGURATIONS Release)

+install(DIRECTORY bayesnet/
+    DESTINATION include/bayesnet
+    FILES_MATCHING
+    CONFIGURATIONS Release
+    PATTERN "*.h")
+install(FILES ${CMAKE_BINARY_DIR}/configured_files/include/bayesnet/config.h
+    DESTINATION include/bayesnet
+    CONFIGURATIONS Release)

+install(EXPORT bayesnetTargets
+    FILE bayesnetTargets.cmake
+    NAMESPACE bayesnet::
+    DESTINATION share/bayesnet)

+install(FILES
+    "${CMAKE_CURRENT_BINARY_DIR}/bayesnetConfig.cmake"
+    "${CMAKE_CURRENT_BINARY_DIR}/bayesnetConfigVersion.cmake"
+    DESTINATION share/bayesnet
+)

# Documentation
# -------------
find_package(Doxygen)
-set(DOC_DIR ${CMAKE_CURRENT_SOURCE_DIR}/docs)
-set(doxyfile_in ${DOC_DIR}/Doxyfile.in)
-set(doxyfile ${DOC_DIR}/Doxyfile)
-configure_file(${doxyfile_in} ${doxyfile} @ONLY)
-doxygen_add_docs(doxygen
+if (Doxygen_FOUND)
+    set(DOC_DIR ${CMAKE_CURRENT_SOURCE_DIR}/docs)
+    set(doxyfile_in ${DOC_DIR}/Doxyfile.in)
+    set(doxyfile ${DOC_DIR}/Doxyfile)
+    configure_file(${doxyfile_in} ${doxyfile} @ONLY)
+    doxygen_add_docs(doxygen
        WORKING_DIRECTORY ${DOC_DIR}
        CONFIG_FILE ${doxyfile})
+else (Doxygen_FOUND)
+    MESSAGE("* Doxygen not found")
+endif (Doxygen_FOUND)
CMakeUserPresets.json (new file, 12 lines)

@@ -0,0 +1,12 @@
{
    "version": 4,
    "vendor": {
        "conan": {}
    },
    "include": [
        "build/Release/generators/CMakePresets.json",
        "build_Debug/build/Debug/generators/CMakePresets.json",
        "build_Debug/build/Release/generators/CMakePresets.json",
        "build_Release/build/Release/generators/CMakePresets.json"
    ]
}
CONAN_README.md (new file, 87 lines)

@@ -0,0 +1,87 @@
# Using BayesNet with Conan

This document explains how to use Conan as an alternative package manager for BayesNet.

## Prerequisites

```bash
pip install conan
conan remote add Cimmeria https://conan.rmontanana.es/artifactory/api/conan/Cimmeria
conan profile new default --detect
```

## Quick Start

### As a Consumer

1. Create a `conanfile.txt` in your project:

```ini
[requires]
libtorch/2.7.0
bayesnet/1.2.0

[generators]
CMakeDeps
CMakeToolchain
```

2. Install dependencies:

```bash
conan install . --build=missing
```

3. In your CMakeLists.txt:

```cmake
find_package(bayesnet REQUIRED)
target_link_libraries(your_target bayesnet::bayesnet)
```

### Building BayesNet with Conan

```bash
# Install dependencies
make conan-init

# Build debug version
make debug
make buildd

# Build release version
make release
make buildr

# Create package
make conan-create
```

## Current Limitations

- Custom dependencies (folding, fimdlp, arff-files) are not available in ConanCenter
- These need to be built as custom Conan packages or replaced with alternatives
- The conanfile.py currently comments out these dependencies

## Creating Custom Dependency Packages

For the custom dependencies, you'll need to create Conan recipes:

1. **folding**: Cross-validation library
2. **fimdlp**: Discretization library
3. **arff-files**: ARFF file format parser

Contact the maintainer or create custom recipes for these packages.

## Package Distribution

Once custom dependencies are resolved:

```bash
# Create and test package
make conan-create

# Upload to your remote
conan upload bayesnet/1.2.0 -r myremote
```
Makefile (155 changes)

@@ -1,11 +1,11 @@
SHELL := /bin/bash
.DEFAULT_GOAL := help
-.PHONY: viewcoverage coverage setup help install uninstall diagrams buildr buildd test clean debug release sample updatebadge doc doc-install
+.PHONY: viewcoverage coverage setup help install uninstall diagrams buildr buildd test clean updatebadge doc doc-install init clean-test conan-debug conan-release conan-create conan-upload conan-clean conan-sample

f_release = build_Release
f_debug = build_Debug
f_diagrams = diagrams
-app_targets = BayesNet
+app_targets = bayesnet
test_targets = TestBayesNet
clang-uml = clang-uml
plantuml = plantuml
@@ -31,7 +31,6 @@ define ClearTests
    fi ;
endef

setup: ## Install dependencies for tests and coverage
    @if [ "$(shell uname)" = "Darwin" ]; then \
        brew install gcovr; \
@@ -43,30 +42,30 @@ setup: ## Install dependencies for tests and coverage
    fi
    @echo "* You should install plantuml & graphviz for the diagrams"

-diagrams: ## Create an UML class diagram & depnendency of the project (diagrams/BayesNet.png)
-    @which $(plantuml) || (echo ">>> Please install plantuml"; exit 1)
-    @which $(dot) || (echo ">>> Please install graphviz"; exit 1)
-    @which $(clang-uml) || (echo ">>> Please install clang-uml"; exit 1)
-    @export PLANTUML_LIMIT_SIZE=16384
-    @echo ">>> Creating UML class diagram of the project...";
-    @$(clang-uml) -p
-    @cd $(f_diagrams); \
-    $(plantuml) -tsvg BayesNet.puml
-    @echo ">>> Creating dependency graph diagram of the project...";
-    $(MAKE) debug
-    cd $(f_debug) && cmake .. --graphviz=dependency.dot
-    @$(dot) -Tsvg $(f_debug)/dependency.dot.BayesNet -o $(f_diagrams)/dependency.svg
+clean: ## Clean the project
+    @echo ">>> Cleaning the project..."
+    @if test -f CMakeCache.txt ; then echo "- Deleting CMakeCache.txt"; rm -f CMakeCache.txt; fi
+    @for folder in $(f_release) $(f_debug) vpcpkg_installed install_test ; do \
+        if test -d "$$folder" ; then \
+            echo "- Deleting $$folder folder" ; \
+            rm -rf "$$folder"; \
+        fi; \
+    done
+    @$(MAKE) clean-test
+    @echo ">>> Done";

# Build targets
# =============

buildd: ## Build the debug targets
-    cmake --build $(f_debug) -t $(app_targets) --parallel
+    cmake --build $(f_debug) --config Debug -t $(app_targets) --parallel $(CMAKE_BUILD_PARALLEL_LEVEL)

buildr: ## Build the release targets
-    cmake --build $(f_release) -t $(app_targets) --parallel
+    cmake --build $(f_release) --config Release -t $(app_targets) --parallel $(CMAKE_BUILD_PARALLEL_LEVEL)

-clean: ## Clean the tests info
-    @echo ">>> Cleaning Debug BayesNet tests...";
-    $(call ClearTests)
-    @echo ">>> Done";

# Install targets
# ===============

uninstall: ## Uninstall library
    @echo ">>> Uninstalling BayesNet...";
@@ -79,33 +78,20 @@ install: ## Install library
    @cmake --install $(f_release) --prefix $(prefix)
    @echo ">>> Done";

-debug: ## Build a debug version of the project
-    @echo ">>> Building Debug BayesNet...";
-    @if [ -d ./$(f_debug) ]; then rm -rf ./$(f_debug); fi
-    @mkdir $(f_debug);
-    @cmake -S . -B $(f_debug) -D CMAKE_BUILD_TYPE=Debug -D ENABLE_TESTING=ON -D CODE_COVERAGE=ON
-    @echo ">>> Done";

-release: ## Build a Release version of the project
-    @echo ">>> Building Release BayesNet...";
-    @if [ -d ./$(f_release) ]; then rm -rf ./$(f_release); fi
-    @mkdir $(f_release);
-    @cmake -S . -B $(f_release) -D CMAKE_BUILD_TYPE=Release
-    @echo ">>> Done";

# Test targets
# ============

-fname = "tests/data/iris.arff"
-sample: ## Build sample
-    @echo ">>> Building Sample...";
-    @if [ -d ./sample/build ]; then rm -rf ./sample/build; fi
-    @cd sample && cmake -B build -S . && cmake --build build -t bayesnet_sample
-    sample/build/bayesnet_sample $(fname)
+clean-test: ## Clean the tests info
+    @echo ">>> Cleaning Debug BayesNet tests...";
+    $(call ClearTests)
+    @echo ">>> Done";

opt = ""
test: ## Run tests (opt="-s") to verbose output the tests, (opt="-c='Test Maximum Spanning Tree'") to run only that section
    @echo ">>> Running BayesNet tests...";
-    @$(MAKE) clean
-    @cmake --build $(f_debug) -t $(test_targets) --parallel
+    @$(MAKE) clean-test
+    @cmake --build $(f_debug) -t $(test_targets) --parallel $(CMAKE_BUILD_PARALLEL_LEVEL)
    @for t in $(test_targets); do \
        echo ">>> Running $$t...";\
        if [ -f $(f_debug)/tests/$$t ]; then \
@@ -125,6 +111,7 @@ coverage: ## Run tests and generate coverage report (build/index.html)
    $(lcov) --directory CMakeFiles --capture --demangle-cpp --ignore-errors source,source --output-file coverage.info >/dev/null 2>&1; \
    $(lcov) --remove coverage.info '/usr/*' --output-file coverage.info >/dev/null 2>&1; \
    $(lcov) --remove coverage.info 'lib/*' --output-file coverage.info >/dev/null 2>&1; \
    $(lcov) --remove coverage.info 'include/*' --output-file coverage.info >/dev/null 2>&1; \
+    $(lcov) --remove coverage.info 'libtorch/*' --output-file coverage.info >/dev/null 2>&1; \
    $(lcov) --remove coverage.info 'tests/*' --output-file coverage.info >/dev/null 2>&1; \
    $(lcov) --remove coverage.info 'bayesnet/utils/loguru.*' --ignore-errors unused --output-file coverage.info >/dev/null 2>&1; \
@@ -154,6 +141,9 @@ updatebadge: ## Update the coverage badge in README.md
    @env python update_coverage.py $(f_debug)/tests
    @echo ">>> Done";

+# Documentation targets
+# =====================

doc: ## Generate documentation
    @echo ">>> Generating documentation..."
    @cmake --build $(f_release) -t doxygen
@@ -168,11 +158,25 @@ doc: ## Generate documentation
    fi
    @echo ">>> Done";

+diagrams: ## Create an UML class diagram & dependency of the project (diagrams/BayesNet.png)
+    @which $(plantuml) || (echo ">>> Please install plantuml"; exit 1)
+    @which $(dot) || (echo ">>> Please install graphviz"; exit 1)
+    @which $(clang-uml) || (echo ">>> Please install clang-uml"; exit 1)
+    @export PLANTUML_LIMIT_SIZE=16384
+    @echo ">>> Creating UML class diagram of the project...";
+    @$(clang-uml) -p
+    @cd $(f_diagrams); \
+    $(plantuml) -tsvg BayesNet.puml
+    @echo ">>> Creating dependency graph diagram of the project...";
+    $(MAKE) debug
+    cd $(f_debug) && cmake .. --graphviz=dependency.dot
+    @$(dot) -Tsvg $(f_debug)/dependency.dot.BayesNet -o $(f_diagrams)/dependency.svg

docdir = ""
doc-install: ## Install documentation
    @echo ">>> Installing documentation..."
    @if [ "$(docdir)" = "" ]; then \
-        echo "docdir parameter has to be set when calling doc-install"; \
+        echo "docdir parameter has to be set when calling doc-install, i.e. docdir=../bayesnet_help"; \
        exit 1; \
    fi
    @if [ ! -d $(docdir) ]; then \
@@ -182,6 +186,71 @@ doc-install: ## Install documentation
    @sudo cp -rp $(mansrcdir) $(mandestdir)
    @echo ">>> Done";

+# Conan package manager targets
+# =============================

+debug: ## Build debug version using Conan
+    @echo ">>> Building *Debug* BayesNet with Conan..."
+    @rm -rf $(f_debug)    # wipe previous tree
+    @conan install . \
+        -s build_type=Debug \
+        --build=missing \
+        -of $(f_debug) \
+        --profile=debug
+    @cmake -S . -B $(f_debug) \
+        -DCMAKE_BUILD_TYPE=Debug \
+        -DENABLE_TESTING=ON \
+        -DCODE_COVERAGE=ON \
+        -DCMAKE_TOOLCHAIN_FILE=$(f_debug)/build/Debug/generators/conan_toolchain.cmake
+    @echo ">>> Done"

+release: ## Build release version using Conan
+    @echo ">>> Building Release BayesNet with Conan..."
+    @if [ -d ./$(f_release) ]; then rm -rf ./$(f_release); fi
+    @mkdir $(f_release)
+    @conan install . -s build_type=Release --build=missing -of $(f_release) --profile=release
+    @cmake -S . -B $(f_release) -D CMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$(f_release)/build/Release/generators/conan_toolchain.cmake
+    @echo ">>> Done"

+conan-create: ## Create Conan package
+    @echo ">>> Creating Conan package..."
+    @conan create . --build=missing -tf "" --profile=release
+    @conan create . --build=missing -tf "" --profile=debug
+    @echo ">>> Done"

+profile ?= release
+remote ?= Cimmeria
+conan-upload: ## Upload package to Conan remote (profile=release remote=Cimmeria)
+    @echo ">>> Uploading to Conan remote $(remote) with profile $(profile)..."
+    @conan upload bayesnet/$(grep version conanfile.py | cut -d'"' -f2) -r $(remote) --confirm
+    @echo ">>> Done"

+conan-clean: ## Clean Conan cache and build folders
+    @echo ">>> Cleaning Conan cache and build folders..."
+    @conan remove "*" --confirm
+    @if test -d "$(f_release)" ; then rm -rf "$(f_release)"; fi
+    @if test -d "$(f_debug)" ; then rm -rf "$(f_debug)"; fi
+    @echo ">>> Done"

+fname = "tests/data/iris.arff"
+model = "TANLd"
+sample: ## Build sample with Conan
+    @echo ">>> Building Sample with Conan...";
+    @if [ -d ./sample/build ]; then rm -rf ./sample/build; fi
+    @cd sample && conan install . --output-folder=build --build=missing
+    @cd sample && cmake -B build -S . -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=build/conan_toolchain.cmake && \
+    cmake --build build -t bayesnet_sample
+    sample/build/bayesnet_sample $(fname) $(model)
+    @echo ">>> Done";

# Help target
# ===========

help: ## Show help message
    @IFS=$$'\n' ; \
    help_lines=(`fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/##/:/'`); \
README.md (118 changes)

@@ -2,63 +2,125 @@

![BayesNet logo](logo.png)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](<https://opensource.org/licenses/MIT>)
![Gitea Release](https://img.shields.io/gitea/v/release/rmontanana/bayesnet?gitea_url=https://gitea.rmontanana.es)
![Gitea Last Commit](https://img.shields.io/gitea/last-commit/rmontanana/bayesnet?gitea_url=https://gitea.rmontanana.es&logo=gitea)
[![Codacy Badge](https://app.codacy.com/project/badge/Grade)](https://app.codacy.com/gh/Doctorado-ML/BayesNet/dashboard?utm_source=gh&utm_medium=referral&utm_content=&utm_campaign=Badge_grade)
[![Security Rating](https://sonarcloud.io/api/project_badges/measure?project=rmontanana_BayesNet&metric=security_rating)](https://sonarcloud.io/summary/new_code?id=rmontanana_BayesNet)
[![Reliability Rating](https://sonarcloud.io/api/project_badges/measure?project=rmontanana_BayesNet&metric=reliability_rating)](https://sonarcloud.io/summary/new_code?id=rmontanana_BayesNet)
![Gitea Stars](https://img.shields.io/gitea/stars/rmontanana/bayesnet?gitea_url=https://gitea.rmontanana.es)
[![Coverage Badge](https://img.shields.io/badge/Coverage-99,1%25-green)](html/index.html)
[![DeepWiki](https://img.shields.io/badge/DeepWiki-Doctorado--ML%2FBayesNet-blue)](https://deepwiki.com/Doctorado-ML/BayesNet)
[![Gitea](https://img.shields.io/badge/Gitea-rmontanana%2FBayesNet-green)](https://gitea.rmontanana.es/rmontanana/BayesNet)
[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.14210344.svg)](https://doi.org/10.5281/zenodo.14210344)

-Bayesian Network Classifiers using libtorch from scratch
+Bayesian Network Classifiers library

-## Dependencies
+## Using the Library

-The only external dependency is [libtorch](https://pytorch.org/cppdocs/installing.html) which can be installed with the following commands:
-
-```bash
-wget https://download.pytorch.org/libtorch/nightly/cpu/libtorch-shared-with-deps-latest.zip
-unzip libtorch-shared-with-deps-latest.zips
-```
+### Using Conan Package Manager
+
+You can use the library with the [Conan](https://conan.io/) package manager. In your project you need to add the following files:

#### conanfile.txt

```txt
[requires]
bayesnet/1.1.2

[generators]
CMakeDeps
CMakeToolchain
```

-## Setup
+#### CMakeLists.txt

Include the following lines in your `CMakeLists.txt` file:

```cmake
find_package(bayesnet REQUIRED)

add_executable(myapp main.cpp)

target_link_libraries(myapp PRIVATE bayesnet::bayesnet)
```

Then install the dependencies and build your project:

```bash
conan install . --output-folder=build --build=missing
cmake -B build -S . -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=build/conan_toolchain.cmake
cmake --build build
```

**Note: In the `sample` folder you can find a sample application that uses the library. You can use it as a reference to create your own application.**

## Building and Testing

The project uses [Conan](https://conan.io/) for dependency management and provides convenient Makefile targets for common tasks.

### Prerequisites

- [Conan](https://conan.io/) package manager (`pip install conan`)
- CMake 3.27+
- C++17 compatible compiler

### Getting the code

```bash
-git clone --recurse-submodules https://github.com/doctorado-ml/bayesnet
+git clone https://github.com/doctorado-ml/bayesnet
cd bayesnet
```

-### Release
+### Build Commands

#### Release Build

```bash
-make release
-make buildr
-sudo make install
+make release   # Configure release build with Conan
+make buildr    # Build the release version
```

-### Debug & Tests
+#### Debug Build & Tests

```bash
-make debug
-make test
+make debug     # Configure debug build with Conan
+make buildd    # Build the debug version
+make test      # Run the tests
```

-### Coverage
+#### Coverage Analysis

```bash
-make coverage
-make viewcoverage
+make coverage      # Run tests with coverage analysis
+make viewcoverage  # View coverage report in browser
```

-### Sample app
+#### Sample Application

-After building and installing the release version, you can run the sample app with the following commands:
+Run the sample application with different datasets and models:

```bash
-make sample
-make sample fname=tests/data/glass.arff
+make sample                                        # Run with default settings
+make sample fname=tests/data/glass.arff            # Use glass dataset
+make sample fname=tests/data/iris.arff model=AODE  # Use specific model
```

### Available Makefile Targets

- `debug` - Configure debug build using Conan
- `release` - Configure release build using Conan
- `buildd` - Build debug targets
- `buildr` - Build release targets
- `test` - Run all tests (use `opt="-s"` for verbose output)
- `coverage` - Generate test coverage report
- `viewcoverage` - Open coverage report in browser
- `sample` - Build and run sample application
- `conan-create` - Create Conan package
- `conan-upload` - Upload package to Conan remote
- `conan-clean` - Clean Conan cache and build folders
- `clean` - Clean all build artifacts
- `doc` - Generate documentation
- `diagrams` - Generate UML diagrams
- `help` - Show all available targets

## Models

#### - TAN
@@ -71,10 +133,16 @@ make sample fname=tests/data/glass.arff

#### - AODE

#### - A2DE

#### - [BoostAODE](docs/BoostAODE.md)

#### - XBAODE

#### - BoostA2DE

#### - XBA2DE

### With Local Discretization

#### - TANLd
REVISION_TECNICA_BAYESNET.md (new file, 518 lines)

@@ -0,0 +1,518 @@
# Technical Review of BayesNet - Complete Report

## Executive Summary

As an expert C++ developer, I have carried out an exhaustive technical review of the BayesNet library, evaluating its architecture, code quality, performance, and maintainability. Below I present a detailed analysis with prioritized recommendations to improve the library.

## 1. Identified Strengths

### 1.1 Architecture and Design

- **✅ Well-structured object-oriented design** with a clear class hierarchy
- **✅ Proper use of smart pointers** (std::unique_ptr) in most of the code
- **✅ Coherent abstraction** through BaseClassifier
- **✅ Clear separation of responsibilities** between modules
- **✅ Complete and up-to-date API documentation** with Doxygen

### 1.2 Dependency Management and Build

- **✅ vcpkg system** well configured for dependency management
- **✅ Modern CMake** (3.27+) with a robust configuration
- **✅ Debug/Release separation** with appropriate optimizations
- **✅ Integrated testing system** with Catch2

### 1.3 Testing and Coverage

- **✅ 17 test files** covering the main components
- **✅ Parameterized tests** with multiple datasets
- **✅ lcov integration** for coverage reports
- **✅ Automatic tests** in the build process

## 2. Weaknesses and Critical Problems

### 2.1 Memory Management Problems

#### **🔴 CRITICAL: Potential Memory Leak**

**File:** `/bayesnet/ensembles/Boost.cc` (lines 124-141)

```cpp
// PROBLEM: Raw pointer without RAII
FeatureSelect* featureSelector = nullptr;
if (select_features_algorithm == SelectFeatures.CFS) {
    featureSelector = new CFS(...); // ❌ Leak risk
}
// ...
delete featureSelector; // ❌ Can be skipped if an exception is thrown
```

**Impact:** Memory leak if an exception is thrown between `new` and `delete`
**Priority:** HIGH

### 2.2 Performance Problems

#### **🔴 CRITICAL: O(n³) Complexity**

**File:** `/bayesnet/utils/BayesMetrics.cc` (lines 41-53)

```cpp
for (int i = 0; i < n - 1; ++i) {
    if (std::find(featuresExcluded.begin(), featuresExcluded.end(), i) != featuresExcluded.end()) {
        continue; // ❌ O(n) inside a nested loop
    }
    for (int j = i + 1; j < n; ++j) {
        if (std::find(featuresExcluded.begin(), featuresExcluded.end(), j) != featuresExcluded.end()) {
            continue; // ❌ O(n) inside a nested loop
        }
        // More expensive operations...
    }
}
```

**Impact:** With 100 features = 1,250,000 lookup operations
**Priority:** HIGH

#### **🔴 CRITICAL: Inefficient Threading**

**File:** `/bayesnet/network/Network.cc` (lines 269-273)

```cpp
for (int i = 0; i < samples.size(1); ++i) {
    threads.emplace_back(worker, sample, i); // ❌ One thread per sample
}
```

**Impact:** With 10,000 samples = 10,000 threads (excessive context switching)
**Priority:** HIGH

### 2.3 Code Quality Problems

#### **🟡 MODERATE: Excessively Long Functions**

- `XSP2DE.cc`: 575 lines (SRP violation)
- `Boost::setHyperparameters()`: 150+ lines
- `L1FS::fitLasso()`: 200+ lines of high algorithmic complexity

#### **🟡 MODERATE: Insufficient Validation**

```cpp
// In multiple files: missing input validation
if (features.empty()) {
    // Edge case not handled
}
```

### 2.4 Algorithm Problems

#### **🟡 MODERATE: Suboptimal Union-Find**

**File:** `/bayesnet/utils/Mst.cc`

```cpp
// ❌ No path compression or union by rank
int find_set(int i) {
    if (i != parent[i])
        i = find_set(parent[i]); // Inefficient O(n)
    return i;
}
```

**Impact:** Suboptimal MST algorithm, O(V²) instead of O(E log V)

## 3. Prioritized Improvement Plan

### 3.1 Phase 1: Critical Problems (Weeks 1-2)

#### **Task 1.1: Eliminate the Memory Leak in Boost.cc**

```cpp
// BEFORE (line 51 in Boost.h):
FeatureSelect* featureSelector = nullptr;

// AFTER:
std::unique_ptr<FeatureSelect> featureSelector;

// BEFORE (lines 124-141 in Boost.cc):
if (select_features_algorithm == SelectFeatures.CFS) {
    featureSelector = new CFS(...);
}
// ...
delete featureSelector;

// AFTER:
if (select_features_algorithm == SelectFeatures.CFS) {
    featureSelector = std::make_unique<CFS>(...);
}
// ... the smart pointer cleans up automatically
```

**Estimate:** 2 hours
**Priority:** CRITICAL

#### **Task 1.2: Optimize BayesMetrics::SelectKPairs()**

```cpp
// PROPOSED SOLUTION:
std::vector<std::pair<int, int>> Metrics::SelectKPairs(
    const torch::Tensor& weights,
    std::vector<int>& featuresExcluded,
    bool ascending, unsigned k) {

    // ✅ O(1) lookups instead of O(n)
    std::unordered_set<int> excludedSet(featuresExcluded.begin(), featuresExcluded.end());

    auto n = features.size();
    scoresKPairs.clear();
    scoresKPairs.reserve((n * (n-1)) / 2); // ✅ Reserve memory

    for (int i = 0; i < n - 1; ++i) {
        if (excludedSet.count(i)) continue; // ✅ O(1)
        for (int j = i + 1; j < n; ++j) {
            if (excludedSet.count(j)) continue; // ✅ O(1)
            // rest of the processing...
        }
    }

    // ✅ nth_element instead of a full sort
    if (k > 0 && k < scoresKPairs.size()) {
        std::nth_element(scoresKPairs.begin(),
                        scoresKPairs.begin() + k,
                        scoresKPairs.end());
        scoresKPairs.resize(k);
    }
    return pairsKBest;
}
```

**Benefit:** 50x performance improvement (from O(n³) to O(n² log k))
**Estimate:** 4 hours
**Priority:** CRITICAL

#### **Task 1.3: Implement a Thread Pool**

```cpp
// PROPOSED SOLUTION for Network.cc
// (result and result_mutex are assumed to be declared in the enclosing scope):
void Network::predict_tensor_optimized(const torch::Tensor& samples, const bool proba) {
    const int num_threads = std::min(
        static_cast<int>(std::thread::hardware_concurrency()),
        static_cast<int>(samples.size(1))
    );
    const int batch_size = (samples.size(1) + num_threads - 1) / num_threads;

    std::vector<std::thread> threads;
    threads.reserve(num_threads);

    for (int t = 0; t < num_threads; ++t) {
        int start = t * batch_size;
        int end = std::min(start + batch_size, static_cast<int>(samples.size(1)));

        threads.emplace_back([this, &samples, &result, start, end]() {
            for (int i = start; i < end; ++i) {
                const auto sample = samples.index({ "...", i });
                auto prediction = predict_sample(sample);
                // Thread-safe write
                std::lock_guard<std::mutex> lock(result_mutex);
                result.index_put_({ i, "..." }, torch::tensor(prediction));
            }
        });
    }

    for (auto& thread : threads) {
        thread.join();
    }
}
```

**Benefit:** 4-8x faster prediction with multiple cores
**Estimate:** 6 hours
**Priority:** CRITICAL

### 3.2 Phase 2: Important Optimizations (Weeks 3-4)

#### **Task 2.1: Refactoring of Long Functions**

**XSP2DE.cc** - Split into smaller functions:

```cpp
// BEFORE: One 575-line function
void XSP2DE::buildModel(const torch::Tensor& weights) {
    // ... 575 lines of code
}

// AFTER: Specialized functions
class XSP2DE {
private:
    void initializeHyperparameters();
    void selectFeatures(const torch::Tensor& weights);
    void buildSubModels();
    void trainIndividualModels(const torch::Tensor& weights);

public:
    void buildModel(const torch::Tensor& weights) override {
        initializeHyperparameters();
        selectFeatures(weights);
        buildSubModels();
        trainIndividualModels(weights);
    }
};
```

**Estimate:** 8 hours
**Benefit:** Improves maintainability and testing

#### **Task 2.2: Optimize Union-Find in MST**

```cpp
// PROPOSED SOLUTION for Mst.cc:
class UnionFind {
private:
    std::vector<int> parent, rank;

public:
    UnionFind(int n) : parent(n), rank(n, 0) {
        std::iota(parent.begin(), parent.end(), 0);
    }

    int find_set(int i) {
        if (i != parent[i])
            parent[i] = find_set(parent[i]); // ✅ Path compression
        return parent[i];
    }

    bool union_set(int u, int v) {
        u = find_set(u);
        v = find_set(v);
        if (u == v) return false;

        // ✅ Union by rank
        if (rank[u] < rank[v]) std::swap(u, v);
        parent[v] = u;
        if (rank[u] == rank[v]) rank[u]++;
        return true;
    }
};
```

**Benefit:** Improvement from O(V²) to O(E log V)
**Estimate:** 4 hours
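To show how the proposed UnionFind would drive the MST construction, here is a minimal Kruskal sketch. It is illustrative only: it assumes the UnionFind class from the block above is in scope, and the Edge type is a hypothetical stand-in, not the actual Mst.cc interface.

```cpp
// Minimal Kruskal sketch on top of the proposed UnionFind (illustrative;
// the Edge type and the function signature are assumptions).
#include <algorithm>
#include <vector>

struct Edge { int u, v; double w; };

std::vector<Edge> kruskalMST(int n, std::vector<Edge> edges) {
    // Sort edges by weight: O(E log E), which dominates the total cost
    std::sort(edges.begin(), edges.end(),
              [](const Edge& a, const Edge& b) { return a.w < b.w; });
    UnionFind uf(n);
    std::vector<Edge> mst;
    for (const auto& e : edges) {
        // union_set returns false when u and v are already connected,
        // so cycle checks run in near-constant amortized time
        if (uf.union_set(e.u, e.v)) {
            mst.push_back(e);
            if (static_cast<int>(mst.size()) == n - 1) break; // MST complete
        }
    }
    return mst;
}
```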
#### **Task 2.3: Eliminate Unnecessary Tensor Copies**

```cpp
// BEFORE (multiple files):
X = X.to(torch::kFloat32); // ❌ Full copy
y = y.to(torch::kFloat32); // ❌ Full copy

// AFTER:
torch::Tensor X = samples.index({Slice(0, n_features), Slice()})
                  .t()
                  .to(torch::kFloat32); // ✅ A single conversion

torch::Tensor y = samples.index({-1, Slice()})
                  .to(torch::kFloat32); // ✅ A single conversion
```

**Benefit:** ~30% less memory usage
**Estimate:** 6 hours

### 3.3 Phase 3: Robustness Improvements (Weeks 5-6)

#### **Task 3.1: Implement Comprehensive Validation**

```cpp
// VALIDATION TEMPLATE:
template<typename T>
void validateInput(const std::vector<T>& data, const std::string& name) {
    if (data.empty()) {
        throw std::invalid_argument(name + " cannot be empty");
    }
}

void validateTensorDimensions(const torch::Tensor& tensor,
                              const std::vector<int64_t>& expected_dims) {
    if (tensor.sizes() != expected_dims) {
        throw std::invalid_argument("Tensor dimensions mismatch");
    }
}
```

#### **Task 3.2: Implement an Exception Hierarchy**

```cpp
// PROPOSED HIERARCHY:
namespace bayesnet {
    class BayesNetException : public std::exception {
    public:
        explicit BayesNetException(const std::string& msg) : message(msg) {}
        const char* what() const noexcept override { return message.c_str(); }
    private:
        std::string message;
    };

    class InvalidInputException : public BayesNetException {
    public:
        explicit InvalidInputException(const std::string& msg)
            : BayesNetException("Invalid input: " + msg) {}
    };

    class ModelNotFittedException : public BayesNetException {
    public:
        ModelNotFittedException()
            : BayesNetException("Model has not been fitted") {}
    };

    class DimensionMismatchException : public BayesNetException {
    public:
        explicit DimensionMismatchException(const std::string& msg)
            : BayesNetException("Dimension mismatch: " + msg) {}
    };
}
```
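A short usage sketch of the hierarchy proposed above; `checkFitted` and the call site are hypothetical examples, not library code:

```cpp
// Illustrative use of the proposed exception hierarchy (assumes the
// bayesnet exception classes from the block above are in scope).
#include <iostream>

void checkFitted(bool fitted) {
    if (!fitted) {
        throw bayesnet::ModelNotFittedException();
    }
}

int main() {
    try {
        checkFitted(false); // simulate calling predict() before fit()
    }
    catch (const bayesnet::ModelNotFittedException& e) {
        std::cerr << "Specific handler: " << e.what() << '\n';
    }
    catch (const bayesnet::BayesNetException& e) {
        // Catch-all for any library error via the common base class
        std::cerr << "BayesNet error: " << e.what() << '\n';
    }
    return 0;
}
```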
#### **Task 3.3: Improve Test Coverage**

```cpp
// ADDITIONAL TESTS NEEDED:
TEST_CASE("Edge Cases", "[FeatureSelection]") {
    SECTION("Empty dataset") {
        torch::Tensor empty_dataset = torch::empty({0, 0});
        std::vector<std::string> empty_features;

        REQUIRE_THROWS_AS(
            CFS(empty_dataset, empty_features, "class", 0, 2, torch::ones({1})),
            InvalidInputException
        );
    }

    SECTION("Single feature") {
        // Test behavior with a single feature
    }

    SECTION("All features excluded") {
        // Test when all features are excluded
    }
}
```

### 3.4 Phase 4: Advanced Performance Improvements (Weeks 7-8)

#### **Task 4.1: Parallelization with OpenMP**

```cpp
// EXAMPLE FOR CRITICAL LOOPS:
#include <omp.h>

void computeIntensiveOperation(const torch::Tensor& data) {
    const int n = data.size(0);
    std::vector<double> results(n);

    #pragma omp parallel for
    for (int i = 0; i < n; ++i) {
        results[i] = expensiveComputation(data[i]);
    }
}
```

#### **Task 4.2: Memory Pool for Frequent Operations**

```cpp
// MEMORY POOL PROPOSAL:
class TensorPool {
private:
    std::stack<torch::Tensor> available_tensors;
    std::mutex pool_mutex;

public:
    torch::Tensor acquire(const std::vector<int64_t>& shape) {
        std::lock_guard<std::mutex> lock(pool_mutex);
        if (!available_tensors.empty()) {
            auto tensor = available_tensors.top();
            available_tensors.pop();
            return tensor.resize_(shape);
        }
        return torch::zeros(shape);
    }

    void release(torch::Tensor tensor) {
        std::lock_guard<std::mutex> lock(pool_mutex);
        available_tensors.push(tensor);
    }
};
```
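Since manual acquire/release pairs are themselves leak-prone, a small RAII guard could wrap the proposed pool. This is a sketch built on the TensorPool above, not part of the original proposal:

```cpp
// RAII guard sketch for the TensorPool proposed above: the tensor is
// returned to the pool automatically when the guard goes out of scope.
class PooledTensor {
private:
    TensorPool& pool;
    torch::Tensor tensor;

public:
    PooledTensor(TensorPool& p, const std::vector<int64_t>& shape)
        : pool(p), tensor(p.acquire(shape)) {}
    ~PooledTensor() { pool.release(tensor); }

    // Non-copyable: exactly one owner returns the tensor to the pool
    PooledTensor(const PooledTensor&) = delete;
    PooledTensor& operator=(const PooledTensor&) = delete;

    torch::Tensor& get() { return tensor; }
};

// Usage sketch:
// TensorPool pool;
// {
//     PooledTensor buffer(pool, { 128, 10 });
//     buffer.get().zero_();
// } // buffer is released back to the pool here
```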
|
||||
|
||||
## 4. Estimaciones y Timeline
|
||||
|
||||
### 4.1 Resumen de Esfuerzo
|
||||
| Fase | Tareas | Estimación | Beneficio |
|
||||
|------|--------|------------|-----------|
|
||||
| Fase 1 | Problemas Críticos | 12 horas | 10-50x mejora performance |
|
||||
| Fase 2 | Optimizaciones | 18 horas | Mantenibilidad + 30% menos memoria |
|
||||
| Fase 3 | Robustez | 16 horas | Estabilidad y debugging |
|
||||
| Fase 4 | Performance Avanzada | 12 horas | Escalabilidad |
|
||||
| **Total** | | **58 horas** | **Transformación significativa** |
|
||||
|
||||
### 4.2 Timeline Sugerido
|
||||
```
|
||||
Semana 1: [CRÍTICO] Memory leak + BayesMetrics
|
||||
Semana 2: [CRÍTICO] Thread pool + validación básica
|
||||
Semana 3: [IMPORTANTE] Refactoring XSP2DE + MST
|
||||
Semana 4: [IMPORTANTE] Optimización tensores + duplicación
|
||||
Semana 5: [ROBUSTEZ] Validación + excepciones
|
||||
Semana 6: [ROBUSTEZ] Tests adicionales + edge cases
|
||||
Semana 7: [AVANZADO] Paralelización OpenMP
|
||||
Semana 8: [AVANZADO] Memory pool + optimizaciones finales
|
||||
```
|
||||
|
||||
## 5. Impacto Esperado
|
||||
|
||||
### 5.1 Performance
|
||||
- **50x más rápido** en operaciones de feature selection
|
||||
- **4-8x más rápido** en predicción con datasets grandes
|
||||
- **30% menos uso de memoria** eliminando copias innecesarias
|
||||
- **Escalabilidad mejorada** con paralelización
|
||||
|
||||
### 5.2 Mantenibilidad
|
||||
- **Funciones más pequeñas** y especializadas
|
||||
- **Mejor separación de responsabilidades**
|
||||
- **Testing más comprehensivo**
|
||||
- **Debugging más fácil** con excepciones específicas
|
||||
|
||||
### 5.3 Robustez
|
||||
- **Eliminación de memory leaks**
|
||||
- **Validación comprehensiva de entrada**
|
||||
- **Manejo robusto de casos edge**
|
||||
- **Mejor reportes de error**
|
||||
|
||||
## 6. Recomendaciones Adicionales
|
||||
|
||||
### 6.1 Herramientas de Desarrollo
|
||||
- **Análisis estático:** Implementar clang-static-analyzer y cppcheck
|
||||
- **Sanitizers:** Usar AddressSanitizer y ThreadSanitizer en CI
|
||||
- **Profiling:** Integrar valgrind y perf para análisis de performance
|
||||
- **Benchmarking:** Implementar Google Benchmark para tests de regression
|
||||

### 6.2 Development Process
- **Mandatory code reviews** for critical changes
- **CI/CD with automated tests** on multiple platforms
- **Built-in quality metrics** (coverage, cyclomatic complexity)
- **Algorithm documentation** with complexity and references

### 6.3 Performance Monitoring
```cpp
// INTEGRATED PROFILING PROPOSAL:
class PerformanceProfiler {
private:
    std::unordered_map<std::string, std::chrono::duration<double>> timings;

public:
    class ScopedTimer {
        // RAII timer that measures a scope automatically
    };

    void startProfiling(const std::string& operation);
    void endProfiling(const std::string& operation);
    void generateReport();
};
```
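
A minimal sketch of how the RAII `ScopedTimer` could be fleshed out, assuming the `timings` map shown above; the exact wiring into `PerformanceProfiler` is illustrative, not a finished design.

```cpp
#include <chrono>
#include <string>
#include <unordered_map>

// Hypothetical ScopedTimer body: starts the clock on construction and
// adds the elapsed time to the named bucket on destruction.
// Usage: { ScopedTimer t("predict", timings); model.predict(X); }
class ScopedTimer {
public:
    ScopedTimer(std::string name,
                std::unordered_map<std::string, std::chrono::duration<double>>& timings)
        : name_(std::move(name)), timings_(timings),
          start_(std::chrono::steady_clock::now()) {}
    ~ScopedTimer()
    {
        timings_[name_] += std::chrono::steady_clock::now() - start_;
    }
private:
    std::string name_;
    std::unordered_map<std::string, std::chrono::duration<double>>& timings_;
    std::chrono::steady_clock::time_point start_;
};
```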

## 7. Conclusions

BayesNet is a solid library with a well-designed architecture and appropriate use of modern C++ techniques. Nevertheless, there are significant improvement opportunities that can dramatically transform its performance and maintainability.

### Immediate Priorities:
1. **Eliminate the critical memory leak** in Boost.cc
2. **Optimize the O(n³) algorithm** in BayesMetrics.cc
3. **Implement an efficient thread pool** in Network.cc

### Benefits of the Improvement Plan:
- **Performance:** 10-50x improvement in critical operations
- **Memory:** 30% reduction in memory usage
- **Maintainability:** more modular code and comprehensive testing
- **Robustness:** elimination of crashes and better error handling

Implementing these improvements will turn BayesNet into an industrial-grade library, ready for production use in high-performance, mission-critical environments.

---

**Recommended Next Steps:**
1. Review and approve this improvement plan
2. Set priorities based on project needs
3. Implement the improvements in the suggested order
4. Define success metrics for each phase
5. Set up CI/CD to validate the improvements automatically
@@ -14,13 +14,13 @@ namespace bayesnet {
    enum status_t { NORMAL, WARNING, ERROR };
    class BaseClassifier {
    public:
        virtual ~BaseClassifier() = default;
        // X is nxm std::vector, y is nx1 std::vector
        virtual BaseClassifier& fit(std::vector<std::vector<int>>& X, std::vector<int>& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) = 0;
        // X is nxm tensor, y is nx1 tensor
        virtual BaseClassifier& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) = 0;
        virtual BaseClassifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) = 0;
        virtual BaseClassifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const Smoothing_t smoothing) = 0;
        virtual ~BaseClassifier() = default;
        torch::Tensor virtual predict(torch::Tensor& X) = 0;
        std::vector<int> virtual predict(std::vector<std::vector<int >>& X) = 0;
        torch::Tensor virtual predict_proba(torch::Tensor& X) = 0;
@@ -28,8 +28,8 @@ namespace bayesnet {
        status_t virtual getStatus() const = 0;
        float virtual score(std::vector<std::vector<int>>& X, std::vector<int>& y) = 0;
        float virtual score(torch::Tensor& X, torch::Tensor& y) = 0;
        int virtual getNumberOfNodes()const = 0;
        int virtual getNumberOfEdges()const = 0;
        int virtual getNumberOfNodes() const = 0;
        int virtual getNumberOfEdges() const = 0;
        int virtual getNumberOfStates() const = 0;
        int virtual getClassNumStates() const = 0;
        std::vector<std::string> virtual show() const = 0;
@@ -37,11 +37,13 @@ namespace bayesnet {
        virtual std::string getVersion() = 0;
        std::vector<std::string> virtual topological_order() = 0;
        std::vector<std::string> virtual getNotes() const = 0;
        std::string virtual dump_cpt()const = 0;
        std::string virtual dump_cpt() const = 0;
        virtual void setHyperparameters(const nlohmann::json& hyperparameters) = 0;
        std::vector<std::string>& getValidHyperparameters() { return validHyperparameters; }
    protected:
        virtual void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) = 0;
        std::vector<std::string> validHyperparameters;
        std::vector<std::string> notes; // Used to store messages occurred during the fit process
        status_t status = NORMAL;
    };
}
@@ -1,12 +0,0 @@
include_directories(
    ${BayesNet_SOURCE_DIR}/lib/mdlp/src
    ${BayesNet_SOURCE_DIR}/lib/folding
    ${BayesNet_SOURCE_DIR}/lib/json/include
    ${BayesNet_SOURCE_DIR}
    ${CMAKE_BINARY_DIR}/configured_files/include
)

file(GLOB_RECURSE Sources "*.cc")

add_library(BayesNet ${Sources})
target_link_libraries(BayesNet mdlp "${TORCH_LIBRARIES}")
@@ -10,7 +10,6 @@

namespace bayesnet {
    Classifier::Classifier(Network model) : model(model), m(0), n(0), metrics(Metrics()), fitted(false) {}
    const std::string CLASSIFIER_NOT_FITTED = "Classifier has not been fitted";
    Classifier& Classifier::build(const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const Smoothing_t smoothing)
    {
        this->features = features;
@@ -46,12 +46,11 @@ namespace bayesnet {
        std::string className;
        std::map<std::string, std::vector<int>> states;
        torch::Tensor dataset; // (n+1)xm tensor
        status_t status = NORMAL;
        std::vector<std::string> notes; // Used to store messages occurred during the fit process
        void checkFitParameters();
        virtual void buildModel(const torch::Tensor& weights) = 0;
        void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) override;
        void buildDataset(torch::Tensor& y);
        const std::string CLASSIFIER_NOT_FITTED = "Classifier has not been fitted";
    private:
        Classifier& build(const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const Smoothing_t smoothing);
    };
@@ -3,7 +3,7 @@
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#include "bayesnet/utils/bayesnetUtils.h"
#include "KDB.h"

namespace bayesnet {
@@ -7,21 +7,19 @@
#ifndef KDB_H
#define KDB_H
#include <torch/torch.h>
#include "bayesnet/utils/bayesnetUtils.h"
#include "Classifier.h"
namespace bayesnet {
    class KDB : public Classifier {
    private:
        int k;
        float theta;
        void add_m_edges(int idx, std::vector<int>& S, torch::Tensor& weights);
    protected:
        void buildModel(const torch::Tensor& weights) override;
    public:
        explicit KDB(int k, float theta = 0.03);
        virtual ~KDB() = default;
        void setHyperparameters(const nlohmann::json& hyperparameters_) override;
        std::vector<std::string> graph(const std::string& name = "KDB") const override;
    protected:
        int k;
        float theta;
        void add_m_edges(int idx, std::vector<int>& S, torch::Tensor& weights);
        void buildModel(const torch::Tensor& weights) override;
    };
}
#endif
@@ -7,7 +7,25 @@
#include "KDBLd.h"

namespace bayesnet {
    KDBLd::KDBLd(int k) : KDB(k), Proposal(dataset, features, className) {}
    KDBLd::KDBLd(int k) : KDB(k), Proposal(dataset, features, className)
    {
        validHyperparameters = validHyperparameters_ld;
        validHyperparameters.push_back("k");
        validHyperparameters.push_back("theta");
    }
    void KDBLd::setHyperparameters(const nlohmann::json& hyperparameters_)
    {
        auto hyperparameters = hyperparameters_;
        if (hyperparameters.contains("k")) {
            k = hyperparameters["k"];
            hyperparameters.erase("k");
        }
        if (hyperparameters.contains("theta")) {
            theta = hyperparameters["theta"];
            hyperparameters.erase("theta");
        }
        Proposal::setHyperparameters(hyperparameters);
    }
    KDBLd& KDBLd::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_, const Smoothing_t smoothing)
    {
        checkInput(X_, y_);
@@ -28,6 +46,11 @@ namespace bayesnet {
        auto Xt = prepareX(X);
        return KDB::predict(Xt);
    }
    torch::Tensor KDBLd::predict_proba(torch::Tensor& X)
    {
        auto Xt = prepareX(X);
        return KDB::predict_proba(Xt);
    }
    std::vector<std::string> KDBLd::graph(const std::string& name) const
    {
        return KDB::graph(name);
@@ -11,13 +11,14 @@

namespace bayesnet {
    class KDBLd : public KDB, public Proposal {
    private:
    public:
        explicit KDBLd(int k);
        virtual ~KDBLd() = default;
        KDBLd& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) override;
        std::vector<std::string> graph(const std::string& name = "KDB") const override;
        void setHyperparameters(const nlohmann::json& hyperparameters_) override;
        torch::Tensor predict(torch::Tensor& X) override;
        torch::Tensor predict_proba(torch::Tensor& X) override;
        static inline std::string version() { return "0.0.1"; };
    };
}
@@ -7,13 +7,42 @@
#include "Proposal.h"

namespace bayesnet {
    Proposal::Proposal(torch::Tensor& dataset_, std::vector<std::string>& features_, std::string& className_) : pDataset(dataset_), pFeatures(features_), pClassName(className_) {}
    Proposal::~Proposal()
    Proposal::Proposal(torch::Tensor& dataset_, std::vector<std::string>& features_, std::string& className_) : pDataset(dataset_), pFeatures(features_), pClassName(className_)
    {
        for (auto& [key, value] : discretizers) {
            delete value;
        }
    void Proposal::setHyperparameters(const nlohmann::json& hyperparameters_)
    {
        auto hyperparameters = hyperparameters_;
        if (hyperparameters.contains("ld_proposed_cuts")) {
            ld_params.proposed_cuts = hyperparameters["ld_proposed_cuts"];
            hyperparameters.erase("ld_proposed_cuts");
        }
        if (hyperparameters.contains("mdlp_max_depth")) {
            ld_params.max_depth = hyperparameters["mdlp_max_depth"];
            hyperparameters.erase("mdlp_max_depth");
        }
        if (hyperparameters.contains("mdlp_min_length")) {
            ld_params.min_length = hyperparameters["mdlp_min_length"];
            hyperparameters.erase("mdlp_min_length");
        }
        if (hyperparameters.contains("ld_algorithm")) {
            auto algorithm = hyperparameters["ld_algorithm"];
            hyperparameters.erase("ld_algorithm");
            if (algorithm == "MDLP") {
                discretizationType = discretization_t::MDLP;
            } else if (algorithm == "BINQ") {
                discretizationType = discretization_t::BINQ;
            } else if (algorithm == "BINU") {
                discretizationType = discretization_t::BINU;
            } else {
                throw std::invalid_argument("Invalid discretization algorithm: " + algorithm.get<std::string>());
            }
        }
        if (!hyperparameters.empty()) {
            throw std::invalid_argument("Invalid hyperparameters for Proposal: " + hyperparameters.dump());
        }
    }

    void Proposal::checkInput(const torch::Tensor& X, const torch::Tensor& y)
    {
        if (!torch::is_floating_point(X)) {
@@ -23,6 +52,7 @@ namespace bayesnet {
            throw std::invalid_argument("y must be an integer tensor");
        }
    }
    // Fit method for single classifier
    map<std::string, std::vector<int>> Proposal::localDiscretizationProposal(const map<std::string, std::vector<int>>& oldStates, Network& model)
    {
        // order of local discretization is important. no good 0, 1, 2...
@@ -83,8 +113,15 @@ namespace bayesnet {
        pDataset = torch::zeros({ n + 1, m }, torch::kInt32);
        auto yv = std::vector<int>(y.data_ptr<int>(), y.data_ptr<int>() + y.size(0));
        // discretize input data by feature(row)
        std::unique_ptr<mdlp::Discretizer> discretizer;
        for (auto i = 0; i < pFeatures.size(); ++i) {
            auto* discretizer = new mdlp::CPPFImdlp();
            if (discretizationType == discretization_t::BINQ) {
                discretizer = std::make_unique<mdlp::BinDisc>(ld_params.proposed_cuts, mdlp::strategy_t::QUANTILE);
            } else if (discretizationType == discretization_t::BINU) {
                discretizer = std::make_unique<mdlp::BinDisc>(ld_params.proposed_cuts, mdlp::strategy_t::UNIFORM);
            } else { // Default is MDLP
                discretizer = std::make_unique<mdlp::CPPFImdlp>(ld_params.min_length, ld_params.max_depth, ld_params.proposed_cuts);
            }
            auto Xt_ptr = Xf.index({ i }).data_ptr<float>();
            auto Xt = std::vector<float>(Xt_ptr, Xt_ptr + Xf.size(1));
            discretizer->fit(Xt, yv);
@@ -92,7 +129,7 @@ namespace bayesnet {
            auto xStates = std::vector<int>(discretizer->getCutPoints().size() + 1);
            iota(xStates.begin(), xStates.end(), 0);
            states[pFeatures[i]] = xStates;
            discretizers[pFeatures[i]] = discretizer;
            discretizers[pFeatures[i]] = std::move(discretizer);
        }
        int n_classes = torch::max(y).item<int>() + 1;
        auto yStates = std::vector<int>(n_classes);
@@ -9,15 +9,17 @@
#include <string>
#include <map>
#include <torch/torch.h>
#include <CPPFImdlp.h>
#include <fimdlp/CPPFImdlp.h>
#include <fimdlp/BinDisc.h>
#include "bayesnet/network/Network.h"
#include <nlohmann/json.hpp>
#include "Classifier.h"

namespace bayesnet {
    class Proposal {
    public:
        Proposal(torch::Tensor& pDataset, std::vector<std::string>& features_, std::string& className_);
        virtual ~Proposal();
        void setHyperparameters(const nlohmann::json& hyperparameters_);
    protected:
        void checkInput(const torch::Tensor& X, const torch::Tensor& y);
        torch::Tensor prepareX(torch::Tensor& X);
@@ -25,12 +27,24 @@ namespace bayesnet {
        map<std::string, std::vector<int>> fit_local_discretization(const torch::Tensor& y);
        torch::Tensor Xf; // X continuous nxm tensor
        torch::Tensor y; // y discrete nx1 tensor
        map<std::string, mdlp::CPPFImdlp*> discretizers;
        map<std::string, std::unique_ptr<mdlp::Discretizer>> discretizers;
        // MDLP parameters
        struct {
            size_t min_length = 3; // Minimum length of the interval to consider it in mdlp
            float proposed_cuts = 0.0; // Proposed cuts for the Discretization algorithm
            int max_depth = std::numeric_limits<int>::max(); // Maximum depth of the MDLP tree
        } ld_params;
        nlohmann::json validHyperparameters_ld = { "ld_algorithm", "ld_proposed_cuts", "mdlp_min_length", "mdlp_max_depth" };
    private:
        std::vector<int> factorize(const std::vector<std::string>& labels_t);
        torch::Tensor& pDataset; // (n+1)xm tensor
        std::vector<std::string>& pFeatures;
        std::string& pClassName;
        enum class discretization_t {
            MDLP,
            BINQ,
            BINU
        } discretizationType = discretization_t::MDLP; // Default discretization type
    };
}
@@ -8,14 +8,29 @@

namespace bayesnet {

    SPODE::SPODE(int root) : Classifier(Network()), root(root) {}
    SPODE::SPODE(int root) : Classifier(Network()), root(root)
    {
        validHyperparameters = { "parent" };
    }

    void SPODE::setHyperparameters(const nlohmann::json& hyperparameters_)
    {
        auto hyperparameters = hyperparameters_;
        if (hyperparameters.contains("parent")) {
            root = hyperparameters["parent"];
            hyperparameters.erase("parent");
        }
        Classifier::setHyperparameters(hyperparameters);
    }
    void SPODE::buildModel(const torch::Tensor& weights)
    {
        // 0. Add all nodes to the model
        addNodes();
        // 1. Add edges from the class node to all other nodes
        // 2. Add edges from the root node to all other nodes
        if (root >= static_cast<int>(features.size())) {
            throw std::invalid_argument("The parent node is not in the dataset");
        }
        for (int i = 0; i < static_cast<int>(features.size()); ++i) {
            model.addEdge(className, features[i]);
            if (i != root) {
@@ -10,14 +10,15 @@

namespace bayesnet {
    class SPODE : public Classifier {
    private:
        int root;
    protected:
        void buildModel(const torch::Tensor& weights) override;
    public:
        explicit SPODE(int root);
        virtual ~SPODE() = default;
        void setHyperparameters(const nlohmann::json& hyperparameters_) override;
        std::vector<std::string> graph(const std::string& name = "SPODE") const override;
    protected:
        void buildModel(const torch::Tensor& weights) override;
    private:
        int root;
    };
}
#endif
@@ -7,7 +7,11 @@
#include "SPODELd.h"

namespace bayesnet {
    SPODELd::SPODELd(int root) : SPODE(root), Proposal(dataset, features, className) {}
    SPODELd::SPODELd(int root) : SPODE(root), Proposal(dataset, features, className)
    {
        validHyperparameters = validHyperparameters_ld; // Inherits the valid hyperparameters from Proposal
    }

    SPODELd& SPODELd::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_, const Smoothing_t smoothing)
    {
        checkInput(X_, y_);
@@ -43,6 +47,11 @@ namespace bayesnet {
        auto Xt = prepareX(X);
        return SPODE::predict(Xt);
    }
    torch::Tensor SPODELd::predict_proba(torch::Tensor& X)
    {
        auto Xt = prepareX(X);
        return SPODE::predict_proba(Xt);
    }
    std::vector<std::string> SPODELd::graph(const std::string& name) const
    {
        return SPODE::graph(name);
@@ -19,6 +19,7 @@ namespace bayesnet {
        SPODELd& commonFit(const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states, const Smoothing_t smoothing);
        std::vector<std::string> graph(const std::string& name = "SPODELd") const override;
        torch::Tensor predict(torch::Tensor& X) override;
        torch::Tensor predict_proba(torch::Tensor& X) override;
        static inline std::string version() { return "0.0.1"; };
    };
}
@@ -7,8 +7,20 @@
#include "TAN.h"

namespace bayesnet {
    TAN::TAN() : Classifier(Network()) {}
    TAN::TAN() : Classifier(Network())
    {
        validHyperparameters = { "parent" };
    }

    void TAN::setHyperparameters(const nlohmann::json& hyperparameters_)
    {
        auto hyperparameters = hyperparameters_;
        if (hyperparameters.contains("parent")) {
            parent = hyperparameters["parent"];
            hyperparameters.erase("parent");
        }
        Classifier::setHyperparameters(hyperparameters);
    }
    void TAN::buildModel(const torch::Tensor& weights)
    {
        // 0. Add all nodes to the model
@@ -23,7 +35,10 @@ namespace bayesnet {
            mi.push_back({ i, mi_value });
        }
        sort(mi.begin(), mi.end(), [](const auto& left, const auto& right) {return left.second < right.second;});
        auto root = mi[mi.size() - 1].first;
        auto root = parent == -1 ? mi[mi.size() - 1].first : parent;
        if (root >= static_cast<int>(features.size())) {
            throw std::invalid_argument("The parent node is not in the dataset");
        }
        // 2. Compute mutual information between each feature and the class
        auto weights_matrix = metrics.conditionalEdge(weights);
        // 3. Compute the maximum spanning tree
@@ -9,13 +9,15 @@
#include "Classifier.h"
namespace bayesnet {
    class TAN : public Classifier {
    private:
    protected:
        void buildModel(const torch::Tensor& weights) override;
    public:
        TAN();
        virtual ~TAN() = default;
        void setHyperparameters(const nlohmann::json& hyperparameters_) override;
        std::vector<std::string> graph(const std::string& name = "TAN") const override;
    protected:
        void buildModel(const torch::Tensor& weights) override;
    private:
        int parent = -1;
    };
}
#endif
@@ -29,6 +29,11 @@ namespace bayesnet {
        auto Xt = prepareX(X);
        return TAN::predict(Xt);
    }
    torch::Tensor TANLd::predict_proba(torch::Tensor& X)
    {
        auto Xt = prepareX(X);
        return TAN::predict_proba(Xt);
    }
    std::vector<std::string> TANLd::graph(const std::string& name) const
    {
        return TAN::graph(name);
@@ -18,6 +18,7 @@ namespace bayesnet {
        TANLd& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) override;
        std::vector<std::string> graph(const std::string& name = "TANLd") const override;
        torch::Tensor predict(torch::Tensor& X) override;
        torch::Tensor predict_proba(torch::Tensor& X) override;
    };
}
#endif // !TANLD_H
575 bayesnet/classifiers/XSP2DE.cc Normal file
@@ -0,0 +1,575 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#include "XSP2DE.h"
#include <pthread.h> // for pthread_setname_np on linux
#include <cassert>
#include <cmath>
#include <limits>
#include <numeric>
#include <sstream>
#include <stdexcept>
#include <thread>
#include <iostream>
#include "bayesnet/utils/TensorUtils.h"

namespace bayesnet {

    // --------------------------------------
    // Constructor
    // --------------------------------------
    XSp2de::XSp2de(int spIndex1, int spIndex2)
        : superParent1_{ spIndex1 }
        , superParent2_{ spIndex2 }
        , nFeatures_{0}
        , statesClass_{0}
        , alpha_{1.0}
        , initializer_{1.0}
        , semaphore_{ CountingSemaphore::getInstance() }
        , Classifier(Network())
    {
        validHyperparameters = { "parent1", "parent2" };
    }

    // --------------------------------------
    // setHyperparameters
    // --------------------------------------
    void XSp2de::setHyperparameters(const nlohmann::json &hyperparameters_)
    {
        auto hyperparameters = hyperparameters_;
        if (hyperparameters.contains("parent1")) {
            superParent1_ = hyperparameters["parent1"];
            hyperparameters.erase("parent1");
        }
        if (hyperparameters.contains("parent2")) {
            superParent2_ = hyperparameters["parent2"];
            hyperparameters.erase("parent2");
        }
        // Hand off anything else to base Classifier
        Classifier::setHyperparameters(hyperparameters);
    }

    // --------------------------------------
    // fitx
    // --------------------------------------
    void XSp2de::fitx(torch::Tensor & X, torch::Tensor & y,
                      torch::Tensor & weights_, const Smoothing_t smoothing)
    {
        m = X.size(1); // number of samples
        n = X.size(0); // number of features
        dataset = X;

        // Build the dataset in your environment if needed:
        buildDataset(y);

        // Construct the data structures needed for counting
        buildModel(weights_);

        // Accumulate counts & convert to probabilities
        trainModel(weights_, smoothing);
        fitted = true;
    }

    // --------------------------------------
    // buildModel
    // --------------------------------------
    void XSp2de::buildModel(const torch::Tensor &weights)
    {
        nFeatures_ = n;

        // Derive the number of states for each feature from the dataset
        // states_[f] = max value in dataset[f] + 1.
        states_.resize(nFeatures_);
        for (int f = 0; f < nFeatures_; f++) {
            // This is naive: we take max in feature f. You might adapt for real data.
            states_[f] = dataset[f].max().item<int>() + 1;
        }
        // Class states:
        statesClass_ = dataset[-1].max().item<int>() + 1;

        // Initialize the class counts
        classCounts_.resize(statesClass_, 0.0);

        // For sp1 -> p(sp1Val| c)
        sp1FeatureCounts_.resize(states_[superParent1_] * statesClass_, 0.0);

        // For sp2 -> p(sp2Val| c)
        sp2FeatureCounts_.resize(states_[superParent2_] * statesClass_, 0.0);

        // For child features, we store p(childVal | c, sp1Val, sp2Val).
        // childCounts_ will hold raw counts. We’ll gather them in one big vector.
        // We need an offset for each feature.
        childOffsets_.resize(nFeatures_, -1);

        int totalSize = 0;
        for (int f = 0; f < nFeatures_; f++) {
            if (f == superParent1_ || f == superParent2_) {
                // skip the superparents
                childOffsets_[f] = -1;
                continue;
            }
            childOffsets_[f] = totalSize;
            // block size for a single child f: states_[f] * statesClass_
            //                                  * states_[superParent1_]
            //                                  * states_[superParent2_].
            totalSize += (states_[f] * statesClass_
                          * states_[superParent1_]
                          * states_[superParent2_]);
        }
        childCounts_.resize(totalSize, 0.0);
    }

    // --------------------------------------
    // trainModel
    // --------------------------------------
    void XSp2de::trainModel(const torch::Tensor &weights,
                            const bayesnet::Smoothing_t smoothing)
    {
        // Accumulate raw counts
        for (int i = 0; i < m; i++) {
            std::vector<int> instance(nFeatures_ + 1);
            for (int f = 0; f < nFeatures_; f++) {
                instance[f] = dataset[f][i].item<int>();
            }
            instance[nFeatures_] = dataset[-1][i].item<int>(); // class
            double w = weights[i].item<double>();
            addSample(instance, w);
        }

        // Choose alpha based on smoothing:
        switch (smoothing) {
            case bayesnet::Smoothing_t::ORIGINAL:
                alpha_ = 1.0 / m;
                break;
            case bayesnet::Smoothing_t::LAPLACE:
                alpha_ = 1.0;
                break;
            default:
                alpha_ = 0.0; // no smoothing
        }

        // Large initializer factor for numerical stability
        initializer_ = std::numeric_limits<double>::max() / (nFeatures_ * nFeatures_);

        // Convert raw counts to probabilities
        computeProbabilities();
    }

    // --------------------------------------
    // addSample
    // --------------------------------------
    void XSp2de::addSample(const std::vector<int> &instance, double weight)
    {
        if (weight <= 0.0)
            return;

        int c = instance.back();
        // increment classCounts
        classCounts_[c] += weight;

        int sp1Val = instance[superParent1_];
        int sp2Val = instance[superParent2_];

        // p(sp1|c)
        sp1FeatureCounts_[sp1Val * statesClass_ + c] += weight;

        // p(sp2|c)
        sp2FeatureCounts_[sp2Val * statesClass_ + c] += weight;

        // p(childVal| c, sp1Val, sp2Val)
        for (int f = 0; f < nFeatures_; f++) {
            if (f == superParent1_ || f == superParent2_)
                continue;

            int childVal = instance[f];
            int offset = childOffsets_[f];
            // block layout:
            //   offset + (sp1Val*(states_[sp2_]* states_[f]* statesClass_))
            //          + (sp2Val*(states_[f]* statesClass_))
            //          + childVal*(statesClass_)
            //          + c
            int blockSizeSp2 = states_[superParent2_]
                               * states_[f]
                               * statesClass_;
            int blockSizeChild = states_[f] * statesClass_;

            int idx = offset
                      + sp1Val*blockSizeSp2
                      + sp2Val*blockSizeChild
                      + childVal*statesClass_
                      + c;
            childCounts_[idx] += weight;
        }
    }

    // --------------------------------------
    // computeProbabilities
    // --------------------------------------
    void XSp2de::computeProbabilities()
    {
        double totalCount = std::accumulate(classCounts_.begin(),
                                            classCounts_.end(), 0.0);

        // classPriors_
        classPriors_.resize(statesClass_, 0.0);
        if (totalCount <= 0.0) {
            // fallback => uniform
            double unif = 1.0 / static_cast<double>(statesClass_);
            for (int c = 0; c < statesClass_; c++) {
                classPriors_[c] = unif;
            }
        } else {
            for (int c = 0; c < statesClass_; c++) {
                classPriors_[c] =
                    (classCounts_[c] + alpha_)
                    / (totalCount + alpha_ * statesClass_);
            }
        }

        // p(sp1Val| c)
        sp1FeatureProbs_.resize(sp1FeatureCounts_.size());
        int sp1Card = states_[superParent1_];
        for (int spVal = 0; spVal < sp1Card; spVal++) {
            for (int c = 0; c < statesClass_; c++) {
                double denom = classCounts_[c] + alpha_ * sp1Card;
                double num = sp1FeatureCounts_[spVal * statesClass_ + c] + alpha_;
                sp1FeatureProbs_[spVal * statesClass_ + c] =
                    (denom <= 0.0 ? 0.0 : num / denom);
            }
        }

        // p(sp2Val| c)
        sp2FeatureProbs_.resize(sp2FeatureCounts_.size());
        int sp2Card = states_[superParent2_];
        for (int spVal = 0; spVal < sp2Card; spVal++) {
            for (int c = 0; c < statesClass_; c++) {
                double denom = classCounts_[c] + alpha_ * sp2Card;
                double num = sp2FeatureCounts_[spVal * statesClass_ + c] + alpha_;
                sp2FeatureProbs_[spVal * statesClass_ + c] =
                    (denom <= 0.0 ? 0.0 : num / denom);
            }
        }

        // p(childVal| c, sp1Val, sp2Val)
        childProbs_.resize(childCounts_.size());
        int offset = 0;
        for (int f = 0; f < nFeatures_; f++) {
            if (f == superParent1_ || f == superParent2_)
                continue;

            int fCard = states_[f];
            int sp1Card_ = states_[superParent1_];
            int sp2Card_ = states_[superParent2_];
            int childBlockSizeSp2 = sp2Card_ * fCard * statesClass_;
            int childBlockSizeF = fCard * statesClass_;

            int blockSize = fCard * sp1Card_ * sp2Card_ * statesClass_;
            for (int sp1Val = 0; sp1Val < sp1Card_; sp1Val++) {
                for (int sp2Val = 0; sp2Val < sp2Card_; sp2Val++) {
                    for (int childVal = 0; childVal < fCard; childVal++) {
                        for (int c = 0; c < statesClass_; c++) {
                            // index in childCounts_
                            int idx = offset
                                      + sp1Val*childBlockSizeSp2
                                      + sp2Val*childBlockSizeF
                                      + childVal*statesClass_
                                      + c;
                            double num = childCounts_[idx] + alpha_;
                            // denominator is the count of (sp1Val,sp2Val,c) plus alpha * fCard
                            // We can find that by summing childVal dimension, but we already
                            // have it in childCounts_[...] or we can re-check the superparent
                            // counts if your approach is purely hierarchical.
                            // Here we'll do it like the XSpode approach: sp1&sp2 are
                            // conditionally independent given c, so denominators come from
                            // summing the relevant block or we treat sp1,sp2 as "parents."
                            // A simpler approach:
                            double sumSp1Sp2C = 0.0;
                            // sum over all childVal:
                            for (int cv = 0; cv < fCard; cv++) {
                                int idx2 = offset
                                           + sp1Val*childBlockSizeSp2
                                           + sp2Val*childBlockSizeF
                                           + cv*statesClass_ + c;
                                sumSp1Sp2C += childCounts_[idx2];
                            }
                            double denom = sumSp1Sp2C + alpha_ * fCard;
                            childProbs_[idx] = (denom <= 0.0 ? 0.0 : num / denom);
                        }
                    }
                }
            }
            offset += blockSize;
        }
    }

    // --------------------------------------
    // predict_proba (single instance)
    // --------------------------------------
    std::vector<double> XSp2de::predict_proba(const std::vector<int> &instance) const
    {
        if (!fitted) {
            throw std::logic_error(CLASSIFIER_NOT_FITTED);
        }
        std::vector<double> probs(statesClass_, 0.0);

        int sp1Val = instance[superParent1_];
        int sp2Val = instance[superParent2_];

        // Start with p(c) * p(sp1Val| c) * p(sp2Val| c)
        for (int c = 0; c < statesClass_; c++) {
            double pC = classPriors_[c];
            double pSp1C = sp1FeatureProbs_[sp1Val * statesClass_ + c];
            double pSp2C = sp2FeatureProbs_[sp2Val * statesClass_ + c];
            probs[c] = pC * pSp1C * pSp2C * initializer_;
        }

        // Multiply by each child feature f
        int offset = 0;
        for (int f = 0; f < nFeatures_; f++) {
            if (f == superParent1_ || f == superParent2_)
                continue;

            int valF = instance[f];
            int fCard = states_[f];
            int sp1Card = states_[superParent1_];
            int sp2Card = states_[superParent2_];
            int blockSizeSp2 = sp2Card * fCard * statesClass_;
            int blockSizeF = fCard * statesClass_;

            // base index for childProbs_ for this child and sp1Val, sp2Val
            int base = offset
                       + sp1Val*blockSizeSp2
                       + sp2Val*blockSizeF
                       + valF*statesClass_;
            for (int c = 0; c < statesClass_; c++) {
                probs[c] *= childProbs_[base + c];
            }
            offset += (fCard * sp1Card * sp2Card * statesClass_);
        }

        // Normalize
        normalize(probs);
        return probs;
    }

    // --------------------------------------
    // predict_proba (batch)
    // --------------------------------------
    std::vector<std::vector<double>> XSp2de::predict_proba(std::vector<std::vector<int>> &test_data)
    {
        int test_size = test_data[0].size(); // each feature is test_data[f], size = #samples
        int sample_size = test_data.size();  // = nFeatures_
        std::vector<std::vector<double>> probabilities(
            test_size, std::vector<double>(statesClass_, 0.0));

        // same concurrency approach
        int chunk_size = std::min(150, int(test_size / semaphore_.getMaxCount()) + 1);
        std::vector<std::thread> threads;

        auto worker = [&](const std::vector<std::vector<int>> &samples,
                          int begin,
                          int chunk,
                          int sample_size,
                          std::vector<std::vector<double>> &predictions) {
            std::string threadName =
                "XSp2de-" + std::to_string(begin) + "-" + std::to_string(chunk);
#if defined(__linux__)
            pthread_setname_np(pthread_self(), threadName.c_str());
#else
            pthread_setname_np(threadName.c_str());
#endif

            std::vector<int> instance(sample_size);
            for (int sample = begin; sample < begin + chunk; ++sample) {
                for (int feature = 0; feature < sample_size; ++feature) {
                    instance[feature] = samples[feature][sample];
                }
                predictions[sample] = predict_proba(instance);
            }
            semaphore_.release();
        };

        for (int begin = 0; begin < test_size; begin += chunk_size) {
            int chunk = std::min(chunk_size, test_size - begin);
            semaphore_.acquire();
            threads.emplace_back(worker, test_data, begin, chunk, sample_size,
                                 std::ref(probabilities));
        }
        for (auto &th : threads) {
            th.join();
        }
        return probabilities;
    }

    // --------------------------------------
    // predict (single instance)
    // --------------------------------------
    int XSp2de::predict(const std::vector<int> &instance) const
    {
        auto p = predict_proba(instance);
        return static_cast<int>(
            std::distance(p.begin(), std::max_element(p.begin(), p.end()))
        );
    }

    // --------------------------------------
    // predict (batch of data)
    // --------------------------------------
    std::vector<int> XSp2de::predict(std::vector<std::vector<int>> &test_data)
    {
        auto probabilities = predict_proba(test_data);
        std::vector<int> predictions(probabilities.size(), 0);

        for (size_t i = 0; i < probabilities.size(); i++) {
            predictions[i] = static_cast<int>(
                std::distance(probabilities[i].begin(),
                              std::max_element(probabilities[i].begin(),
                                               probabilities[i].end()))
            );
        }
        return predictions;
    }

    // --------------------------------------
    // predict (torch::Tensor version)
    // --------------------------------------
    torch::Tensor XSp2de::predict(torch::Tensor &X)
    {
        auto X_ = TensorUtils::to_matrix(X);
        auto result_v = predict(X_);
        return torch::tensor(result_v, torch::kInt32);
    }

    // --------------------------------------
    // predict_proba (torch::Tensor version)
    // --------------------------------------
    torch::Tensor XSp2de::predict_proba(torch::Tensor &X)
    {
        auto X_ = TensorUtils::to_matrix(X);
        auto result_v = predict_proba(X_);
        int n_samples = X.size(1);
        torch::Tensor result =
            torch::zeros({ n_samples, statesClass_ }, torch::kDouble);
        for (int i = 0; i < (int)result_v.size(); ++i) {
            result.index_put_({ i, "..." }, torch::tensor(result_v[i]));
        }
        return result;
    }

    // --------------------------------------
    // score (torch::Tensor version)
    // --------------------------------------
    float XSp2de::score(torch::Tensor &X, torch::Tensor &y)
    {
        torch::Tensor y_pred = predict(X);
        return (y_pred == y).sum().item<float>() / y.size(0);
    }

    // --------------------------------------
    // score (vector version)
    // --------------------------------------
    float XSp2de::score(std::vector<std::vector<int>> &X, std::vector<int> &y)
    {
        auto y_pred = predict(X);
        int correct = 0;
        for (size_t i = 0; i < y_pred.size(); ++i) {
            if (y_pred[i] == y[i]) {
                correct++;
            }
        }
        return static_cast<float>(correct) / static_cast<float>(y_pred.size());
    }

    // --------------------------------------
    // Utility: normalize
    // --------------------------------------
    void XSp2de::normalize(std::vector<double> &v) const
    {
        double sum = 0.0;
        for (auto &val : v) {
            sum += val;
        }
        if (sum > 0.0) {
            for (auto &val : v) {
                val /= sum;
            }
        }
    }

    // --------------------------------------
    // to_string
    // --------------------------------------
    std::string XSp2de::to_string() const
    {
        std::ostringstream oss;
        oss << "----- XSp2de Model -----\n"
            << "nFeatures_ = " << nFeatures_ << "\n"
            << "superParent1_ = " << superParent1_ << "\n"
            << "superParent2_ = " << superParent2_ << "\n"
            << "statesClass_ = " << statesClass_ << "\n\n";

        oss << "States: [";
        for (auto s : states_) oss << s << " ";
        oss << "]\n";

        oss << "classCounts_:\n";
        for (auto v : classCounts_) oss << v << " ";
        oss << "\nclassPriors_:\n";
        for (auto v : classPriors_) oss << v << " ";
        oss << "\nsp1FeatureCounts_ (size=" << sp1FeatureCounts_.size() << ")\n";
        for (auto v : sp1FeatureCounts_) oss << v << " ";
        oss << "\nsp2FeatureCounts_ (size=" << sp2FeatureCounts_.size() << ")\n";
        for (auto v : sp2FeatureCounts_) oss << v << " ";
        oss << "\nchildCounts_ (size=" << childCounts_.size() << ")\n";
        for (auto v : childCounts_) oss << v << " ";

        oss << "\nchildOffsets_:\n";
        for (auto c : childOffsets_) oss << c << " ";

        oss << "\n----------------------------------------\n";
        return oss.str();
    }

    // --------------------------------------
    // Some introspection about the graph
    // --------------------------------------
    int XSp2de::getNumberOfNodes() const
    {
        // nFeatures + 1 class node
        return nFeatures_ + 1;
    }

    int XSp2de::getClassNumStates() const
    {
        return statesClass_;
    }

    int XSp2de::getNFeatures() const
    {
        return nFeatures_;
    }

    int XSp2de::getNumberOfStates() const
    {
        // purely an example. Possibly you want to sum up actual
        // cardinalities or something else.
        return std::accumulate(states_.begin(), states_.end(), 0) * nFeatures_;
    }

    int XSp2de::getNumberOfEdges() const
    {
        // In an SPNDE with n=2, for each feature we have edges from class, sp1, sp2.
        // So that’s 3*(nFeatures_) edges, minus the ones for the superparents themselves,
        // plus the edges from class->superparent1, class->superparent2.
        // For a quick approximation:
        //   - class->sp1, class->sp2 => 2 edges
        //   - class->child => (nFeatures -2) edges
        //   - sp1->child, sp2->child => 2*(nFeatures -2) edges
        // total = 2 + (nFeatures-2) + 2*(nFeatures-2) = 2 + 3*(nFeatures-2)
        //       = 3nFeatures - 4 (just an example).
        // You can adapt to your liking:
        return 3 * nFeatures_ - 4;
    }

} // namespace bayesnet
75 bayesnet/classifiers/XSP2DE.h Normal file
@@ -0,0 +1,75 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef XSP2DE_H
#define XSP2DE_H

#include "Classifier.h"
#include "bayesnet/utils/CountingSemaphore.h"
#include <torch/torch.h>
#include <vector>

namespace bayesnet {

    class XSp2de : public Classifier {
    public:
        XSp2de(int spIndex1, int spIndex2);
        void setHyperparameters(const nlohmann::json &hyperparameters_) override;
        void fitx(torch::Tensor &X, torch::Tensor &y, torch::Tensor &weights_, const Smoothing_t smoothing);
        std::vector<double> predict_proba(const std::vector<int> &instance) const;
        std::vector<std::vector<double>> predict_proba(std::vector<std::vector<int>> &test_data) override;
        int predict(const std::vector<int> &instance) const;
        std::vector<int> predict(std::vector<std::vector<int>> &test_data) override;
        torch::Tensor predict(torch::Tensor &X) override;
        torch::Tensor predict_proba(torch::Tensor &X) override;

        float score(torch::Tensor &X, torch::Tensor &y) override;
        float score(std::vector<std::vector<int>> &X, std::vector<int> &y) override;
        std::string to_string() const;
        std::vector<std::string> graph(const std::string &title) const override {
            return std::vector<std::string>({title});
        }

        int getNumberOfNodes() const override;
        int getNumberOfEdges() const override;
        int getNFeatures() const;
        int getClassNumStates() const override;
        int getNumberOfStates() const override;

    protected:
        void buildModel(const torch::Tensor &weights) override;
        void trainModel(const torch::Tensor &weights, const bayesnet::Smoothing_t smoothing) override;

    private:
        void addSample(const std::vector<int> &instance, double weight);
        void normalize(std::vector<double> &v) const;
        void computeProbabilities();

        int superParent1_;
        int superParent2_;
        int nFeatures_;
        int statesClass_;
        double alpha_;
        double initializer_;

        std::vector<int> states_;
        std::vector<double> classCounts_;
        std::vector<double> classPriors_;
        std::vector<double> sp1FeatureCounts_, sp1FeatureProbs_;
        std::vector<double> sp2FeatureCounts_, sp2FeatureProbs_;
        // childOffsets_[f] will be the offset into childCounts_ for feature f.
        // If f is either superParent1 or superParent2, childOffsets_[f] = -1
        std::vector<int> childOffsets_;
        // For each child f, we store p(x_f | c, sp1Val, sp2Val). We'll store the raw
        // counts in childCounts_, and the probabilities in childProbs_, with a
        // dimension block of size: states_[f]* statesClass_* states_[sp1]* states_[sp2].
        std::vector<double> childCounts_;
        std::vector<double> childProbs_;
        CountingSemaphore &semaphore_;
    };

} // namespace bayesnet
#endif // XSP2DE_H
450 bayesnet/classifiers/XSPODE.cc Normal file
@@ -0,0 +1,450 @@
// ***************************************************************
|
||||
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
|
||||
// SPDX-FileType: SOURCE
|
||||
// SPDX-License-Identifier: MIT
|
||||
// ***************************************************************
|
||||
#include <algorithm>
|
||||
#include <cmath>
|
||||
#include <limits>
|
||||
#include <numeric>
|
||||
#include <sstream>
|
||||
#include <stdexcept>
|
||||
#include "XSPODE.h"
|
||||
#include "bayesnet/utils/TensorUtils.h"
|
||||
|
||||
namespace bayesnet {
|
||||
|
||||
// --------------------------------------
|
||||
// Constructor
|
||||
// --------------------------------------
|
||||
XSpode::XSpode(int spIndex)
|
||||
: superParent_{ spIndex }, nFeatures_{ 0 }, statesClass_{ 0 }, alpha_{ 1.0 },
|
||||
initializer_{ 1.0 }, semaphore_{ CountingSemaphore::getInstance() },
|
||||
Classifier(Network())
|
||||
{
|
||||
validHyperparameters = { "parent" };
|
||||
}
|
||||
|
||||
void XSpode::setHyperparameters(const nlohmann::json& hyperparameters_)
|
||||
{
|
||||
auto hyperparameters = hyperparameters_;
|
||||
if (hyperparameters.contains("parent")) {
|
||||
superParent_ = hyperparameters["parent"];
|
||||
hyperparameters.erase("parent");
|
||||
}
|
||||
Classifier::setHyperparameters(hyperparameters);
|
||||
}
|
||||
|
||||
void XSpode::fitx(torch::Tensor & X, torch::Tensor& y, torch::Tensor& weights_, const Smoothing_t smoothing)
|
||||
{
|
||||
m = X.size(1);
|
||||
n = X.size(0);
|
||||
dataset = X;
|
||||
buildDataset(y);
|
||||
buildModel(weights_);
|
||||
trainModel(weights_, smoothing);
|
||||
fitted = true;
|
||||
}
|
||||
|
||||
// --------------------------------------
|
||||
// trainModel
|
||||
// --------------------------------------
|
||||
// Initialize storage needed for the super-parent and child features counts and
|
||||
// probs.
|
||||
// --------------------------------------
|
||||
void XSpode::buildModel(const torch::Tensor& weights)
|
||||
{
|
||||
int numInstances = m;
|
||||
nFeatures_ = n;
|
||||
|
||||
// Derive the number of states for each feature and for the class.
|
||||
// (This is just one approach; adapt to match your environment.)
|
||||
// Here, we assume the user also gave us the total #states per feature in e.g.
|
||||
// statesMap. We'll simply reconstruct the integer states_ array. The last
|
||||
// entry is statesClass_.
|
||||
states_.resize(nFeatures_);
|
||||
for (int f = 0; f < nFeatures_; f++) {
|
||||
// Suppose you look up in “statesMap” by the feature name, or read directly
|
||||
// from X. We'll assume states_[f] = max value in X[f] + 1.
|
||||
states_[f] = dataset[f].max().item<int>() + 1;
|
||||
}
|
||||
// For the class: states_.back() = max(y)+1
|
||||
statesClass_ = dataset[-1].max().item<int>() + 1;
|
||||
|
||||
// Initialize counts
|
||||
classCounts_.resize(statesClass_, 0.0);
|
||||
// p(x_sp = spVal | c)
|
||||
// We'll store these counts in spFeatureCounts_[spVal * statesClass_ + c].
|
||||
spFeatureCounts_.resize(states_[superParent_] * statesClass_, 0.0);
|
||||
|
||||
// For each child ≠ sp, we store p(childVal| c, spVal) in a separate block of
|
||||
// childCounts_. childCounts_ will be sized as sum_{child≠sp} (states_[child]
|
||||
// * statesClass_ * states_[sp]). We also need an offset for each child to
|
||||
// index into childCounts_.
|
||||
childOffsets_.resize(nFeatures_, -1);
|
||||
int totalSize = 0;
|
||||
for (int f = 0; f < nFeatures_; f++) {
|
||||
if (f == superParent_)
|
||||
continue; // skip sp
|
||||
childOffsets_[f] = totalSize;
|
||||
// block size for this child's counts: states_[f] * statesClass_ *
|
||||
// states_[superParent_]
|
||||
totalSize += (states_[f] * statesClass_ * states_[superParent_]);
|
||||
}
|
||||
childCounts_.resize(totalSize, 0.0);
|
||||
}
|
||||
// --------------------------------------
|
||||
// buildModel
|
||||
// --------------------------------------
|
||||
//
|
||||
// We only store conditional probabilities for:
|
||||
// p(x_sp| c) (the super-parent feature)
|
||||
// p(x_child| c, x_sp) for all child ≠ sp
|
||||
//
|
||||
// --------------------------------------
|
||||
void XSpode::trainModel(const torch::Tensor& weights,
|
||||
const bayesnet::Smoothing_t smoothing)
|
||||
{
|
||||
// Accumulate raw counts
|
||||
for (int i = 0; i < m; i++) {
|
||||
std::vector<int> instance(nFeatures_ + 1);
|
||||
for (int f = 0; f < nFeatures_; f++) {
|
||||
instance[f] = dataset[f][i].item<int>();
|
||||
}
|
||||
instance[nFeatures_] = dataset[-1][i].item<int>();
|
||||
addSample(instance, weights[i].item<double>());
|
||||
}
|
||||
switch (smoothing) {
|
||||
case bayesnet::Smoothing_t::ORIGINAL:
|
||||
alpha_ = 1.0 / m;
|
||||
break;
|
||||
case bayesnet::Smoothing_t::LAPLACE:
|
||||
alpha_ = 1.0;
|
||||
break;
|
||||
default:
|
||||
alpha_ = 0.0; // No smoothing
|
||||
}
|
||||
initializer_ = std::numeric_limits<double>::max() /
|
||||
(nFeatures_ * nFeatures_); // for numerical stability
|
||||
// Convert raw counts to probabilities
|
||||
computeProbabilities();
|
||||
}
|
||||
|
||||
// --------------------------------------
|
||||
// addSample
|
||||
// --------------------------------------
|
||||
//
|
||||
// instance has size nFeatures_ + 1, with the class at the end.
|
||||
// We add 1 to the appropriate counters for each (c, superParentVal, childVal).
|
||||
//
|
||||
void XSpode::addSample(const std::vector<int>& instance, double weight)
|
||||
{
|
||||
if (weight <= 0.0)
|
||||
return;
|
||||
|
||||
int c = instance.back();
|
||||
// (A) increment classCounts
|
||||
classCounts_[c] += weight;
|
||||
|
||||
// (B) increment super-parent counts => p(x_sp | c)
|
||||
int spVal = instance[superParent_];
|
||||
spFeatureCounts_[spVal * statesClass_ + c] += weight;
|
||||
|
||||
// (C) increment child counts => p(childVal | c, x_sp)
|
||||
for (int f = 0; f < nFeatures_; f++) {
|
||||
if (f == superParent_)
|
||||
continue;
|
||||
int childVal = instance[f];
|
||||
int offset = childOffsets_[f];
|
||||
// Compute index in childCounts_.
|
||||
// Layout: [ offset + (spVal * states_[f] + childVal) * statesClass_ + c ]
|
||||
int blockSize = states_[f] * statesClass_;
|
||||
int idx = offset + spVal * blockSize + childVal * statesClass_ + c;
|
||||
childCounts_[idx] += weight;
|
||||
}
|
||||
}
|
||||
|
||||
// --------------------------------------
|
||||
// computeProbabilities
|
||||
// --------------------------------------
|
||||
//
|
||||
// Once all samples are added in COUNTS mode, call this to:
|
||||
// p(c)
|
||||
// p(x_sp = spVal | c)
|
||||
// p(x_child = v | c, x_sp = s_sp)
|
||||
//
|
||||
// --------------------------------------
|
||||
void XSpode::computeProbabilities()
|
||||
{
|
||||
double totalCount =
|
||||
std::accumulate(classCounts_.begin(), classCounts_.end(), 0.0);
|
||||
|
||||
// p(c) => classPriors_
|
||||
classPriors_.resize(statesClass_, 0.0);
|
||||
if (totalCount <= 0.0) {
|
||||
// fallback => uniform
|
||||
double unif = 1.0 / static_cast<double>(statesClass_);
|
||||
for (int c = 0; c < statesClass_; c++) {
|
||||
classPriors_[c] = unif;
|
||||
}
|
||||
} else {
|
||||
for (int c = 0; c < statesClass_; c++) {
|
||||
classPriors_[c] =
|
||||
(classCounts_[c] + alpha_) / (totalCount + alpha_ * statesClass_);
|
||||
}
|
||||
}
|
||||
|
||||
// p(x_sp | c)
|
||||
spFeatureProbs_.resize(spFeatureCounts_.size());
|
||||
// denominator for spVal * statesClass_ + c is just classCounts_[c] + alpha_ *
|
||||
// (#states of sp)
|
||||
int spCard = states_[superParent_];
|
||||
for (int spVal = 0; spVal < spCard; spVal++) {
|
||||
for (int c = 0; c < statesClass_; c++) {
|
||||
double denom = classCounts_[c] + alpha_ * spCard;
|
||||
double num = spFeatureCounts_[spVal * statesClass_ + c] + alpha_;
|
||||
spFeatureProbs_[spVal * statesClass_ + c] = (denom <= 0.0 ? 0.0 : num / denom);
|
||||
}
|
||||
}
|
||||
|
||||
// p(x_child | c, x_sp)
|
||||
childProbs_.resize(childCounts_.size());
|
||||
for (int f = 0; f < nFeatures_; f++) {
|
||||
if (f == superParent_)
|
||||
continue;
|
||||
int offset = childOffsets_[f];
|
||||
int childCard = states_[f];
|
||||
|
||||
// For each spVal, c, childVal in childCounts_:
|
||||
for (int spVal = 0; spVal < spCard; spVal++) {
|
||||
for (int childVal = 0; childVal < childCard; childVal++) {
|
||||
for (int c = 0; c < statesClass_; c++) {
|
||||
int idx = offset + spVal * (childCard * statesClass_) +
|
||||
childVal * statesClass_ + c;
|
||||
|
||||
double num = childCounts_[idx] + alpha_;
|
||||
// denominator = spFeatureCounts_[spVal * statesClass_ + c] + alpha_ *
|
||||
// (#states of child)
|
||||
double denom =
|
||||
spFeatureCounts_[spVal * statesClass_ + c] + alpha_ * childCard;
|
||||
childProbs_[idx] = (denom <= 0.0 ? 0.0 : num / denom);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// --------------------------------------
|
||||
// predict_proba
|
||||
// --------------------------------------
|
||||
//
|
||||
// For a single instance x of dimension nFeatures_:
|
||||
// P(c | x) ∝ p(c) × p(x_sp | c) × ∏(child ≠ sp) p(x_child | c, x_sp).
|
||||
//
|
||||
// --------------------------------------
|
||||
std::vector<double> XSpode::predict_proba(const std::vector<int>& instance) const
|
||||
{
|
||||
if (!fitted) {
|
||||
throw std::logic_error(CLASSIFIER_NOT_FITTED);
|
||||
}
|
||||
std::vector<double> probs(statesClass_, 0.0);
|
||||
// Multiply p(c) × p(x_sp | c)
|
||||
int spVal = instance[superParent_];
|
||||
for (int c = 0; c < statesClass_; c++) {
|
||||
double pc = classPriors_[c];
|
||||
double pSpC = spFeatureProbs_[spVal * statesClass_ + c];
|
||||
probs[c] = pc * pSpC * initializer_;
|
||||
}
|
||||
|
||||
// Multiply by each child’s probability p(x_child | c, x_sp)
|
||||
for (int feature = 0; feature < nFeatures_; feature++) {
|
||||
if (feature == superParent_)
|
||||
continue; // skip sp
|
||||
int sf = instance[feature];
|
||||
int offset = childOffsets_[feature];
|
||||
int childCard = states_[feature]; // not used directly, but for clarity
|
||||
// Index into childProbs_ = offset + spVal*(childCard*statesClass_) +
|
||||
// childVal*statesClass_ + c
|
||||
int base = offset + spVal * (childCard * statesClass_) + sf * statesClass_;
|
||||
for (int c = 0; c < statesClass_; c++) {
|
||||
probs[c] *= childProbs_[base + c];
|
||||
}
|
||||
}
|
||||
|
||||
// Normalize
|
||||
normalize(probs);
|
||||
return probs;
|
||||
}
    std::vector<std::vector<double>> XSpode::predict_proba(std::vector<std::vector<int>>& test_data)
    {
        int test_size = test_data[0].size();
        int sample_size = test_data.size();
        auto probabilities = std::vector<std::vector<double>>(
            test_size, std::vector<double>(statesClass_));

        int chunk_size = std::min(150, int(test_size / semaphore_.getMaxCount()) + 1);
        std::vector<std::thread> threads;
        auto worker = [&](const std::vector<std::vector<int>>& samples, int begin,
            int chunk, int sample_size,
            std::vector<std::vector<double>>& predictions) {
                std::string threadName =
                    "(V)PWorker-" + std::to_string(begin) + "-" + std::to_string(chunk);
#if defined(__linux__)
                pthread_setname_np(pthread_self(), threadName.c_str());
#else
                pthread_setname_np(threadName.c_str());
#endif

                std::vector<int> instance(sample_size);
                for (int sample = begin; sample < begin + chunk; ++sample) {
                    for (int feature = 0; feature < sample_size; ++feature) {
                        instance[feature] = samples[feature][sample];
                    }
                    predictions[sample] = predict_proba(instance);
                }
                semaphore_.release();
            };
        for (int begin = 0; begin < test_size; begin += chunk_size) {
            int chunk = std::min(chunk_size, test_size - begin);
            semaphore_.acquire();
            threads.emplace_back(worker, test_data, begin, chunk, sample_size, std::ref(probabilities));
        }
        for (auto& thread : threads) {
            thread.join();
        }
        return probabilities;
    }
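The batched overload above bounds concurrency with the library's CountingSemaphore: a slot is acquired before each chunk is dispatched and released by the worker when the chunk finishes. A minimal sketch of the same dispatch pattern, substituting C++20's std::counting_semaphore for the project's wrapper (an assumption made only to keep the sketch self-contained):

```cpp
// Sketch: chunked parallel dispatch bounded by a counting semaphore.
#include <algorithm>
#include <semaphore>
#include <thread>
#include <vector>

int main() {
    std::counting_semaphore<8> slots(8);   // at most 8 chunks in flight
    const int n = 1000, chunk_size = 150;
    std::vector<int> out(n, 0);
    std::vector<std::thread> threads;
    for (int begin = 0; begin < n; begin += chunk_size) {
        int chunk = std::min(chunk_size, n - begin);
        slots.acquire();                   // block until a slot frees up
        threads.emplace_back([&out, &slots, begin, chunk] {
            for (int i = begin; i < begin + chunk; ++i) out[i] = i * i; // stand-in work
            slots.release();               // hand the slot to the next chunk
        });
    }
    for (auto& t : threads) t.join();      // results land in `out`
}
```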

    // --------------------------------------
    // Utility: normalize
    // --------------------------------------
    void XSpode::normalize(std::vector<double>& v) const
    {
        double sum = 0.0;
        for (auto val : v) {
            sum += val;
        }
        if (sum <= 0.0) {
            return;
        }
        for (auto& val : v) {
            val /= sum;
        }
    }

    // --------------------------------------
    // representation of the model
    // --------------------------------------
    std::string XSpode::to_string() const
    {
        std::ostringstream oss;
        oss << "----- XSpode Model -----" << std::endl
            << "nFeatures_ = " << nFeatures_ << std::endl
            << "superParent_ = " << superParent_ << std::endl
            << "statesClass_ = " << statesClass_ << std::endl
            << std::endl;

        oss << "States: [";
        for (int s : states_)
            oss << s << " ";
        oss << "]" << std::endl;
        oss << "classCounts_: [";
        for (double c : classCounts_)
            oss << c << " ";
        oss << "]" << std::endl;
        oss << "classPriors_: [";
        for (double c : classPriors_)
            oss << c << " ";
        oss << "]" << std::endl;
        oss << "spFeatureCounts_: size = " << spFeatureCounts_.size() << std::endl
            << "[";
        for (double c : spFeatureCounts_)
            oss << c << " ";
        oss << "]" << std::endl;
        oss << "spFeatureProbs_: size = " << spFeatureProbs_.size() << std::endl
            << "[";
        for (double c : spFeatureProbs_)
            oss << c << " ";
        oss << "]" << std::endl;
        oss << "childCounts_: size = " << childCounts_.size() << std::endl << "[";
        for (double cc : childCounts_)
            oss << cc << " ";
        oss << "]" << std::endl;
        oss << "childProbs_: size = " << childProbs_.size() << std::endl << "[";
        for (double cp : childProbs_)
            oss << cp << " ";
        oss << "]" << std::endl;
        oss << "childOffsets_: [";
        for (int co : childOffsets_)
            oss << co << " ";
        oss << "]" << std::endl;
        oss << std::string(40, '-') << std::endl;
        return oss.str();
    }
    int XSpode::getNumberOfNodes() const { return nFeatures_ + 1; }
    int XSpode::getClassNumStates() const { return statesClass_; }
    int XSpode::getNFeatures() const { return nFeatures_; }
    int XSpode::getNumberOfStates() const
    {
        return std::accumulate(states_.begin(), states_.end(), 0) * nFeatures_;
    }
    int XSpode::getNumberOfEdges() const
    {
        return 2 * nFeatures_ + 1;
    }

    // ------------------------------------------------------
    // Predict overrides (classifier interface)
    // ------------------------------------------------------
    int XSpode::predict(const std::vector<int>& instance) const
    {
        auto p = predict_proba(instance);
        return static_cast<int>(std::distance(p.begin(), std::max_element(p.begin(), p.end())));
    }
    std::vector<int> XSpode::predict(std::vector<std::vector<int>>& test_data)
    {
        auto probabilities = predict_proba(test_data);
        std::vector<int> predictions(probabilities.size(), 0);

        for (size_t i = 0; i < probabilities.size(); i++) {
            predictions[i] = std::distance(
                probabilities[i].begin(),
                std::max_element(probabilities[i].begin(), probabilities[i].end()));
        }
        return predictions;
    }
    torch::Tensor XSpode::predict(torch::Tensor& X)
    {
        auto X_ = TensorUtils::to_matrix(X);
        auto result_v = predict(X_);
        return torch::tensor(result_v, torch::kInt32);
    }
    torch::Tensor XSpode::predict_proba(torch::Tensor& X)
    {
        auto X_ = TensorUtils::to_matrix(X);
        auto result_v = predict_proba(X_);
        int n_samples = X.size(1);
        torch::Tensor result =
            torch::zeros({ n_samples, statesClass_ }, torch::kDouble);
        for (int i = 0; i < result_v.size(); ++i) {
            result.index_put_({ i, "..." }, torch::tensor(result_v[i]));
        }
        return result;
    }
    float XSpode::score(torch::Tensor& X, torch::Tensor& y)
    {
        torch::Tensor y_pred = predict(X);
        return (y_pred == y).sum().item<float>() / y.size(0);
    }
    float XSpode::score(std::vector<std::vector<int>>& X, std::vector<int>& y)
    {
        auto y_pred = this->predict(X);
        int correct = 0;
        for (size_t i = 0; i < y_pred.size(); ++i) {
            if (y_pred[i] == y[i]) {
                correct++;
            }
        }
        return static_cast<float>(correct) / y_pred.size();
    }
} // namespace bayesnet
76
bayesnet/classifiers/XSPODE.h
Normal file
@@ -0,0 +1,76 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef XSPODE_H
#define XSPODE_H

#include <vector>
#include <torch/torch.h>
#include "Classifier.h"
#include "bayesnet/utils/CountingSemaphore.h"

namespace bayesnet {

    class XSpode : public Classifier {
    public:
        explicit XSpode(int spIndex);
        std::vector<double> predict_proba(const std::vector<int>& instance) const;
        std::vector<std::vector<double>> predict_proba(std::vector<std::vector<int>>& X) override;
        int predict(const std::vector<int>& instance) const;
        void normalize(std::vector<double>& v) const;
        std::string to_string() const;
        int getNFeatures() const;
        int getNumberOfNodes() const override;
        int getNumberOfEdges() const override;
        int getNumberOfStates() const override;
        int getClassNumStates() const override;
        std::vector<int>& getStates();
        std::vector<std::string> graph(const std::string& title) const override { return std::vector<std::string>({ title }); }
        void fitx(torch::Tensor& X, torch::Tensor& y, torch::Tensor& weights_, const Smoothing_t smoothing);
        void setHyperparameters(const nlohmann::json& hyperparameters_) override;

        //
        // Classifier interface
        //
        torch::Tensor predict(torch::Tensor& X) override;
        std::vector<int> predict(std::vector<std::vector<int>>& X) override;
        torch::Tensor predict_proba(torch::Tensor& X) override;
        float score(torch::Tensor& X, torch::Tensor& y) override;
        float score(std::vector<std::vector<int>>& X, std::vector<int>& y) override;
    protected:
        void buildModel(const torch::Tensor& weights) override;
        void trainModel(const torch::Tensor& weights, const bayesnet::Smoothing_t smoothing) override;
    private:
        void addSample(const std::vector<int>& instance, double weight);
        void computeProbabilities();
        int superParent_;
        int nFeatures_;
        int statesClass_;
        std::vector<int> states_; // [states_feat0, ..., states_feat(N-1)] (class not included in this array)

        // Class counts
        std::vector<double> classCounts_; // [c], cumulative
        std::vector<double> classPriors_; // [c], after normalization

        // For p(x_sp = spVal | c)
        std::vector<double> spFeatureCounts_; // [spVal * statesClass_ + c]
        std::vector<double> spFeatureProbs_;  // same shape, after normalization

        // For p(x_child = childVal | x_sp = spVal, c)
        // childCounts_ is big enough to hold all child features except sp:
        // For each child f, we store childOffsets_[f] as the start index, then
        // spVal, childVal, c => the data.
        std::vector<double> childCounts_;
        std::vector<double> childProbs_;
        std::vector<int> childOffsets_;

        double alpha_ = 1.0;
        double initializer_; // for numerical stability
        CountingSemaphore& semaphore_;
    };
}

#endif // XSPODE_H
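For orientation, a hypothetical fit/predict round trip with XSpode could look like the sketch below. The fit signature is the one used on models elsewhere in this diff, but the Smoothing_t value is an assumption, so treat this as a sketch rather than documented usage:

```cpp
// Hypothetical usage sketch (fit signature taken from calls in this diff).
#include <map>
#include <string>
#include <vector>
#include "bayesnet/classifiers/XSPODE.h"

void example(torch::Tensor& dataset,                  // (nFeatures + 1) x m, class in last row
             std::vector<std::string>& features,
             std::string& className,
             std::map<std::string, std::vector<int>>& states)
{
    auto weights = torch::full({ dataset.size(1) }, 1.0, torch::kFloat64);
    bayesnet::XSpode spode(0);                        // feature 0 acts as super-parent
    spode.fit(dataset, features, className, states, weights,
              bayesnet::Smoothing_t::LAPLACE);        // enum value assumed
    std::vector<int> instance(spode.getNFeatures(), 0);
    int label = spode.predict(instance);              // arg-max over predict_proba
    (void)label;
}
```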
@@ -9,6 +9,7 @@
namespace bayesnet {
    AODELd::AODELd(bool predict_voting) : Ensemble(predict_voting), Proposal(dataset, features, className)
    {
        validHyperparameters = validHyperparameters_ld; // Inherits the valid hyperparameters from Proposal
    }
    AODELd& AODELd::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_, const Smoothing_t smoothing)
    {
@@ -20,7 +21,8 @@ namespace bayesnet {
        // Fills std::vectors Xv & yv with the data from tensors X_ (discretized) & y
        states = fit_local_discretization(y);
        // We have discretized the input data
        // 1st we need to fit the model to build the normal TAN structure, TAN::fit initializes the base Bayesian network
        // 1st we need to fit the model to build the normal AODE structure, Ensemble::fit
        // calls buildModel to initialize the base models
        Ensemble::fit(dataset, features, className, states, smoothing);
        return *this;

@@ -30,6 +32,7 @@ namespace bayesnet {
        models.clear();
        for (int i = 0; i < features.size(); ++i) {
            models.push_back(std::make_unique<SPODELd>(i));
            models.back()->setHyperparameters(hyperparameters);
        }
        n_models = models.size();
        significanceModels = std::vector<double>(n_models, 1.0);

@@ -20,6 +20,8 @@ namespace bayesnet {
    protected:
        void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) override;
        void buildModel(const torch::Tensor& weights) override;
    private:
        nlohmann::json hyperparameters = {}; // Hyperparameters for the model
    };
}
#endif // !AODELD_H
@@ -3,29 +3,32 @@
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#include <folding.hpp>
#include "Boost.h"
#include "bayesnet/feature_selection/CFS.h"
#include "bayesnet/feature_selection/FCBF.h"
#include "bayesnet/feature_selection/IWSS.h"
#include "Boost.h"
#include <folding.hpp>

namespace bayesnet {
    Boost::Boost(bool predict_voting) : Ensemble(predict_voting)
    {
        validHyperparameters = { "order", "convergence", "convergence_best", "bisection", "threshold", "maxTolerance",
            "predict_voting", "select_features", "block_update" };
    }
    void Boost::setHyperparameters(const nlohmann::json& hyperparameters_)
    {
    Boost::Boost(bool predict_voting) : Ensemble(predict_voting) {
        validHyperparameters = {"alpha_block", "order", "convergence", "convergence_best", "bisection",
                                "threshold", "maxTolerance", "predict_voting", "select_features", "block_update"};
    }
    void Boost::setHyperparameters(const nlohmann::json &hyperparameters_) {
        auto hyperparameters = hyperparameters_;
        if (hyperparameters.contains("order")) {
            std::vector<std::string> algos = { Orders.ASC, Orders.DESC, Orders.RAND };
            std::vector<std::string> algos = {Orders.ASC, Orders.DESC, Orders.RAND};
            order_algorithm = hyperparameters["order"];
            if (std::find(algos.begin(), algos.end(), order_algorithm) == algos.end()) {
                throw std::invalid_argument("Invalid order algorithm, valid values [" + Orders.ASC + ", " + Orders.DESC + ", " + Orders.RAND + "]");
                throw std::invalid_argument("Invalid order algorithm, valid values [" + Orders.ASC + ", " + Orders.DESC +
                                            ", " + Orders.RAND + "]");
            }
            hyperparameters.erase("order");
        }
        if (hyperparameters.contains("alpha_block")) {
            alpha_block = hyperparameters["alpha_block"];
            hyperparameters.erase("alpha_block");
        }
        if (hyperparameters.contains("convergence")) {
            convergence = hyperparameters["convergence"];
            hyperparameters.erase("convergence");
@@ -44,8 +47,8 @@ namespace bayesnet {
        }
        if (hyperparameters.contains("maxTolerance")) {
            maxTolerance = hyperparameters["maxTolerance"];
            if (maxTolerance < 1 || maxTolerance > 4)
                throw std::invalid_argument("Invalid maxTolerance value, must be greater in [1, 4]");
            if (maxTolerance < 1 || maxTolerance > 6)
                throw std::invalid_argument("Invalid maxTolerance value, must be greater in [1, 6]");
            hyperparameters.erase("maxTolerance");
        }
        if (hyperparameters.contains("predict_voting")) {
@@ -54,11 +57,12 @@ namespace bayesnet {
        }
        if (hyperparameters.contains("select_features")) {
            auto selectedAlgorithm = hyperparameters["select_features"];
            std::vector<std::string> algos = { SelectFeatures.IWSS, SelectFeatures.CFS, SelectFeatures.FCBF };
            std::vector<std::string> algos = {SelectFeatures.IWSS, SelectFeatures.CFS, SelectFeatures.FCBF};
            selectFeatures = true;
            select_features_algorithm = selectedAlgorithm;
            if (std::find(algos.begin(), algos.end(), selectedAlgorithm) == algos.end()) {
                throw std::invalid_argument("Invalid selectFeatures value, valid values [" + SelectFeatures.IWSS + ", " + SelectFeatures.CFS + ", " + SelectFeatures.FCBF + "]");
                throw std::invalid_argument("Invalid selectFeatures value, valid values [" + SelectFeatures.IWSS + ", " +
                                            SelectFeatures.CFS + ", " + SelectFeatures.FCBF + "]");
            }
            hyperparameters.erase("select_features");
        }
@@ -66,16 +70,31 @@ namespace bayesnet {
            block_update = hyperparameters["block_update"];
            hyperparameters.erase("block_update");
        }
        Classifier::setHyperparameters(hyperparameters);
        if (block_update && alpha_block) {
            throw std::invalid_argument("alpha_block and block_update cannot be true at the same time");
        }
    void Boost::buildModel(const torch::Tensor& weights)
    {
        if (block_update && !bisection) {
            throw std::invalid_argument("block_update needs bisection to be true");
        }
        Classifier::setHyperparameters(hyperparameters);
    }
    void Boost::add_model(std::unique_ptr<Classifier> model, double significance) {
        models.push_back(std::move(model));
        n_models++;
        significanceModels.push_back(significance);
    }
    void Boost::remove_last_model() {
        models.pop_back();
        significanceModels.pop_back();
        n_models--;
    }
    void Boost::buildModel(const torch::Tensor &weights) {
        // Models shall be built in trainModel
        models.clear();
        significanceModels.clear();
        n_models = 0;
        // Prepare the validation dataset
        auto y_ = dataset.index({ -1, "..." });
        auto y_ = dataset.index({-1, "..."});
        if (convergence) {
            // Prepare train & validation sets from train data
            auto fold = folding::StratifiedKFold(5, y_, 271);
@@ -83,10 +102,10 @@ namespace bayesnet {
            auto train_t = torch::tensor(train);
            auto test_t = torch::tensor(test);
            // Get train and validation sets
            X_train = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), train_t });
            y_train = dataset.index({ -1, train_t });
            X_test = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), test_t });
            y_test = dataset.index({ -1, test_t });
            X_train = dataset.index({torch::indexing::Slice(0, dataset.size(0) - 1), train_t});
            y_train = dataset.index({-1, train_t});
            X_test = dataset.index({torch::indexing::Slice(0, dataset.size(0) - 1), test_t});
            y_test = dataset.index({-1, test_t});
            dataset = X_train;
            m = X_train.size(1);
            auto n_classes = states.at(className).size();
@@ -95,39 +114,42 @@ namespace bayesnet {
            metrics = Metrics(dataset, features, className, n_classes);
        } else {
            // Use all data to train
            X_train = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), "..." });
            X_train = dataset.index({torch::indexing::Slice(0, dataset.size(0) - 1), "..."});
            y_train = y_;
        }
    }
    std::vector<int> Boost::featureSelection(torch::Tensor& weights_)
    {
    }
    std::vector<int> Boost::featureSelection(torch::Tensor &weights_) {
        int maxFeatures = 0;
        if (select_features_algorithm == SelectFeatures.CFS) {
            featureSelector = new CFS(dataset, features, className, maxFeatures, states.at(className).size(), weights_);
        } else if (select_features_algorithm == SelectFeatures.IWSS) {
            if (threshold < 0 || threshold >0.5) {
            if (threshold < 0 || threshold > 0.5) {
                throw std::invalid_argument("Invalid threshold value for " + SelectFeatures.IWSS + " [0, 0.5]");
            }
            featureSelector = new IWSS(dataset, features, className, maxFeatures, states.at(className).size(), weights_, threshold);
            featureSelector =
                new IWSS(dataset, features, className, maxFeatures, states.at(className).size(), weights_, threshold);
        } else if (select_features_algorithm == SelectFeatures.FCBF) {
            if (threshold < 1e-7 || threshold > 1) {
                throw std::invalid_argument("Invalid threshold value for " + SelectFeatures.FCBF + " [1e-7, 1]");
            }
            featureSelector = new FCBF(dataset, features, className, maxFeatures, states.at(className).size(), weights_, threshold);
            featureSelector =
                new FCBF(dataset, features, className, maxFeatures, states.at(className).size(), weights_, threshold);
        }
        featureSelector->fit();
        auto featuresUsed = featureSelector->getFeatures();
        delete featureSelector;
        return featuresUsed;
    }
    std::tuple<torch::Tensor&, double, bool> Boost::update_weights(torch::Tensor& ytrain, torch::Tensor& ypred, torch::Tensor& weights)
    {
    }
    std::tuple<torch::Tensor &, double, bool> Boost::update_weights(torch::Tensor &ytrain, torch::Tensor &ypred,
                                                                    torch::Tensor &weights) {
        bool terminate = false;
        double alpha_t = 0;
        auto mask_wrong = ypred != ytrain;
        auto mask_right = ypred == ytrain;
        auto masked_weights = weights * mask_wrong.to(weights.dtype());
        double epsilon_t = masked_weights.sum().item<double>();
        // std::cout << "epsilon_t: " << epsilon_t << " count wrong: " << mask_wrong.sum().item<int>() << " count right: "
        // << mask_right.sum().item<int>() << std::endl;
        if (epsilon_t > 0.5) {
            // Inverse the weights policy (plot ln(wt))
            // "In each round of AdaBoost, there is a sanity check to ensure that the current base
@@ -145,10 +167,10 @@ namespace bayesnet {
            double totalWeights = torch::sum(weights).item<double>();
            weights = weights / totalWeights;
        }
        return { weights, alpha_t, terminate };
    }
    std::tuple<torch::Tensor&, double, bool> Boost::update_weights_block(int k, torch::Tensor& ytrain, torch::Tensor& weights)
    {
        return {weights, alpha_t, terminate};
    }
    std::tuple<torch::Tensor &, double, bool> Boost::update_weights_block(int k, torch::Tensor &ytrain,
                                                                          torch::Tensor &weights) {
        /* Update Block algorithm
           k = # of models in block
           n_models = # of models in ensemble to make predictions
@@ -241,6 +263,6 @@ namespace bayesnet {
        }
        // 10. n_models <- n_models_bak
        n_models = n_models_bak;
        return { weights, alpha_t, terminate };
    }
        return {weights, alpha_t, terminate};
    }
} // namespace bayesnet
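For reference, the quantities handled in update_weights are the usual AdaBoost ones: the weighted error and the "amount of say". The exact constant used for alpha_t sits outside this hunk, so the SAMME form below is an assumption consistent with the multi-class setting:

$$\varepsilon_t=\sum_{i:\,h_t(x_i)\neq y_i} w_i,\qquad \alpha_t=\ln\!\frac{1-\varepsilon_t}{\varepsilon_t}+\ln(K-1),\qquad w_i\leftarrow \frac{w_i\,\exp\!\big(\alpha_t\,\mathbb{1}[h_t(x_i)\neq y_i]\big)}{\sum_j w_j}$$

The epsilon_t > 0.5 branch is the sanity check quoted in the comment: a base learner worse than chance would receive a negative vote, so the weight policy is inverted or training terminates.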

@@ -27,26 +27,31 @@ namespace bayesnet {
    class Boost : public Ensemble {
    public:
        explicit Boost(bool predict_voting = false);
        virtual ~Boost() = default;
        virtual ~Boost() override = default;
        void setHyperparameters(const nlohmann::json& hyperparameters_) override;
    protected:
        std::vector<int> featureSelection(torch::Tensor& weights_);
        void buildModel(const torch::Tensor& weights) override;
        std::tuple<torch::Tensor&, double, bool> update_weights(torch::Tensor& ytrain, torch::Tensor& ypred, torch::Tensor& weights);
        std::tuple<torch::Tensor&, double, bool> update_weights_block(int k, torch::Tensor& ytrain, torch::Tensor& weights);
        void add_model(std::unique_ptr<Classifier> model, double significance);
        void remove_last_model();
        //
        // Attributes
        //
        torch::Tensor X_train, y_train, X_test, y_test;
        // Hyperparameters
        bool bisection = true; // if true, use bisection strategy to add k models at once to the ensemble
        int maxTolerance = 3;
        std::string order_algorithm; // order to process the KBest features asc, desc, rand
        std::string order_algorithm = Orders.DESC; // order to process the KBest features asc, desc, rand
        bool convergence = true; // if true, stop when the model does not improve
        bool convergence_best = false; // whether to keep the best accuracy so far or the last accuracy as prior accuracy
        bool selectFeatures = false; // if true, use feature selection
        std::string select_features_algorithm = Orders.DESC; // Selected feature selection algorithm
        std::string select_features_algorithm; // Selected feature selection algorithm
        FeatureSelect* featureSelector = nullptr;
        double threshold = -1;
        bool block_update = false;

        bool block_update = false; // if true, use block update algorithm, only meaningful if bisection is true
        bool alpha_block = false; // if true, the alpha is computed with the ensemble built so far and the new model
    };
}
#endif
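Putting the constraints above together, a configuration such as the following would pass validation. This is a sketch: the concrete strings behind Orders.* and SelectFeatures.* are assumptions, and booster stands for any Boost-derived ensemble:

```cpp
#include <nlohmann/json.hpp>
#include "Boost.h"

void configure(bayesnet::Boost& booster)
{
    nlohmann::json hyper = {
        { "order", "desc" },          // asc | desc | rand (exact strings assumed)
        { "convergence", true },
        { "convergence_best", false },
        { "bisection", true },
        { "maxTolerance", 3 },        // must now lie in [1, 6]
        { "select_features", "CFS" },
        { "alpha_block", true }       // mutually exclusive with block_update
    };
    booster.setHyperparameters(hyper);
}
```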
@@ -4,14 +4,9 @@
// SPDX-License-Identifier: MIT
// ***************************************************************

#include <set>
#include <functional>
#include <limits.h>
#include <tuple>
#include <folding.hpp>
#include "bayesnet/feature_selection/CFS.h"
#include "bayesnet/feature_selection/FCBF.h"
#include "bayesnet/feature_selection/IWSS.h"
#include "BoostA2DE.h"

namespace bayesnet {
@@ -59,6 +54,9 @@ namespace bayesnet {
        std::vector<int> featuresUsed;
        if (selectFeatures) {
            featuresUsed = initializeModels(smoothing);
            if (featuresUsed.size() == 0) {
                return;
            }
            auto ypred = predict(X_train);
            std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
            // Update significance of the models
@@ -4,12 +4,14 @@
// SPDX-License-Identifier: MIT
// ***************************************************************

#include "BoostAODE.h"
#include "bayesnet/classifiers/SPODE.h"
#include <limits.h>
// #include <loguru.cpp>
// #include <loguru.hpp>
#include <random>
#include <set>
#include <functional>
#include <limits.h>
#include <tuple>
#include "BoostAODE.h"

namespace bayesnet {

@@ -46,14 +48,16 @@ namespace bayesnet {
        torch::Tensor weights_ = torch::full({ m }, 1.0 / m, torch::kFloat64);
        bool finished = false;
        std::vector<int> featuresUsed;
        n_models = 0;
        if (selectFeatures) {
            featuresUsed = initializeModels(smoothing);
            auto ypred = predict(X_train);
            std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
            // Update significance of the models
            for (int i = 0; i < n_models; ++i) {
                significanceModels[i] = alpha_t;
                significanceModels.push_back(alpha_t);
            }
            // VLOG_SCOPE_F(1, "SelectFeatures. alpha_t: %f n_models: %d", alpha_t, n_models);
            if (finished) {
                return;
            }
@@ -77,10 +81,8 @@ namespace bayesnet {
            std::shuffle(featureSelection.begin(), featureSelection.end(), g);
        }
        // Remove used features
        featureSelection.erase(remove_if(begin(featureSelection), end(featureSelection), [&](auto x)
            { return std::find(begin(featuresUsed), end(featuresUsed), x) != end(featuresUsed);}),
            end(featureSelection)
        );
        featureSelection.erase(remove_if(begin(featureSelection), end(featureSelection), [&](auto x) { return std::find(begin(featuresUsed), end(featuresUsed), x) != end(featuresUsed); }),
            end(featureSelection));
        int k = bisection ? pow(2, tolerance) : 1;
        int counter = 0; // The model counter of the current pack
        // VLOG_SCOPE_F(1, "counter=%d k=%d featureSelection.size: %zu", counter, k, featureSelection.size());
@@ -92,7 +94,25 @@ namespace bayesnet {
            model->fit(dataset, features, className, states, weights_, smoothing);
            alpha_t = 0.0;
            if (!block_update) {
                auto ypred = model->predict(X_train);
                torch::Tensor ypred;
                if (alpha_block) {
                    //
                    // Compute the prediction with the current ensemble + model
                    //
                    // Add the model to the ensemble
                    n_models++;
                    models.push_back(std::move(model));
                    significanceModels.push_back(1);
                    // Compute the prediction
                    ypred = predict(X_train);
                    // Remove the model from the ensemble
                    model = std::move(models.back());
                    models.pop_back();
                    significanceModels.pop_back();
                    n_models--;
                } else {
                    ypred = model->predict(X_train);
                }
                // Step 3.1: Compute the classifier amount of say
                std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
            }
@@ -102,7 +122,7 @@ namespace bayesnet {
            models.push_back(std::move(model));
            significanceModels.push_back(alpha_t);
            n_models++;
            // VLOG_SCOPE_F(2, "numItemsPack: %d n_models: %d featuresUsed: %zu", numItemsPack, n_models, featuresUsed.size());
            // VLOG_SCOPE_F(2, "finished: %d numItemsPack: %d n_models: %d featuresUsed: %zu", finished, numItemsPack, n_models, featuresUsed.size());
        }
        if (block_update) {
            std::tie(weights_, alpha_t, finished) = update_weights_block(k, y_train, weights_);
@@ -145,7 +165,7 @@ namespace bayesnet {
            }
        } else {
            notes.push_back("Convergence threshold reached & 0 models eliminated");
            // VLOG_SCOPE_F(4, "Convergence threshold reached & 0 models eliminated n_models=%d numItemsPack=%d", n_models, numItemsPack);
            // VLG_SCOPE_F(4, "Convergence threshold reached & 0 models eliminated n_models=%d numItemsPack=%d", n_models, numItemsPack);
        }
        }
        if (featuresUsed.size() != features.size()) {
@@ -8,7 +8,6 @@
#define BOOSTAODE_H
#include <string>
#include <vector>
#include "bayesnet/classifiers/SPODE.h"
#include "Boost.h"

namespace bayesnet {

@@ -4,7 +4,6 @@
// SPDX-License-Identifier: MIT
// ***************************************************************
#include "Ensemble.h"
#include "bayesnet/utils/CountingSemaphore.h"

namespace bayesnet {

@@ -86,6 +85,7 @@ namespace bayesnet {
        torch::Tensor y_pred = torch::zeros({ X.size(1), n_states }, torch::kFloat32);
        for (auto i = 0; i < n_models; ++i) {
            auto ypredict = models[i]->predict_proba(X);
            /*std::cout << "model " << i << " prediction: " << ypredict << " significance " << significanceModels[i] << std::endl;*/
            y_pred += ypredict * significanceModels[i];
        }
        auto sum = std::reduce(significanceModels.begin(), significanceModels.end());
@@ -33,9 +33,15 @@ namespace bayesnet {
        }
        std::string dump_cpt() const override
        {
            return "";
            std::string output;
            for (auto& model : models) {
                output += model->dump_cpt();
                output += std::string(80, '-') + "\n";
            }
            return output;
        }
    protected:
        void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) override;
        torch::Tensor predict_average_voting(torch::Tensor& X);
        std::vector<std::vector<double>> predict_average_voting(std::vector<std::vector<int>>& X);
        torch::Tensor predict_average_proba(torch::Tensor& X);
@@ -43,10 +49,10 @@ namespace bayesnet {
        torch::Tensor compute_arg_max(torch::Tensor& X);
        std::vector<int> compute_arg_max(std::vector<std::vector<double>>& X);
        torch::Tensor voting(torch::Tensor& votes);
        // Attributes
        unsigned n_models;
        std::vector<std::unique_ptr<Classifier>> models;
        std::vector<double> significanceModels;
        void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) override;
        bool predict_voting;
    };
}

168
bayesnet/ensembles/XBA2DE.cc
Normal file
@@ -0,0 +1,168 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2025 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#include <folding.hpp>
#include <limits.h>
#include "XBA2DE.h"
#include "bayesnet/classifiers/XSP2DE.h"
#include "bayesnet/utils/TensorUtils.h"

namespace bayesnet {

    XBA2DE::XBA2DE(bool predict_voting) : Boost(predict_voting) {}
    std::vector<int> XBA2DE::initializeModels(const Smoothing_t smoothing) {
        torch::Tensor weights_ = torch::full({m}, 1.0 / m, torch::kFloat64);
        std::vector<int> featuresSelected = featureSelection(weights_);
        if (featuresSelected.size() < 2) {
            notes.push_back("No features selected in initialization");
            status = ERROR;
            return std::vector<int>();
        }
        for (int i = 0; i < featuresSelected.size() - 1; i++) {
            for (int j = i + 1; j < featuresSelected.size(); j++) {
                std::unique_ptr<Classifier> model = std::make_unique<XSp2de>(featuresSelected[i], featuresSelected[j]);
                model->fit(dataset, features, className, states, weights_, smoothing);
                add_model(std::move(model), 1.0);
            }
        }
        notes.push_back("Used features in initialization: " + std::to_string(featuresSelected.size()) + " of " +
                        std::to_string(features.size()) + " with " + select_features_algorithm);
        return featuresSelected;
    }
    void XBA2DE::trainModel(const torch::Tensor &weights, const Smoothing_t smoothing) {
        //
        // Logging setup
        //
        // loguru::set_thread_name("XBA2DE");
        // loguru::g_stderr_verbosity = loguru::Verbosity_OFF;
        // loguru::add_file("boostA2DE.log", loguru::Truncate, loguru::Verbosity_MAX);

        // Algorithm based on the AdaBoost algorithm for classification
        // as explained in Ensemble Methods (Zhi-Hua Zhou, 2012)
        X_train_ = TensorUtils::to_matrix(X_train);
        y_train_ = TensorUtils::to_vector<int>(y_train);
        if (convergence) {
            X_test_ = TensorUtils::to_matrix(X_test);
            y_test_ = TensorUtils::to_vector<int>(y_test);
        }
        fitted = true;
        double alpha_t = 0;
        torch::Tensor weights_ = torch::full({m}, 1.0 / m, torch::kFloat64);
        bool finished = false;
        std::vector<int> featuresUsed;
        if (selectFeatures) {
            featuresUsed = initializeModels(smoothing);
            if (featuresUsed.size() == 0) {
                return;
            }
            auto ypred = predict(X_train);
            std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
            // Update significance of the models
            for (int i = 0; i < n_models; ++i) {
                significanceModels[i] = alpha_t;
            }
            if (finished) {
                return;
            }
        }
        int numItemsPack = 0; // The counter of the models inserted in the current pack
        // Variables to control the accuracy finish condition
        double priorAccuracy = 0.0;
        double improvement = 1.0;
        double convergence_threshold = 1e-4;
        int tolerance = 0; // number of times the accuracy is lower than the convergence_threshold
        // Step 0: Set the finish condition
        //   epsilon sub t > 0.5 => inverse the weights policy
        //   validation error is not decreasing
        //   run out of features
        bool ascending = order_algorithm == Orders.ASC;
        std::mt19937 g{173};
        std::vector<std::pair<int, int>> pairSelection;
        while (!finished) {
            // Step 1: Build ranking with mutual information
            pairSelection = metrics.SelectKPairs(weights_, featuresUsed, ascending, 0); // Get all the pairs sorted
            if (order_algorithm == Orders.RAND) {
                std::shuffle(pairSelection.begin(), pairSelection.end(), g);
            }
            int k = bisection ? pow(2, tolerance) : 1;
            int counter = 0; // The model counter of the current pack
            // VLOG_SCOPE_F(1, "counter=%d k=%d featureSelection.size: %zu", counter, k, featureSelection.size());
            while (counter++ < k && pairSelection.size() > 0) {
                auto feature_pair = pairSelection[0];
                pairSelection.erase(pairSelection.begin());
                std::unique_ptr<Classifier> model;
                model = std::make_unique<XSp2de>(feature_pair.first, feature_pair.second);
                model->fit(dataset, features, className, states, weights_, smoothing);
                alpha_t = 0.0;
                if (!block_update) {
                    auto ypred = model->predict(X_train);
                    // Step 3.1: Compute the classifier amount of say
                    std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
                }
                // Step 3.4: Store classifier and its accuracy to weigh its future vote
                numItemsPack++;
                models.push_back(std::move(model));
                significanceModels.push_back(alpha_t);
                n_models++;
                // VLOG_SCOPE_F(2, "numItemsPack: %d n_models: %d featuresUsed: %zu", numItemsPack, n_models,
                // featuresUsed.size());
            }
            if (block_update) {
                std::tie(weights_, alpha_t, finished) = update_weights_block(k, y_train, weights_);
            }
            if (convergence && !finished) {
                auto y_val_predict = predict(X_test);
                double accuracy = (y_val_predict == y_test).sum().item<double>() / (double)y_test.size(0);
                if (priorAccuracy == 0) {
                    priorAccuracy = accuracy;
                } else {
                    improvement = accuracy - priorAccuracy;
                }
                if (improvement < convergence_threshold) {
                    // VLOG_SCOPE_F(3, "  (improvement<threshold) tolerance: %d numItemsPack: %d improvement: %f prior: %f
                    // current: %f", tolerance, numItemsPack, improvement, priorAccuracy, accuracy);
                    tolerance++;
                } else {
                    // VLOG_SCOPE_F(3, "* (improvement>=threshold) Reset. tolerance: %d numItemsPack: %d improvement: %f
                    // prior: %f current: %f", tolerance, numItemsPack, improvement, priorAccuracy, accuracy);
                    tolerance = 0; // Reset the counter if the model performs better
                    numItemsPack = 0;
                }
                if (convergence_best) {
                    // Keep the best accuracy until now as the prior accuracy
                    priorAccuracy = std::max(accuracy, priorAccuracy);
                } else {
                    // Keep the last accuracy obtained as the prior accuracy
                    priorAccuracy = accuracy;
                }
            }
            // VLOG_SCOPE_F(1, "tolerance: %d featuresUsed.size: %zu features.size: %zu", tolerance, featuresUsed.size(),
            // features.size());
            finished = finished || tolerance > maxTolerance || pairSelection.size() == 0;
        }
        if (tolerance > maxTolerance) {
            if (numItemsPack < n_models) {
                notes.push_back("Convergence threshold reached & " + std::to_string(numItemsPack) + " models eliminated");
                // VLOG_SCOPE_F(4, "Convergence threshold reached & %d models eliminated of %d", numItemsPack, n_models);
                for (int i = 0; i < numItemsPack; ++i) {
                    significanceModels.pop_back();
                    models.pop_back();
                    n_models--;
                }
            } else {
                notes.push_back("Convergence threshold reached & 0 models eliminated");
                // VLOG_SCOPE_F(4, "Convergence threshold reached & 0 models eliminated n_models=%d numItemsPack=%d",
                // n_models, numItemsPack);
            }
        }
        if (pairSelection.size() > 0) {
            notes.push_back("Pairs not used in train: " + std::to_string(pairSelection.size()));
            status = WARNING;
        }
        notes.push_back("Number of models: " + std::to_string(n_models));
    }
    std::vector<std::string> XBA2DE::graph(const std::string &title) const { return Ensemble::graph(title); }
} // namespace bayesnet
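A note on the pack size used in the loop above: with bisection enabled, each pass trains k = 2^tolerance models before convergence is re-checked, so packs grow 1, 2, 4, 8, … and the widened maxTolerance range [1, 6] caps a single pack at

$$k_{\max} = 2^{6} = 64$$

models, which is also why block_update is only meaningful when bisection is true.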
28
bayesnet/ensembles/XBA2DE.h
Normal file
@@ -0,0 +1,28 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2025 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef XBA2DE_H
#define XBA2DE_H
#include <string>
#include <vector>
#include "Boost.h"
namespace bayesnet {
    class XBA2DE : public Boost {
    public:
        explicit XBA2DE(bool predict_voting = false);
        virtual ~XBA2DE() = default;
        std::vector<std::string> graph(const std::string& title = "XBA2DE") const override;
        std::string getVersion() override { return version; };
    protected:
        void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) override;
    private:
        std::vector<int> initializeModels(const Smoothing_t smoothing);
        std::vector<std::vector<int>> X_train_, X_test_;
        std::vector<int> y_train_, y_test_;
        std::string version = "0.9.7";
    };
}

#endif
184
bayesnet/ensembles/XBAODE.cc
Normal file
@@ -0,0 +1,184 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2025 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#include "XBAODE.h"
#include "bayesnet/classifiers/XSPODE.h"
#include "bayesnet/utils/TensorUtils.h"
#include <limits.h>
#include <random>
#include <tuple>

namespace bayesnet
{
    XBAODE::XBAODE() : Boost(false) {}
    std::vector<int> XBAODE::initializeModels(const Smoothing_t smoothing)
    {
        torch::Tensor weights_ = torch::full({m}, 1.0 / m, torch::kFloat64);
        std::vector<int> featuresSelected = featureSelection(weights_);
        for (const int &feature : featuresSelected) {
            std::unique_ptr<Classifier> model = std::make_unique<XSpode>(feature);
            model->fit(dataset, features, className, states, weights_, smoothing);
            add_model(std::move(model), 1.0);
        }
        notes.push_back("Used features in initialization: " + std::to_string(featuresSelected.size()) + " of " +
                        std::to_string(features.size()) + " with " + select_features_algorithm);
        return featuresSelected;
    }
    void XBAODE::trainModel(const torch::Tensor &weights, const bayesnet::Smoothing_t smoothing)
    {
        X_train_ = TensorUtils::to_matrix(X_train);
        y_train_ = TensorUtils::to_vector<int>(y_train);
        if (convergence) {
            X_test_ = TensorUtils::to_matrix(X_test);
            y_test_ = TensorUtils::to_vector<int>(y_test);
        }
        fitted = true;
        double alpha_t;
        torch::Tensor weights_ = torch::full({m}, 1.0 / m, torch::kFloat64);
        bool finished = false;
        std::vector<int> featuresUsed;
        n_models = 0;
        if (selectFeatures) {
            featuresUsed = initializeModels(smoothing);
            auto ypred = predict(X_train_);
            auto ypred_t = torch::tensor(ypred);
            std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred_t, weights_);
            // Update significance of the models
            for (const int &feature : featuresUsed) {
                significanceModels.pop_back();
            }
            for (const int &feature : featuresUsed) {
                significanceModels.push_back(alpha_t);
            }
            // VLOG_SCOPE_F(1, "SelectFeatures. alpha_t: %f n_models: %d", alpha_t,
            // n_models);
            if (finished) {
                return;
            }
        }
        int numItemsPack = 0; // The counter of the models inserted in the current pack
        // Variables to control the accuracy finish condition
        double priorAccuracy = 0.0;
        double improvement = 1.0;
        double convergence_threshold = 1e-4;
        int tolerance = 0; // number of times the accuracy is lower than the convergence_threshold
        // Step 0: Set the finish condition
        //   epsilon sub t > 0.5 => inverse the weights_ policy
        //   validation error is not decreasing
        //   run out of features
        bool ascending = order_algorithm == bayesnet::Orders.ASC;
        std::mt19937 g{173};
        while (!finished) {
            // Step 1: Build ranking with mutual information
            auto featureSelection = metrics.SelectKBestWeighted(weights_, ascending, n); // Get all the features sorted
            if (order_algorithm == bayesnet::Orders.RAND) {
                std::shuffle(featureSelection.begin(), featureSelection.end(), g);
            }
            // Remove used features
            featureSelection.erase(remove_if(featureSelection.begin(), featureSelection.end(),
                                             [&](auto x) {
                                                 return std::find(featuresUsed.begin(), featuresUsed.end(), x) !=
                                                        featuresUsed.end();
                                             }),
                                   featureSelection.end());
            int k = bisection ? pow(2, tolerance) : 1;
            int counter = 0; // The model counter of the current pack
            // VLOG_SCOPE_F(1, "counter=%d k=%d featureSelection.size: %zu", counter, k,
            // featureSelection.size());
            while (counter++ < k && featureSelection.size() > 0) {
                auto feature = featureSelection[0];
                featureSelection.erase(featureSelection.begin());
                std::unique_ptr<Classifier> model;
                model = std::make_unique<XSpode>(feature);
                model->fit(dataset, features, className, states, weights_, smoothing);
                /*dynamic_cast<XSpode*>(model.get())->fitx(X_train, y_train, weights_,
                 * smoothing); // using exclusive XSpode fit method*/
                // DEBUG
                /*std::cout << dynamic_cast<XSpode*>(model.get())->to_string() <<
                 * std::endl;*/
                // DEBUG
                std::vector<int> ypred;
                if (alpha_block) {
                    //
                    // Compute the prediction with the current ensemble + model
                    //
                    // Add the model to the ensemble
                    add_model(std::move(model), 1.0);
                    // Compute the prediction
                    ypred = predict(X_train_);
                    model = std::move(models.back());
                    // Remove the model from the ensemble
                    remove_last_model();
                } else {
                    ypred = model->predict(X_train_);
                }
                // Step 3.1: Compute the classifier amount of say
                auto ypred_t = torch::tensor(ypred);
                std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred_t, weights_);
                // Step 3.4: Store classifier and its accuracy to weigh its future vote
                numItemsPack++;
                featuresUsed.push_back(feature);
                add_model(std::move(model), alpha_t);
                // VLOG_SCOPE_F(2, "finished: %d numItemsPack: %d n_models: %d
                // featuresUsed: %zu", finished, numItemsPack, n_models,
                // featuresUsed.size());
            } // End of the pack
            if (convergence && !finished) {
                auto y_val_predict = predict(X_test);
                double accuracy = (y_val_predict == y_test).sum().item<double>() / (double)y_test.size(0);
                if (priorAccuracy == 0) {
                    priorAccuracy = accuracy;
                } else {
                    improvement = accuracy - priorAccuracy;
                }
                if (improvement < convergence_threshold) {
                    // VLOG_SCOPE_F(3, "  (improvement<threshold) tolerance: %d
                    // numItemsPack: %d improvement: %f prior: %f current: %f", tolerance,
                    // numItemsPack, improvement, priorAccuracy, accuracy);
                    tolerance++;
                } else {
                    // VLOG_SCOPE_F(3, "* (improvement>=threshold) Reset. tolerance: %d
                    // numItemsPack: %d improvement: %f prior: %f current: %f", tolerance,
                    // numItemsPack, improvement, priorAccuracy, accuracy);
                    tolerance = 0; // Reset the counter if the model performs better
                    numItemsPack = 0;
                }
                if (convergence_best) {
                    // Keep the best accuracy until now as the prior accuracy
                    priorAccuracy = std::max(accuracy, priorAccuracy);
                } else {
                    // Keep the last accuracy obtained as the prior accuracy
                    priorAccuracy = accuracy;
                }
            }
            // VLOG_SCOPE_F(1, "tolerance: %d featuresUsed.size: %zu features.size:
            // %zu", tolerance, featuresUsed.size(), features.size());
            finished = finished || tolerance > maxTolerance || featuresUsed.size() == features.size();
        }
        if (tolerance > maxTolerance) {
            if (numItemsPack < n_models) {
                notes.push_back("Convergence threshold reached & " + std::to_string(numItemsPack) + " models eliminated");
                // VLOG_SCOPE_F(4, "Convergence threshold reached & %d models eliminated
                // of %d", numItemsPack, n_models);
                for (int i = featuresUsed.size() - 1; i >= featuresUsed.size() - numItemsPack; --i) {
                    remove_last_model();
                }
                // VLOG_SCOPE_F(4, "*Convergence threshold %d models left & %d features
                // used.", n_models, featuresUsed.size());
            } else {
                notes.push_back("Convergence threshold reached & 0 models eliminated");
                // VLOG_SCOPE_F(4, "Convergence threshold reached & 0 models eliminated
                // n_models=%d numItemsPack=%d", n_models, numItemsPack);
            }
        }
        if (featuresUsed.size() != features.size()) {
            notes.push_back("Used features in train: " + std::to_string(featuresUsed.size()) + " of " +
                            std::to_string(features.size()));
            status = bayesnet::WARNING;
        }
        notes.push_back("Number of models: " + std::to_string(n_models));
        return;
    }
} // namespace bayesnet
27
bayesnet/ensembles/XBAODE.h
Normal file
@@ -0,0 +1,27 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2025 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef XBAODE_H
#define XBAODE_H
#include <vector>
#include <cmath>
#include "Boost.h"

namespace bayesnet {
    class XBAODE : public Boost {
    public:
        XBAODE();
        std::string getVersion() override { return version; };
    protected:
        void trainModel(const torch::Tensor& weights, const bayesnet::Smoothing_t smoothing) override;
    private:
        std::vector<int> initializeModels(const Smoothing_t smoothing);
        std::vector<std::vector<int>> X_train_, X_test_;
        std::vector<int> y_train_, y_test_;
        std::string version = "0.9.7";
    };
}
#endif // XBAODE_H
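As with the single-classifier sketch earlier, a hypothetical training round trip for the XBAODE ensemble might read as follows; the fit overload and smoothing value are assumptions inherited from the Ensemble/Classifier interfaces, not documented API:

```cpp
// Hypothetical usage sketch for XBAODE (signatures assumed).
#include <map>
#include <string>
#include <vector>
#include "bayesnet/ensembles/XBAODE.h"

void train(torch::Tensor& X, torch::Tensor& y,
           std::vector<std::string>& features, std::string& className,
           std::map<std::string, std::vector<int>>& states)
{
    bayesnet::XBAODE clf;
    clf.setHyperparameters({ { "select_features", "CFS" } }); // triggers initializeModels
    clf.fit(X, y, features, className, states, bayesnet::Smoothing_t::LAPLACE);
    float acc = clf.score(X, y);   // fraction of correctly classified samples
    (void)acc;
}
```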
@@ -4,81 +4,136 @@
// SPDX-License-Identifier: MIT
// ***************************************************************

#include <limits>
#include "bayesnet/utils/bayesnetUtils.h"
#include "FeatureSelect.h"
namespace bayesnet {
    FeatureSelect::FeatureSelect(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights) :
        Metrics(samples, features, className, classNumStates), maxFeatures(maxFeatures == 0 ? samples.size(0) - 1 : maxFeatures), weights(weights)

namespace bayesnet {

    using namespace torch::indexing; // for Ellipsis constant

    //---------------------------------------------------------------------
    // ctor
    //---------------------------------------------------------------------
    FeatureSelect::FeatureSelect(const torch::Tensor& samples,
                                 const std::vector<std::string>& features,
                                 const std::string& className,
                                 int maxFeatures,
                                 int classNumStates,
                                 const torch::Tensor& weights)
        : Metrics(samples, features, className, classNumStates),
        maxFeatures(maxFeatures == 0 ? samples.size(0) - 1 : maxFeatures),
        weights(weights)
    {
    }

    //---------------------------------------------------------------------
    // public helpers
    //---------------------------------------------------------------------
    void FeatureSelect::initialize()
    {
        selectedFeatures.clear();
        selectedScores.clear();
        suLabels.clear();
        suFeatures.clear();

        fitted = false;
    }

    //---------------------------------------------------------------------
    // Symmetrical Uncertainty (SU)
    //---------------------------------------------------------------------
    double FeatureSelect::symmetricalUncertainty(int a, int b)
    {
        /*
        Compute symmetrical uncertainty. Normalize* information gain (mutual
        information) with the entropies of the features in order to compensate
        the bias due to high cardinality features. *Range [0, 1]
        (https://www.sciencedirect.com/science/article/pii/S0020025519303603)
         * Compute symmetrical uncertainty. Normalises the information gain
         * (mutual information) with the entropies of the variables to compensate
         * the bias due to high-cardinality features. Range: [0, 1]
         * See: https://www.sciencedirect.com/science/article/pii/S0020025519303603
         */
        auto x = samples.index({ a, "..." });
        auto y = samples.index({ b, "..." });
        auto mu = mutualInformation(x, y, weights);
        auto hx = entropy(x, weights);
        auto hy = entropy(y, weights);
        return 2.0 * mu / (hx + hy);

        auto x = samples.index({ a, Ellipsis });              // row a => feature a
        auto y = (b >= 0) ? samples.index({ b, Ellipsis })    // row b (>=0) => feature b
                          : samples.index({ -1, Ellipsis }); // -1 treated as last row = labels

        double mu = mutualInformation(x, y, weights);
        double hx = entropy(x, weights);
        double hy = entropy(y, weights);

        const double denom = hx + hy;
        if (denom == 0.0) return 0.0; // perfectly pure variables

        return 2.0 * mu / denom;
    }
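In symbols, the routine computes

$$SU(X, Y) = \frac{2\, I(X; Y)}{H(X) + H(Y)}$$

with the new guard returning 0 when H(X) + H(Y) = 0, i.e. when both variables are constant and the ratio would otherwise be undefined.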

    //---------------------------------------------------------------------
    // SU feature–class
    //---------------------------------------------------------------------
    void FeatureSelect::computeSuLabels()
    {
        // Compute Simmetrical Uncertainty between features and labels
        // Compute Symmetrical Uncertainty between each feature and the class labels
        // https://en.wikipedia.org/wiki/Symmetric_uncertainty
        for (int i = 0; i < features.size(); ++i) {
            suLabels.push_back(symmetricalUncertainty(i, -1));
        const int classIdx = static_cast<int>(samples.size(0)) - 1; // labels in last row
        suLabels.reserve(features.size());
        for (int i = 0; i < static_cast<int>(features.size()); ++i) {
            suLabels.emplace_back(symmetricalUncertainty(i, classIdx));
        }
    }
    double FeatureSelect::computeSuFeatures(const int firstFeature, const int secondFeature)

    //---------------------------------------------------------------------
    // SU feature–feature with cache
    //---------------------------------------------------------------------
    double FeatureSelect::computeSuFeatures(int firstFeature, int secondFeature)
    {
        // Compute Simmetrical Uncertainty between features
        // https://en.wikipedia.org/wiki/Symmetric_uncertainty
        try {
            return suFeatures.at({ firstFeature, secondFeature });
        }
        catch (const std::out_of_range& e) {
            double result = symmetricalUncertainty(firstFeature, secondFeature);
            suFeatures[{firstFeature, secondFeature}] = result;
        // Order the pair to exploit symmetry => only one entry in the map
        auto ordered = std::minmax(firstFeature, secondFeature);
        const std::pair<int, int> key{ ordered.first, ordered.second };

        auto it = suFeatures.find(key);
        if (it != suFeatures.end()) return it->second;

        double result = symmetricalUncertainty(key.first, key.second);
        suFeatures[key] = result; // store once (symmetry handled by ordering)
        return result;
        }
    }

    //---------------------------------------------------------------------
    // Correlation-based Feature Selection (CFS) merit
    //---------------------------------------------------------------------
    double FeatureSelect::computeMeritCFS()
    {
        double rcf = 0;
        for (auto feature : selectedFeatures) {
            rcf += suLabels[feature];
        }
        double rff = 0;
        int n = selectedFeatures.size();
        for (const auto& item : doCombinations(selectedFeatures)) {
            rff += computeSuFeatures(item.first, item.second);
        }
        return rcf / sqrt(n + (n * n - n) * rff);
        const int n = static_cast<int>(selectedFeatures.size());
        if (n == 0) return 0.0;

        // average r_cf (feature–class)
        double rcf_sum = 0.0;
        for (int f : selectedFeatures) rcf_sum += suLabels[f];
        const double rcf_avg = rcf_sum / n;

        // average r_ff (feature–feature)
        double rff_sum = 0.0;
        const auto& pairs = doCombinations(selectedFeatures); // generates each unordered pair once
        for (const auto& p : pairs) rff_sum += computeSuFeatures(p.first, p.second);

        const double numPairs = n * (n - 1) * 0.5;
        const double rff_avg = (numPairs > 0) ? rff_sum / numPairs : 0.0;

        // Merit_S = k * r_cf / sqrt( k + k*(k-1) * r_ff )   (Hall, 1999)
        const double k = static_cast<double>(n);
        return (k * rcf_avg) / std::sqrt(k + k * (k - 1) * rff_avg);
    }
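A quick numeric check of the rewritten merit: with k = 3 selected features, an average feature–class correlation of r̄_cf = 0.4 and an average feature–feature correlation of r̄_ff = 0.2,

$$\mathrm{Merit}_S = \frac{3 \times 0.4}{\sqrt{3 + 3 \cdot 2 \cdot 0.2}} = \frac{1.2}{\sqrt{4.2}} \approx 0.59$$

so redundancy among the selected features (larger r̄_ff) inflates the denominator and lowers the merit, exactly as Hall's formula intends.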

    //---------------------------------------------------------------------
    // getters
    //---------------------------------------------------------------------
    std::vector<int> FeatureSelect::getFeatures() const
    {
        if (!fitted) {
            throw std::runtime_error("FeatureSelect not fitted");
        }
        if (!fitted) throw std::runtime_error("FeatureSelect not fitted");
        return selectedFeatures;
    }

    std::vector<double> FeatureSelect::getScores() const
    {
        if (!fitted) {
            throw std::runtime_error("FeatureSelect not fitted");
        }
        if (!fitted) throw std::runtime_error("FeatureSelect not fitted");
        return selectedScores;
    }
}
|
@@ -26,10 +26,26 @@ namespace bayesnet {
        auto first_feature = pop_first(featureOrderCopy);
        selectedFeatures.push_back(first_feature);
        selectedScores.push_back(suLabels.at(first_feature));
-       // Second with the score of the candidates
-       selectedFeatures.push_back(pop_first(featureOrderCopy));
-       auto merit = computeMeritCFS();
-       selectedScores.push_back(merit);
+       // Select second feature that maximizes merit with first
+       double maxMerit = 0.0;
+       int secondFeature = -1;
+       for (const auto& candidate : featureOrderCopy) {
+           selectedFeatures.push_back(candidate);
+           double candidateMerit = computeMeritCFS();
+           if (candidateMerit > maxMerit) {
+               maxMerit = candidateMerit;
+               secondFeature = candidate;
+           }
+           selectedFeatures.pop_back();
+       }
+
+       if (secondFeature != -1) {
+           selectedFeatures.push_back(secondFeature);
+           selectedScores.push_back(maxMerit);
+           // Remove from featureOrderCopy
+           featureOrderCopy.erase(std::remove(featureOrderCopy.begin(), featureOrderCopy.end(), secondFeature), featureOrderCopy.end());
+       }
+       double merit = maxMerit;
        for (const auto feature : featureOrderCopy) {
            selectedFeatures.push_back(feature);
            // Compute merit with selectedFeatures
279 bayesnet/feature_selection/L1FS.cc Normal file
@@ -0,0 +1,279 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#include <algorithm>
#include <cmath>
#include <numeric>
#include "bayesnet/utils/bayesnetUtils.h"
#include "L1FS.h"

namespace bayesnet {
    using namespace torch::indexing;

    L1FS::L1FS(const torch::Tensor& samples,
        const std::vector<std::string>& features,
        const std::string& className,
        const int maxFeatures,
        const int classNumStates,
        const torch::Tensor& weights,
        const double alpha,
        const int maxIter,
        const double tolerance,
        const bool fitIntercept)
        : FeatureSelect(samples, features, className, maxFeatures, classNumStates, weights),
        alpha(alpha), maxIter(maxIter), tolerance(tolerance), fitIntercept(fitIntercept)
    {
        if (alpha < 0) {
            throw std::invalid_argument("Alpha (regularization strength) must be non-negative");
        }
        if (maxIter < 1) {
            throw std::invalid_argument("Maximum iterations must be positive");
        }
        if (tolerance <= 0) {
            throw std::invalid_argument("Tolerance must be positive");
        }

        // Determine if this is a regression or classification task
        // For simplicity, assume binary classification if classNumStates == 2
        // and regression otherwise (this can be refined based on your needs)
        isRegression = (classNumStates > 2 || classNumStates == 0);
    }

    void L1FS::fit()
    {
        initialize();

        // Prepare data
        int n_samples = samples.size(1);
        int n_features = features.size();

        // Extract features (all rows except last)
        auto X = samples.index({ Slice(0, n_features), Slice() }).t().contiguous();

        // Extract labels (last row)
        auto y = samples.index({ -1, Slice() }).contiguous();

        // Convert to float for numerical operations
        X = X.to(torch::kFloat32);
        y = y.to(torch::kFloat32);

        // Normalize features for better convergence
        auto X_mean = X.mean(0);
        auto X_std = X.std(0);
        X_std = torch::where(X_std == 0, torch::ones_like(X_std), X_std);
        X = (X - X_mean) / X_std;

        if (isRegression) {
            // Normalize y for regression
            auto y_mean = y.mean();
            auto y_std = y.std();
            if (y_std.item<double>() > 0) {
                y = (y - y_mean) / y_std;
            }
            fitLasso(X, y, weights);
        } else {
            // For binary classification
            fitL1Logistic(X, y, weights);
        }

        // Select features based on non-zero coefficients
        std::vector<std::pair<int, double>> featureImportance;
        for (int i = 0; i < n_features; ++i) {
            double coef_magnitude = std::abs(coefficients[i]);
            if (coef_magnitude > 1e-10) { // Threshold for numerical zero
                featureImportance.push_back({ i, coef_magnitude });
            }
        }

        // If all coefficients are zero (high regularization), select based on original feature-class correlation
        if (featureImportance.empty() && maxFeatures > 0) {
            // Compute SU with labels as fallback
            computeSuLabels();
            auto featureOrder = argsort(suLabels);

            // Select top features by SU score
            int numToSelect = std::min(static_cast<int>(featureOrder.size()),
                std::min(maxFeatures, 3)); // At most 3 features as fallback

            for (int i = 0; i < numToSelect; ++i) {
                selectedFeatures.push_back(featureOrder[i]);
                selectedScores.push_back(suLabels[featureOrder[i]]);
            }
        } else {
            // Sort by importance (absolute coefficient value)
            std::sort(featureImportance.begin(), featureImportance.end(),
                [](const auto& a, const auto& b) { return a.second > b.second; });

            // Select top features up to maxFeatures
            int numToSelect = std::min(static_cast<int>(featureImportance.size()),
                maxFeatures);

            for (int i = 0; i < numToSelect; ++i) {
                selectedFeatures.push_back(featureImportance[i].first);
                selectedScores.push_back(featureImportance[i].second);
            }
        }

        fitted = true;
    }

    void L1FS::fitLasso(const torch::Tensor& X, const torch::Tensor& y,
        const torch::Tensor& sampleWeights)
    {
        int n_samples = X.size(0);
        int n_features = X.size(1);

        // Initialize coefficients
        coefficients.resize(n_features, 0.0);
        double intercept = 0.0;

        // Ensure consistent types
        torch::Tensor weights = sampleWeights.to(torch::kFloat32);

        // Coordinate descent for Lasso
        torch::Tensor residuals = y.clone();
        if (fitIntercept) {
            intercept = (y * weights).sum().item<float>() / weights.sum().item<float>();
            residuals = y - intercept;
        }

        // Precompute feature norms
        std::vector<double> featureNorms(n_features);
        for (int j = 0; j < n_features; ++j) {
            auto Xj = X.index({ Slice(), j });
            featureNorms[j] = (Xj * Xj * weights).sum().item<float>();
        }

        // Coordinate descent iterations
        for (int iter = 0; iter < maxIter; ++iter) {
            double maxChange = 0.0;

            // Update each coordinate
            for (int j = 0; j < n_features; ++j) {
                auto Xj = X.index({ Slice(), j });

                // Compute partial residuals (excluding feature j)
                torch::Tensor partialResiduals = residuals + coefficients[j] * Xj;

                // Compute rho (correlation with residuals)
                double rho = (Xj * partialResiduals * weights).sum().item<float>();

                // Soft thresholding
                double oldCoef = coefficients[j];
                coefficients[j] = softThreshold(rho, alpha) / featureNorms[j];

                // Update residuals
                residuals = partialResiduals - coefficients[j] * Xj;

                maxChange = std::max(maxChange, std::abs(coefficients[j] - oldCoef));
            }

            // Update intercept if needed
            if (fitIntercept) {
                double oldIntercept = intercept;
                intercept = (residuals * weights).sum().item<float>() /
                    weights.sum().item<float>();
                residuals = residuals - (intercept - oldIntercept);
                maxChange = std::max(maxChange, std::abs(intercept - oldIntercept));
            }

            // Check convergence
            if (maxChange < tolerance) {
                break;
            }
        }
    }
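As a reading aid, one pass of the loop above is the standard weighted-Lasso coordinate update. With the partial residual r_i^(-j) excluding feature j (partialResiduals in the code),

$$\rho_j = \sum_i w_i\,x_{ij}\,r_i^{(-j)}, \qquad \beta_j \leftarrow \frac{S(\rho_j, \alpha)}{\sum_i w_i\,x_{ij}^2}, \qquad S(\rho, \lambda) = \operatorname{sign}(\rho)\,\max(|\rho| - \lambda,\, 0),$$

which corresponds to rho, featureNorms[j], and softThreshold respectively.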

    void L1FS::fitL1Logistic(const torch::Tensor& X, const torch::Tensor& y,
        const torch::Tensor& sampleWeights)
    {
        int n_samples = X.size(0);
        int n_features = X.size(1);

        // Initialize coefficients
        torch::Tensor coef = torch::zeros({ n_features }, torch::kFloat32);
        double intercept = 0.0;

        // Ensure consistent types
        torch::Tensor weights = sampleWeights.to(torch::kFloat32);

        // Learning rate (can be adaptive)
        double learningRate = 0.01;

        // Proximal gradient descent
        for (int iter = 0; iter < maxIter; ++iter) {
            // Compute predictions
            torch::Tensor linearPred = X.matmul(coef);
            if (fitIntercept) {
                linearPred = linearPred + intercept;
            }
            torch::Tensor pred = sigmoid(linearPred);

            // Compute gradient
            torch::Tensor diff = pred - y;
            torch::Tensor grad = X.t().matmul(diff * weights) / n_samples;

            // Gradient descent step
            torch::Tensor coef_new = coef - learningRate * grad;

            // Proximal step (soft thresholding)
            for (int j = 0; j < n_features; ++j) {
                coef_new[j] = softThreshold(coef_new[j].item<float>(),
                    learningRate * alpha);
            }

            // Update intercept if needed
            if (fitIntercept) {
                double grad_intercept = (diff * weights).sum().item<float>() / n_samples;
                intercept -= learningRate * grad_intercept;
            }

            // Check convergence
            double change = (coef_new - coef).abs().max().item<float>();
            coef = coef_new;

            if (change < tolerance) {
                break;
            }

            // Adaptive learning rate (optional)
            if (iter % 100 == 0) {
                learningRate *= 0.9;
            }
        }

        // Store final coefficients
        coefficients.resize(n_features);
        for (int j = 0; j < n_features; ++j) {
            coefficients[j] = coef[j].item<float>();
        }
    }
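Likewise, each iteration of fitL1Logistic is one proximal-gradient (ISTA) step on the weighted logistic loss:

$$\beta \leftarrow S_{\eta\alpha}\!\left(\beta - \eta\,\tfrac{1}{n}\,X^{\top}\bigl[w \odot (\sigma(X\beta + b) - y)\bigr]\right),$$

with learning rate η decayed by a factor 0.9 every 100 iterations and the soft threshold S applied elementwise, as in the inner loop above.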

    double L1FS::softThreshold(double x, double lambda) const
    {
        if (x > lambda) {
            return x - lambda;
        } else if (x < -lambda) {
            return x + lambda;
        } else {
            return 0.0;
        }
    }

    torch::Tensor L1FS::sigmoid(const torch::Tensor& z) const
    {
        return 1.0 / (1.0 + torch::exp(-z));
    }

    std::vector<double> L1FS::getCoefficients() const
    {
        if (!fitted) {
            throw std::runtime_error("L1FS not fitted");
        }
        return coefficients;
    }

} // namespace bayesnet
83 bayesnet/feature_selection/L1FS.h Normal file
@@ -0,0 +1,83 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2025 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef L1FS_H
#define L1FS_H
#include <torch/torch.h>
#include <vector>
#include "bayesnet/feature_selection/FeatureSelect.h"

namespace bayesnet {
    /**
     * L1-Regularized Feature Selection (L1FS)
     *
     * This class implements feature selection using L1-regularized linear models.
     * For classification tasks, it uses one-vs-rest logistic regression with L1 penalty.
     * For regression tasks, it uses Lasso regression.
     *
     * The L1 penalty induces sparsity in the model coefficients, effectively
     * performing feature selection by setting irrelevant feature weights to zero.
     */
    class L1FS : public FeatureSelect {
    public:
        /**
         * Constructor for L1FS
         * @param samples n+1xm tensor where samples[-1] is the target variable
         * @param features vector of feature names
         * @param className name of the class/target variable
         * @param maxFeatures maximum number of features to select (0 = all)
         * @param classNumStates number of states for classification (ignored for regression)
         * @param weights sample weights
         * @param alpha L1 regularization strength (higher = more sparsity)
         * @param maxIter maximum iterations for optimization
         * @param tolerance convergence tolerance
         * @param fitIntercept whether to fit an intercept term
         */
        L1FS(const torch::Tensor& samples,
            const std::vector<std::string>& features,
            const std::string& className,
            const int maxFeatures,
            const int classNumStates,
            const torch::Tensor& weights,
            const double alpha = 1.0,
            const int maxIter = 1000,
            const double tolerance = 1e-4,
            const bool fitIntercept = true);

        virtual ~L1FS() {};

        void fit() override;

        // Get the learned coefficients for each feature
        std::vector<double> getCoefficients() const;

    private:
        double alpha;      // L1 regularization strength
        int maxIter;       // Maximum iterations for optimization
        double tolerance;  // Convergence tolerance
        bool fitIntercept; // Whether to fit intercept
        bool isRegression; // Task type (regression vs classification)

        std::vector<double> coefficients; // Learned coefficients

        // Coordinate descent for Lasso regression
        void fitLasso(const torch::Tensor& X, const torch::Tensor& y, const torch::Tensor& sampleWeights);

        // Proximal gradient descent for L1-regularized logistic regression
        void fitL1Logistic(const torch::Tensor& X, const torch::Tensor& y, const torch::Tensor& sampleWeights);

        // Soft thresholding operator for L1 regularization
        double softThreshold(double x, double lambda) const;

        // Logistic function
        torch::Tensor sigmoid(const torch::Tensor& z) const;

        // Compute logistic loss
        double logisticLoss(const torch::Tensor& X, const torch::Tensor& y,
            const torch::Tensor& coef, const torch::Tensor& sampleWeights) const;
    };
}
#endif
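A minimal usage sketch of the class above (not part of the diff; the tensor contents, feature names, and alpha value are illustrative, while the interface is the one declared in this header):

#include <string>
#include <vector>
#include <torch/torch.h>
#include "bayesnet/feature_selection/L1FS.h"

int main()
{
    // samples is an (n_features + 1) x n_samples tensor; the last row holds the class
    auto X = torch::randint(0, 3, { 4, 100 }, torch::kLong); // 4 discretized features
    auto y = torch::randint(0, 2, { 1, 100 }, torch::kLong); // binary class => classNumStates = 2
    auto samples = torch::cat({ X, y }, 0);
    std::vector<std::string> features = { "f0", "f1", "f2", "f3" };
    auto weights = torch::full({ 100 }, 1.0 / 100, torch::kDouble); // uniform sample weights

    bayesnet::L1FS selector(samples, features, "class",
        /* maxFeatures */ 2, /* classNumStates */ 2, weights, /* alpha */ 0.1);
    selector.fit();
    auto chosen = selector.getFeatures(); // indices of the selected features
    auto scores = selector.getScores();   // |coefficient| magnitude per selected feature
}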
@@ -209,7 +209,7 @@ namespace bayesnet {
            pthread_setname_np(threadName.c_str());
#endif
            double numStates = static_cast<double>(node.second->getNumStates());
-           double smoothing_factor = 0.0;
+           double smoothing_factor;
            switch (smoothing) {
                case Smoothing_t::ORIGINAL:
                    smoothing_factor = 1.0 / n_samples;
@@ -221,7 +221,7 @@ namespace bayesnet {
                    smoothing_factor = 1 / numStates;
                    break;
                default:
-                   throw std::invalid_argument("Smoothing method not recognized " + std::to_string(static_cast<int>(smoothing)));
+                   smoothing_factor = 0.0; // No smoothing
            }
            node.second->computeCPT(samples, features, smoothing_factor, weights);
            semaphore.release();
@@ -234,16 +234,6 @@ namespace bayesnet {
        for (auto& thread : threads) {
            thread.join();
        }
-       // std::fstream file;
-       // file.open("cpt.txt", std::fstream::out | std::fstream::app);
-       // file << std::string(80, '*') << std::endl;
-       // for (const auto& item : graph("Test")) {
-       //     file << item << std::endl;
-       // }
-       // file << std::string(80, '-') << std::endl;
-       // file << dump_cpt() << std::endl;
-       // file << std::string(80, '=') << std::endl;
-       // file.close();
        fitted = true;
    }
    torch::Tensor Network::predict_tensor(const torch::Tensor& samples, const bool proba)
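For reference: whichever branch sets the factor (ORIGINAL uses α = 1/m for m samples, the branch visible in the second hunk uses α = 1/numStates, and the default now falls back to α = 0, i.e. no smoothing), Node::computeCPT below adds α to every count and renormalizes, which is plain additive smoothing:

$$\hat{P}(x \mid \mathrm{pa}(x)) = \frac{N_{x,\mathrm{pa}} + \alpha}{N_{\mathrm{pa}} + \alpha\,|X|},$$

where |X| is the node's number of states.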
@@ -10,14 +10,10 @@
#include <vector>
#include "bayesnet/config.h"
#include "Node.h"
+#include "Smoothing.h"

namespace bayesnet {
-   enum class Smoothing_t {
-       NONE = -1,
-       ORIGINAL = 0,
-       LAPLACE,
-       CESTNIK
-   };

    class Network {
    public:
        Network();
@@ -5,6 +5,7 @@
// ***************************************************************

#include "Node.h"
+#include <iterator>

namespace bayesnet {

@@ -93,36 +94,54 @@ namespace bayesnet {
    void Node::computeCPT(const torch::Tensor& dataset, const std::vector<std::string>& features, const double smoothing, const torch::Tensor& weights)
    {
        dimensions.clear();
        // Get dimensions of the CPT
+       dimensions.reserve(parents.size() + 1);
        dimensions.push_back(numStates);
-       transform(parents.begin(), parents.end(), back_inserter(dimensions), [](const auto& parent) { return parent->getNumStates(); });
-       // Create a tensor of zeros with the dimensions of the CPT
-       cpTable = torch::zeros(dimensions, torch::kDouble) + smoothing;
-       // Fill table with counts
-       auto pos = find(features.begin(), features.end(), name);
-       if (pos == features.end()) {
-           throw std::logic_error("Feature " + name + " not found in dataset");
-       }
-       int name_index = pos - features.begin();
-       c10::List<c10::optional<at::Tensor>> coordinates;
-       for (int n_sample = 0; n_sample < dataset.size(1); ++n_sample) {
-           coordinates.clear();
-           auto sample = dataset.index({ "...", n_sample });
-           coordinates.push_back(sample[name_index]);
-           for (auto parent : parents) {
-               pos = find(features.begin(), features.end(), parent->getName());
-               if (pos == features.end()) {
-                   throw std::logic_error("Feature parent " + parent->getName() + " not found in dataset");
-               }
-               int parent_index = pos - features.begin();
-               coordinates.push_back(sample[parent_index]);
-           }
-           // Increment the count of the corresponding coordinate
-           cpTable.index_put_({ coordinates }, weights.index({ n_sample }), true);
-       }
-       // Normalize the counts
-       // Divide each row by the sum of the row
-       cpTable = cpTable / cpTable.sum(0);
+       for (const auto& parent : parents) {
+           dimensions.push_back(parent->getNumStates());
+       }
+       cpTable = torch::full(dimensions, smoothing, torch::kDouble);
+
+       // Build feature index map
+       std::unordered_map<std::string, int> featureIndexMap;
+       for (size_t i = 0; i < features.size(); ++i) {
+           featureIndexMap[features[i]] = i;
+       }
+
+       // Gather indices for node and parents
+       std::vector<int64_t> all_indices;
+       all_indices.push_back(featureIndexMap[name]);
+       for (const auto& parent : parents) {
+           all_indices.push_back(featureIndexMap[parent->getName()]);
+       }
+
+       // Extract relevant columns: shape (num_features, num_samples)
+       auto indices_tensor = dataset.index_select(0, torch::tensor(all_indices, torch::kLong));
+       indices_tensor = indices_tensor.transpose(0, 1).to(torch::kLong); // (num_samples, num_features)
+
+       // Manual flattening of indices
+       std::vector<int64_t> strides(all_indices.size(), 1);
+       for (int i = strides.size() - 2; i >= 0; --i) {
+           strides[i] = strides[i + 1] * cpTable.size(i + 1);
+       }
+       auto indices_tensor_cpu = indices_tensor.cpu();
+       auto indices_accessor = indices_tensor_cpu.accessor<int64_t, 2>();
+       std::vector<int64_t> flat_indices(indices_tensor.size(0));
+       for (int64_t i = 0; i < indices_tensor.size(0); ++i) {
+           int64_t idx = 0;
+           for (size_t j = 0; j < strides.size(); ++j) {
+               idx += indices_accessor[i][j] * strides[j];
+           }
+           flat_indices[i] = idx;
+       }
+
+       // Accumulate weights into flat CPT
+       auto flat_cpt = cpTable.flatten();
+       auto flat_indices_tensor = torch::from_blob(flat_indices.data(), { (int64_t)flat_indices.size() }, torch::kLong).clone();
+       flat_cpt.index_add_(0, flat_indices_tensor, weights.cpu());
+       cpTable = flat_cpt.view(cpTable.sizes());
+
+       // Normalize the counts (dividing each row by the sum of the row)
+       cpTable /= cpTable.sum(0, true);
    }
    double Node::getFactorValue(std::map<std::string, int>& evidence)
    {
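The manual flattening in the + branch is ordinary row-major indexing over the CPT dimensions d_0, ..., d_p:

$$\mathrm{idx} = \sum_{j=0}^{p} i_j\,s_j, \qquad s_j = \prod_{k > j} d_k,$$

so a single index_add_ accumulates the weights of all samples at once instead of issuing one index_put_ call per sample, which is what the removed loop did.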
17 bayesnet/network/Smoothing.h Normal file
@@ -0,0 +1,17 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef SMOOTHING_H
#define SMOOTHING_H
namespace bayesnet {
    enum class Smoothing_t {
        NONE = -1,
        ORIGINAL = 0,
        LAPLACE,
        CESTNIK
    };
}
#endif // SMOOTHING_H
@@ -4,9 +4,6 @@
-#include <condition_variable>
-#include <algorithm>
-#include <thread>
#include <mutex>
#include <condition_variable>
#include <thread>

class CountingSemaphore {
public:
@@ -32,6 +29,14 @@ public:
            cv_.notify_one();
        }
    }
+   uint getCount() const
+   {
+       return count_;
+   }
+   uint getMaxCount() const
+   {
+       return max_count_;
+   }
private:
    CountingSemaphore()
        : max_count_(std::max(1u, static_cast<uint>(0.95 * std::thread::hardware_concurrency()))),
@@ -53,14 +53,14 @@ namespace bayesnet {
        }
    }

-   void insertElement(std::list<int>& variables, int variable)
+   void MST::insertElement(std::list<int>& variables, int variable)
    {
        if (std::find(variables.begin(), variables.end(), variable) == variables.end()) {
            variables.push_front(variable);
        }
    }

-   std::vector<std::pair<int, int>> reorder(std::vector<std::pair<float, std::pair<int, int>>> T, int root_original)
+   std::vector<std::pair<int, int>> MST::reorder(std::vector<std::pair<float, std::pair<int, int>>> T, int root_original)
    {
        // Create the edges of a DAG from the MST
        // replacing unordered_set with list because unordered_set cannot guarantee the order of the elements inserted
@@ -14,6 +14,8 @@ namespace bayesnet {
    public:
        MST() = default;
        MST(const std::vector<std::string>& features, const torch::Tensor& weights, const int root);
+       void insertElement(std::list<int>& variables, int variable);
+       std::vector<std::pair<int, int>> reorder(std::vector<std::pair<float, std::pair<int, int>>> T, int root_original);
        std::vector<std::pair<int, int>> maximumSpanningTree();
    private:
        torch::Tensor weights;
51 bayesnet/utils/TensorUtils.h Normal file
@@ -0,0 +1,51 @@
#ifndef TENSORUTILS_H
#define TENSORUTILS_H
#include <torch/torch.h>
#include <vector>
namespace bayesnet {
    class TensorUtils {
    public:
        static std::vector<std::vector<int>> to_matrix(const torch::Tensor& X)
        {
            // Ensure tensor is contiguous in memory
            auto X_contig = X.contiguous();

            // Access tensor data pointer directly
            auto data_ptr = X_contig.data_ptr<int>();

            // IF you are using int64_t as the data type, use the following lines instead:
            // auto data_ptr = X_contig.data_ptr<int64_t>();
            // std::vector<std::vector<int64_t>> data(X.size(0), std::vector<int64_t>(X.size(1)));

            // Prepare output container
            std::vector<std::vector<int>> data(X.size(0), std::vector<int>(X.size(1)));

            // Fill the 2D vector in a single loop using pointer arithmetic
            int rows = X.size(0);
            int cols = X.size(1);
            for (int i = 0; i < rows; ++i) {
                std::copy(data_ptr + i * cols, data_ptr + (i + 1) * cols, data[i].begin());
            }
            return data;
        }
        template <typename T>
        static std::vector<T> to_vector(const torch::Tensor& y)
        {
            // Ensure the tensor is contiguous in memory
            auto y_contig = y.contiguous();

            // Access data pointer
            auto data_ptr = y_contig.data_ptr<T>();

            // Prepare output container
            std::vector<T> data(y.size(0));

            // Copy data efficiently
            std::copy(data_ptr, data_ptr + y.size(0), data.begin());

            return data;
        }
    };
}

#endif // TENSORUTILS_H
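Quick illustrative calls for the two helpers above (values made up; to_matrix as written requires a kInt32 tensor because it reads data_ptr<int>):

auto X = torch::tensor({ {1, 2, 3}, {4, 5, 6} }, torch::kInt32);
auto rows = bayesnet::TensorUtils::to_matrix(X);        // {{1, 2, 3}, {4, 5, 6}}
auto y = torch::tensor({ 0, 1, 1 }, torch::kInt32);
auto labels = bayesnet::TensorUtils::to_vector<int>(y); // {0, 1, 1}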
4 bayesnetConfig.cmake.in Normal file
@@ -0,0 +1,4 @@
@PACKAGE_INIT@

include("${CMAKE_CURRENT_LIST_DIR}/bayesnetTargets.cmake")
@@ -1,12 +0,0 @@

function(add_git_submodule dir)
    find_package(Git REQUIRED)

    if(NOT EXISTS ${dir}/CMakeLists.txt)
        message(STATUS "🚨 Adding git submodule => ${dir}")
        execute_process(COMMAND ${GIT_EXECUTABLE}
            submodule update --init --recursive -- ${dir}
            WORKING_DIRECTORY ${PROJECT_SOURCE_DIR})
    endif()
    add_subdirectory(${dir})
endfunction(add_git_submodule)
@@ -1,742 +0,0 @@
|
||||
# Copyright (c) 2012 - 2017, Lars Bilke
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification,
|
||||
# are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its contributors
|
||||
# may be used to endorse or promote products derived from this software without
|
||||
# specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
|
||||
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
# CHANGES:
|
||||
#
|
||||
# 2012-01-31, Lars Bilke
|
||||
# - Enable Code Coverage
|
||||
#
|
||||
# 2013-09-17, Joakim Söderberg
|
||||
# - Added support for Clang.
|
||||
# - Some additional usage instructions.
|
||||
#
|
||||
# 2016-02-03, Lars Bilke
|
||||
# - Refactored functions to use named parameters
|
||||
#
|
||||
# 2017-06-02, Lars Bilke
|
||||
# - Merged with modified version from github.com/ufz/ogs
|
||||
#
|
||||
# 2019-05-06, Anatolii Kurotych
|
||||
# - Remove unnecessary --coverage flag
|
||||
#
|
||||
# 2019-12-13, FeRD (Frank Dana)
|
||||
# - Deprecate COVERAGE_LCOVR_EXCLUDES and COVERAGE_GCOVR_EXCLUDES lists in favor
|
||||
# of tool-agnostic COVERAGE_EXCLUDES variable, or EXCLUDE setup arguments.
|
||||
# - CMake 3.4+: All excludes can be specified relative to BASE_DIRECTORY
|
||||
# - All setup functions: accept BASE_DIRECTORY, EXCLUDE list
|
||||
# - Set lcov basedir with -b argument
|
||||
# - Add automatic --demangle-cpp in lcovr, if 'c++filt' is available (can be
|
||||
# overridden with NO_DEMANGLE option in setup_target_for_coverage_lcovr().)
|
||||
# - Delete output dir, .info file on 'make clean'
|
||||
# - Remove Python detection, since version mismatches will break gcovr
|
||||
# - Minor cleanup (lowercase function names, update examples...)
|
||||
#
|
||||
# 2019-12-19, FeRD (Frank Dana)
|
||||
# - Rename Lcov outputs, make filtered file canonical, fix cleanup for targets
|
||||
#
|
||||
# 2020-01-19, Bob Apthorpe
|
||||
# - Added gfortran support
|
||||
#
|
||||
# 2020-02-17, FeRD (Frank Dana)
|
||||
# - Make all add_custom_target()s VERBATIM to auto-escape wildcard characters
|
||||
# in EXCLUDEs, and remove manual escaping from gcovr targets
|
||||
#
|
||||
# 2021-01-19, Robin Mueller
|
||||
# - Add CODE_COVERAGE_VERBOSE option which will allow to print out commands which are run
|
||||
# - Added the option for users to set the GCOVR_ADDITIONAL_ARGS variable to supply additional
|
||||
# flags to the gcovr command
|
||||
#
|
||||
# 2020-05-04, Mihchael Davis
|
||||
# - Add -fprofile-abs-path to make gcno files contain absolute paths
|
||||
# - Fix BASE_DIRECTORY not working when defined
|
||||
# - Change BYPRODUCT from folder to index.html to stop ninja from complaining about double defines
|
||||
#
|
||||
# 2021-05-10, Martin Stump
|
||||
# - Check if the generator is multi-config before warning about non-Debug builds
|
||||
#
|
||||
# 2022-02-22, Marko Wehle
|
||||
# - Change gcovr output from -o <filename> for --xml <filename> and --html <filename> output respectively.
|
||||
# This will allow for Multiple Output Formats at the same time by making use of GCOVR_ADDITIONAL_ARGS, e.g. GCOVR_ADDITIONAL_ARGS "--txt".
|
||||
#
|
||||
# 2022-09-28, Sebastian Mueller
|
||||
# - fix append_coverage_compiler_flags_to_target to correctly add flags
|
||||
# - replace "-fprofile-arcs -ftest-coverage" with "--coverage" (equivalent)
|
||||
#
|
||||
# USAGE:
|
||||
#
|
||||
# 1. Copy this file into your cmake modules path.
|
||||
#
|
||||
# 2. Add the following line to your CMakeLists.txt (best inside an if-condition
|
||||
# using a CMake option() to enable it just optionally):
|
||||
# include(CodeCoverage)
|
||||
#
|
||||
# 3. Append necessary compiler flags for all supported source files:
|
||||
# append_coverage_compiler_flags()
|
||||
# Or for specific target:
|
||||
# append_coverage_compiler_flags_to_target(YOUR_TARGET_NAME)
|
||||
#
|
||||
# 3.a (OPTIONAL) Set appropriate optimization flags, e.g. -O0, -O1 or -Og
|
||||
#
|
||||
# 4. If you need to exclude additional directories from the report, specify them
|
||||
# using full paths in the COVERAGE_EXCLUDES variable before calling
|
||||
# setup_target_for_coverage_*().
|
||||
# Example:
|
||||
# set(COVERAGE_EXCLUDES
|
||||
# '${PROJECT_SOURCE_DIR}/src/dir1/*'
|
||||
# '/path/to/my/src/dir2/*')
|
||||
# Or, use the EXCLUDE argument to setup_target_for_coverage_*().
|
||||
# Example:
|
||||
# setup_target_for_coverage_lcov(
|
||||
# NAME coverage
|
||||
# EXECUTABLE testrunner
|
||||
# EXCLUDE "${PROJECT_SOURCE_DIR}/src/dir1/*" "/path/to/my/src/dir2/*")
|
||||
#
|
||||
# 4.a NOTE: With CMake 3.4+, COVERAGE_EXCLUDES or EXCLUDE can also be set
|
||||
# relative to the BASE_DIRECTORY (default: PROJECT_SOURCE_DIR)
|
||||
# Example:
|
||||
# set(COVERAGE_EXCLUDES "dir1/*")
|
||||
# setup_target_for_coverage_gcovr_html(
|
||||
# NAME coverage
|
||||
# EXECUTABLE testrunner
|
||||
# BASE_DIRECTORY "${PROJECT_SOURCE_DIR}/src"
|
||||
# EXCLUDE "dir2/*")
|
||||
#
|
||||
# 5. Use the functions described below to create a custom make target which
|
||||
# runs your test executable and produces a code coverage report.
|
||||
#
|
||||
# 6. Build a Debug build:
|
||||
# cmake -DCMAKE_BUILD_TYPE=Debug ..
|
||||
# make
|
||||
# make my_coverage_target
|
||||
#
|
||||
|
||||
include(CMakeParseArguments)
|
||||
|
||||
option(CODE_COVERAGE_VERBOSE "Verbose information" FALSE)
|
||||
|
||||
# Check prereqs
|
||||
find_program( GCOV_PATH gcov )
|
||||
find_program( LCOV_PATH NAMES lcov lcov.bat lcov.exe lcov.perl)
|
||||
find_program( FASTCOV_PATH NAMES fastcov fastcov.py )
|
||||
find_program( GENHTML_PATH NAMES genhtml genhtml.perl genhtml.bat )
|
||||
find_program( GCOVR_PATH gcovr PATHS ${CMAKE_SOURCE_DIR}/scripts/test)
|
||||
find_program( CPPFILT_PATH NAMES c++filt )
|
||||
|
||||
if(NOT GCOV_PATH)
|
||||
message(FATAL_ERROR "gcov not found! Aborting...")
|
||||
endif() # NOT GCOV_PATH
|
||||
|
||||
# Check supported compiler (Clang, GNU and Flang)
|
||||
get_property(LANGUAGES GLOBAL PROPERTY ENABLED_LANGUAGES)
|
||||
foreach(LANG ${LANGUAGES})
|
||||
if("${CMAKE_${LANG}_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
|
||||
if("${CMAKE_${LANG}_COMPILER_VERSION}" VERSION_LESS 3)
|
||||
message(FATAL_ERROR "Clang version must be 3.0.0 or greater! Aborting...")
|
||||
endif()
|
||||
elseif(NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "GNU"
|
||||
AND NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "(LLVM)?[Ff]lang")
|
||||
message(FATAL_ERROR "Compiler is not GNU or Flang! Aborting...")
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
set(COVERAGE_COMPILER_FLAGS "-g --coverage"
|
||||
CACHE INTERNAL "")
|
||||
if(CMAKE_CXX_COMPILER_ID MATCHES "(GNU|Clang)")
|
||||
include(CheckCXXCompilerFlag)
|
||||
check_cxx_compiler_flag(-fprofile-abs-path HAVE_fprofile_abs_path)
|
||||
if(HAVE_fprofile_abs_path)
|
||||
set(COVERAGE_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
set(CMAKE_Fortran_FLAGS_COVERAGE
|
||||
${COVERAGE_COMPILER_FLAGS}
|
||||
CACHE STRING "Flags used by the Fortran compiler during coverage builds."
|
||||
FORCE )
|
||||
set(CMAKE_CXX_FLAGS_COVERAGE
|
||||
${COVERAGE_COMPILER_FLAGS}
|
||||
CACHE STRING "Flags used by the C++ compiler during coverage builds."
|
||||
FORCE )
|
||||
set(CMAKE_C_FLAGS_COVERAGE
|
||||
${COVERAGE_COMPILER_FLAGS}
|
||||
CACHE STRING "Flags used by the C compiler during coverage builds."
|
||||
FORCE )
|
||||
set(CMAKE_EXE_LINKER_FLAGS_COVERAGE
|
||||
""
|
||||
CACHE STRING "Flags used for linking binaries during coverage builds."
|
||||
FORCE )
|
||||
set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE
|
||||
""
|
||||
CACHE STRING "Flags used by the shared libraries linker during coverage builds."
|
||||
FORCE )
|
||||
mark_as_advanced(
|
||||
CMAKE_Fortran_FLAGS_COVERAGE
|
||||
CMAKE_CXX_FLAGS_COVERAGE
|
||||
CMAKE_C_FLAGS_COVERAGE
|
||||
CMAKE_EXE_LINKER_FLAGS_COVERAGE
|
||||
CMAKE_SHARED_LINKER_FLAGS_COVERAGE )
|
||||
|
||||
get_property(GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
|
||||
if(NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG))
|
||||
message(WARNING "Code coverage results with an optimised (non-Debug) build may be misleading")
|
||||
endif() # NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG)
|
||||
|
||||
if(CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
|
||||
link_libraries(gcov)
|
||||
endif()
|
||||
|
||||
# Defines a target for running and collection code coverage information
|
||||
# Builds dependencies, runs the given executable and outputs reports.
|
||||
# NOTE! The executable should always have a ZERO as exit code otherwise
|
||||
# the coverage generation will not complete.
|
||||
#
|
||||
# setup_target_for_coverage_lcov(
|
||||
# NAME testrunner_coverage # New target name
|
||||
# EXECUTABLE testrunner -j ${PROCESSOR_COUNT} # Executable in PROJECT_BINARY_DIR
|
||||
# DEPENDENCIES testrunner # Dependencies to build first
|
||||
# BASE_DIRECTORY "../" # Base directory for report
|
||||
# # (defaults to PROJECT_SOURCE_DIR)
|
||||
# EXCLUDE "src/dir1/*" "src/dir2/*" # Patterns to exclude (can be relative
|
||||
# # to BASE_DIRECTORY, with CMake 3.4+)
|
||||
# NO_DEMANGLE # Don't demangle C++ symbols
|
||||
# # even if c++filt is found
|
||||
# )
|
||||
function(setup_target_for_coverage_lcov)
|
||||
|
||||
set(options NO_DEMANGLE SONARQUBE)
|
||||
set(oneValueArgs BASE_DIRECTORY NAME)
|
||||
set(multiValueArgs EXCLUDE EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES LCOV_ARGS GENHTML_ARGS)
|
||||
cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
|
||||
|
||||
if(NOT LCOV_PATH)
|
||||
message(FATAL_ERROR "lcov not found! Aborting...")
|
||||
endif() # NOT LCOV_PATH
|
||||
|
||||
if(NOT GENHTML_PATH)
|
||||
message(FATAL_ERROR "genhtml not found! Aborting...")
|
||||
endif() # NOT GENHTML_PATH
|
||||
|
||||
# Set base directory (as absolute path), or default to PROJECT_SOURCE_DIR
|
||||
if(DEFINED Coverage_BASE_DIRECTORY)
|
||||
get_filename_component(BASEDIR ${Coverage_BASE_DIRECTORY} ABSOLUTE)
|
||||
else()
|
||||
set(BASEDIR ${PROJECT_SOURCE_DIR})
|
||||
endif()
|
||||
|
||||
# Collect excludes (CMake 3.4+: Also compute absolute paths)
|
||||
set(LCOV_EXCLUDES "")
|
||||
foreach(EXCLUDE ${Coverage_EXCLUDE} ${COVERAGE_EXCLUDES} ${COVERAGE_LCOV_EXCLUDES})
|
||||
if(CMAKE_VERSION VERSION_GREATER 3.4)
|
||||
get_filename_component(EXCLUDE ${EXCLUDE} ABSOLUTE BASE_DIR ${BASEDIR})
|
||||
endif()
|
||||
list(APPEND LCOV_EXCLUDES "${EXCLUDE}")
|
||||
endforeach()
|
||||
list(REMOVE_DUPLICATES LCOV_EXCLUDES)
|
||||
|
||||
# Conditional arguments
|
||||
if(CPPFILT_PATH AND NOT ${Coverage_NO_DEMANGLE})
|
||||
set(GENHTML_EXTRA_ARGS "--demangle-cpp")
|
||||
endif()
|
||||
|
||||
# Setting up commands which will be run to generate coverage data.
|
||||
# Cleanup lcov
|
||||
set(LCOV_CLEAN_CMD
|
||||
${LCOV_PATH} ${Coverage_LCOV_ARGS} --gcov-tool ${GCOV_PATH} -directory .
|
||||
-b ${BASEDIR} --zerocounters
|
||||
)
|
||||
# Create baseline to make sure untouched files show up in the report
|
||||
set(LCOV_BASELINE_CMD
|
||||
${LCOV_PATH} ${Coverage_LCOV_ARGS} --gcov-tool ${GCOV_PATH} -c -i -d . -b
|
||||
${BASEDIR} -o ${Coverage_NAME}.base
|
||||
)
|
||||
# Run tests
|
||||
set(LCOV_EXEC_TESTS_CMD
|
||||
${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS}
|
||||
)
|
||||
# Capturing lcov counters and generating report
|
||||
set(LCOV_CAPTURE_CMD
|
||||
${LCOV_PATH} ${Coverage_LCOV_ARGS} --gcov-tool ${GCOV_PATH} --directory . -b
|
||||
${BASEDIR} --capture --output-file ${Coverage_NAME}.capture
|
||||
)
|
||||
# add baseline counters
|
||||
set(LCOV_BASELINE_COUNT_CMD
|
||||
${LCOV_PATH} ${Coverage_LCOV_ARGS} --gcov-tool ${GCOV_PATH} -a ${Coverage_NAME}.base
|
||||
-a ${Coverage_NAME}.capture --output-file ${Coverage_NAME}.total
|
||||
)
|
||||
# filter collected data to final coverage report
|
||||
set(LCOV_FILTER_CMD
|
||||
${LCOV_PATH} ${Coverage_LCOV_ARGS} --gcov-tool ${GCOV_PATH} --remove
|
||||
${Coverage_NAME}.total ${LCOV_EXCLUDES} --output-file ${Coverage_NAME}.info
|
||||
)
|
||||
# Generate HTML output
|
||||
set(LCOV_GEN_HTML_CMD
|
||||
${GENHTML_PATH} ${GENHTML_EXTRA_ARGS} ${Coverage_GENHTML_ARGS} -o
|
||||
${Coverage_NAME} ${Coverage_NAME}.info
|
||||
)
|
||||
if(${Coverage_SONARQUBE})
|
||||
# Generate SonarQube output
|
||||
set(GCOVR_XML_CMD
|
||||
${GCOVR_PATH} --sonarqube ${Coverage_NAME}_sonarqube.xml -r ${BASEDIR} ${GCOVR_ADDITIONAL_ARGS}
|
||||
${GCOVR_EXCLUDE_ARGS} --object-directory=${PROJECT_BINARY_DIR}
|
||||
)
|
||||
set(GCOVR_XML_CMD_COMMAND
|
||||
COMMAND ${GCOVR_XML_CMD}
|
||||
)
|
||||
set(GCOVR_XML_CMD_BYPRODUCTS ${Coverage_NAME}_sonarqube.xml)
|
||||
set(GCOVR_XML_CMD_COMMENT COMMENT "SonarQube code coverage info report saved in ${Coverage_NAME}_sonarqube.xml.")
|
||||
endif()
|
||||
|
||||
|
||||
if(CODE_COVERAGE_VERBOSE)
|
||||
message(STATUS "Executed command report")
|
||||
message(STATUS "Command to clean up lcov: ")
|
||||
string(REPLACE ";" " " LCOV_CLEAN_CMD_SPACED "${LCOV_CLEAN_CMD}")
|
||||
message(STATUS "${LCOV_CLEAN_CMD_SPACED}")
|
||||
|
||||
message(STATUS "Command to create baseline: ")
|
||||
string(REPLACE ";" " " LCOV_BASELINE_CMD_SPACED "${LCOV_BASELINE_CMD}")
|
||||
message(STATUS "${LCOV_BASELINE_CMD_SPACED}")
|
||||
|
||||
message(STATUS "Command to run the tests: ")
|
||||
string(REPLACE ";" " " LCOV_EXEC_TESTS_CMD_SPACED "${LCOV_EXEC_TESTS_CMD}")
|
||||
message(STATUS "${LCOV_EXEC_TESTS_CMD_SPACED}")
|
||||
|
||||
message(STATUS "Command to capture counters and generate report: ")
|
||||
string(REPLACE ";" " " LCOV_CAPTURE_CMD_SPACED "${LCOV_CAPTURE_CMD}")
|
||||
message(STATUS "${LCOV_CAPTURE_CMD_SPACED}")
|
||||
|
||||
message(STATUS "Command to add baseline counters: ")
|
||||
string(REPLACE ";" " " LCOV_BASELINE_COUNT_CMD_SPACED "${LCOV_BASELINE_COUNT_CMD}")
|
||||
message(STATUS "${LCOV_BASELINE_COUNT_CMD_SPACED}")
|
||||
|
||||
message(STATUS "Command to filter collected data: ")
|
||||
string(REPLACE ";" " " LCOV_FILTER_CMD_SPACED "${LCOV_FILTER_CMD}")
|
||||
message(STATUS "${LCOV_FILTER_CMD_SPACED}")
|
||||
|
||||
message(STATUS "Command to generate lcov HTML output: ")
|
||||
string(REPLACE ";" " " LCOV_GEN_HTML_CMD_SPACED "${LCOV_GEN_HTML_CMD}")
|
||||
message(STATUS "${LCOV_GEN_HTML_CMD_SPACED}")
|
||||
|
||||
if(${Coverage_SONARQUBE})
|
||||
message(STATUS "Command to generate SonarQube XML output: ")
|
||||
string(REPLACE ";" " " GCOVR_XML_CMD_SPACED "${GCOVR_XML_CMD}")
|
||||
message(STATUS "${GCOVR_XML_CMD_SPACED}")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# Setup target
|
||||
add_custom_target(${Coverage_NAME}
|
||||
COMMAND ${LCOV_CLEAN_CMD}
|
||||
COMMAND ${LCOV_BASELINE_CMD}
|
||||
COMMAND ${LCOV_EXEC_TESTS_CMD}
|
||||
COMMAND ${LCOV_CAPTURE_CMD}
|
||||
COMMAND ${LCOV_BASELINE_COUNT_CMD}
|
||||
COMMAND ${LCOV_FILTER_CMD}
|
||||
COMMAND ${LCOV_GEN_HTML_CMD}
|
||||
${GCOVR_XML_CMD_COMMAND}
|
||||
|
||||
# Set output files as GENERATED (will be removed on 'make clean')
|
||||
BYPRODUCTS
|
||||
${Coverage_NAME}.base
|
||||
${Coverage_NAME}.capture
|
||||
${Coverage_NAME}.total
|
||||
${Coverage_NAME}.info
|
||||
${GCOVR_XML_CMD_BYPRODUCTS}
|
||||
${Coverage_NAME}/index.html
|
||||
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
|
||||
DEPENDS ${Coverage_DEPENDENCIES}
|
||||
VERBATIM # Protect arguments to commands
|
||||
COMMENT "Resetting code coverage counters to zero.\nProcessing code coverage counters and generating report."
|
||||
)
|
||||
|
||||
# Show where to find the lcov info report
|
||||
add_custom_command(TARGET ${Coverage_NAME} POST_BUILD
|
||||
COMMAND ;
|
||||
COMMENT "Lcov code coverage info report saved in ${Coverage_NAME}.info."
|
||||
${GCOVR_XML_CMD_COMMENT}
|
||||
)
|
||||
|
||||
# Show info where to find the report
|
||||
add_custom_command(TARGET ${Coverage_NAME} POST_BUILD
|
||||
COMMAND ;
|
||||
COMMENT "Open ./${Coverage_NAME}/index.html in your browser to view the coverage report."
|
||||
)
|
||||
|
||||
endfunction() # setup_target_for_coverage_lcov
|
||||
|
||||
# Defines a target for running and collection code coverage information
|
||||
# Builds dependencies, runs the given executable and outputs reports.
|
||||
# NOTE! The executable should always have a ZERO as exit code otherwise
|
||||
# the coverage generation will not complete.
|
||||
#
|
||||
# setup_target_for_coverage_gcovr_xml(
|
||||
# NAME ctest_coverage # New target name
|
||||
# EXECUTABLE ctest -j ${PROCESSOR_COUNT} # Executable in PROJECT_BINARY_DIR
|
||||
# DEPENDENCIES executable_target # Dependencies to build first
|
||||
# BASE_DIRECTORY "../" # Base directory for report
|
||||
# # (defaults to PROJECT_SOURCE_DIR)
|
||||
# EXCLUDE "src/dir1/*" "src/dir2/*" # Patterns to exclude (can be relative
|
||||
# # to BASE_DIRECTORY, with CMake 3.4+)
|
||||
# )
|
||||
# The user can set the variable GCOVR_ADDITIONAL_ARGS to supply additional flags to the
|
||||
# GCVOR command.
|
||||
function(setup_target_for_coverage_gcovr_xml)
|
||||
|
||||
set(options NONE)
|
||||
set(oneValueArgs BASE_DIRECTORY NAME)
|
||||
set(multiValueArgs EXCLUDE EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES)
|
||||
cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
|
||||
|
||||
if(NOT GCOVR_PATH)
|
||||
message(FATAL_ERROR "gcovr not found! Aborting...")
|
||||
endif() # NOT GCOVR_PATH
|
||||
|
||||
# Set base directory (as absolute path), or default to PROJECT_SOURCE_DIR
|
||||
if(DEFINED Coverage_BASE_DIRECTORY)
|
||||
get_filename_component(BASEDIR ${Coverage_BASE_DIRECTORY} ABSOLUTE)
|
||||
else()
|
||||
set(BASEDIR ${PROJECT_SOURCE_DIR})
|
||||
endif()
|
||||
|
||||
# Collect excludes (CMake 3.4+: Also compute absolute paths)
|
||||
set(GCOVR_EXCLUDES "")
|
||||
foreach(EXCLUDE ${Coverage_EXCLUDE} ${COVERAGE_EXCLUDES} ${COVERAGE_GCOVR_EXCLUDES})
|
||||
if(CMAKE_VERSION VERSION_GREATER 3.4)
|
||||
get_filename_component(EXCLUDE ${EXCLUDE} ABSOLUTE BASE_DIR ${BASEDIR})
|
||||
endif()
|
||||
list(APPEND GCOVR_EXCLUDES "${EXCLUDE}")
|
||||
endforeach()
|
||||
list(REMOVE_DUPLICATES GCOVR_EXCLUDES)
|
||||
|
||||
# Combine excludes to several -e arguments
|
||||
set(GCOVR_EXCLUDE_ARGS "")
|
||||
foreach(EXCLUDE ${GCOVR_EXCLUDES})
|
||||
list(APPEND GCOVR_EXCLUDE_ARGS "-e")
|
||||
list(APPEND GCOVR_EXCLUDE_ARGS "${EXCLUDE}")
|
||||
endforeach()
|
||||
|
||||
# Set up commands which will be run to generate coverage data
|
||||
# Run tests
|
||||
set(GCOVR_XML_EXEC_TESTS_CMD
|
||||
${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS}
|
||||
)
|
||||
# Running gcovr
|
||||
set(GCOVR_XML_CMD
|
||||
${GCOVR_PATH} --xml ${Coverage_NAME}.xml -r ${BASEDIR} ${GCOVR_ADDITIONAL_ARGS}
|
||||
${GCOVR_EXCLUDE_ARGS} --object-directory=${PROJECT_BINARY_DIR}
|
||||
)
|
||||
|
||||
if(CODE_COVERAGE_VERBOSE)
|
||||
message(STATUS "Executed command report")
|
||||
|
||||
message(STATUS "Command to run tests: ")
|
||||
string(REPLACE ";" " " GCOVR_XML_EXEC_TESTS_CMD_SPACED "${GCOVR_XML_EXEC_TESTS_CMD}")
|
||||
message(STATUS "${GCOVR_XML_EXEC_TESTS_CMD_SPACED}")
|
||||
|
||||
message(STATUS "Command to generate gcovr XML coverage data: ")
|
||||
string(REPLACE ";" " " GCOVR_XML_CMD_SPACED "${GCOVR_XML_CMD}")
|
||||
message(STATUS "${GCOVR_XML_CMD_SPACED}")
|
||||
endif()
|
||||
|
||||
add_custom_target(${Coverage_NAME}
|
||||
COMMAND ${GCOVR_XML_EXEC_TESTS_CMD}
|
||||
COMMAND ${GCOVR_XML_CMD}
|
||||
|
||||
BYPRODUCTS ${Coverage_NAME}.xml
|
||||
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
|
||||
DEPENDS ${Coverage_DEPENDENCIES}
|
||||
VERBATIM # Protect arguments to commands
|
||||
COMMENT "Running gcovr to produce Cobertura code coverage report."
|
||||
)
|
||||
|
||||
# Show info where to find the report
|
||||
add_custom_command(TARGET ${Coverage_NAME} POST_BUILD
|
||||
COMMAND ;
|
||||
COMMENT "Cobertura code coverage report saved in ${Coverage_NAME}.xml."
|
||||
)
|
||||
endfunction() # setup_target_for_coverage_gcovr_xml
|
||||
|
||||
# Defines a target for running and collection code coverage information
|
||||
# Builds dependencies, runs the given executable and outputs reports.
|
||||
# NOTE! The executable should always have a ZERO as exit code otherwise
|
||||
# the coverage generation will not complete.
|
||||
#
|
||||
# setup_target_for_coverage_gcovr_html(
|
||||
# NAME ctest_coverage # New target name
|
||||
# EXECUTABLE ctest -j ${PROCESSOR_COUNT} # Executable in PROJECT_BINARY_DIR
|
||||
# DEPENDENCIES executable_target # Dependencies to build first
|
||||
# BASE_DIRECTORY "../" # Base directory for report
|
||||
# # (defaults to PROJECT_SOURCE_DIR)
|
||||
# EXCLUDE "src/dir1/*" "src/dir2/*" # Patterns to exclude (can be relative
|
||||
# # to BASE_DIRECTORY, with CMake 3.4+)
|
||||
# )
|
||||
# The user can set the variable GCOVR_ADDITIONAL_ARGS to supply additional flags to the
|
||||
# GCVOR command.
|
||||
function(setup_target_for_coverage_gcovr_html)
|
||||
|
||||
set(options NONE)
|
||||
set(oneValueArgs BASE_DIRECTORY NAME)
|
||||
set(multiValueArgs EXCLUDE EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES)
|
||||
cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
|
||||
|
||||
if(NOT GCOVR_PATH)
|
||||
message(FATAL_ERROR "gcovr not found! Aborting...")
|
||||
endif() # NOT GCOVR_PATH
|
||||
|
||||
# Set base directory (as absolute path), or default to PROJECT_SOURCE_DIR
|
||||
if(DEFINED Coverage_BASE_DIRECTORY)
|
||||
get_filename_component(BASEDIR ${Coverage_BASE_DIRECTORY} ABSOLUTE)
|
||||
else()
|
||||
set(BASEDIR ${PROJECT_SOURCE_DIR})
|
||||
endif()
|
||||
|
||||
# Collect excludes (CMake 3.4+: Also compute absolute paths)
|
||||
set(GCOVR_EXCLUDES "")
|
||||
foreach(EXCLUDE ${Coverage_EXCLUDE} ${COVERAGE_EXCLUDES} ${COVERAGE_GCOVR_EXCLUDES})
|
||||
if(CMAKE_VERSION VERSION_GREATER 3.4)
|
||||
get_filename_component(EXCLUDE ${EXCLUDE} ABSOLUTE BASE_DIR ${BASEDIR})
|
||||
endif()
|
||||
list(APPEND GCOVR_EXCLUDES "${EXCLUDE}")
|
||||
endforeach()
|
||||
list(REMOVE_DUPLICATES GCOVR_EXCLUDES)
|
||||
|
||||
# Combine excludes to several -e arguments
|
||||
set(GCOVR_EXCLUDE_ARGS "")
|
||||
foreach(EXCLUDE ${GCOVR_EXCLUDES})
|
||||
list(APPEND GCOVR_EXCLUDE_ARGS "-e")
|
||||
list(APPEND GCOVR_EXCLUDE_ARGS "${EXCLUDE}")
|
||||
endforeach()
|
||||
|
||||
# Set up commands which will be run to generate coverage data
|
||||
# Run tests
|
||||
set(GCOVR_HTML_EXEC_TESTS_CMD
|
||||
${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS}
|
||||
)
|
||||
# Create folder
|
||||
set(GCOVR_HTML_FOLDER_CMD
|
||||
${CMAKE_COMMAND} -E make_directory ${PROJECT_BINARY_DIR}/${Coverage_NAME}
|
||||
)
|
||||
# Running gcovr
|
||||
set(GCOVR_HTML_CMD
|
||||
${GCOVR_PATH} --html ${Coverage_NAME}/index.html --html-details -r ${BASEDIR} ${GCOVR_ADDITIONAL_ARGS}
|
||||
${GCOVR_EXCLUDE_ARGS} --object-directory=${PROJECT_BINARY_DIR}
|
||||
)
|
||||
|
||||
if(CODE_COVERAGE_VERBOSE)
|
||||
message(STATUS "Executed command report")
|
||||
|
||||
message(STATUS "Command to run tests: ")
|
||||
string(REPLACE ";" " " GCOVR_HTML_EXEC_TESTS_CMD_SPACED "${GCOVR_HTML_EXEC_TESTS_CMD}")
|
||||
message(STATUS "${GCOVR_HTML_EXEC_TESTS_CMD_SPACED}")
|
||||
|
||||
message(STATUS "Command to create a folder: ")
|
||||
string(REPLACE ";" " " GCOVR_HTML_FOLDER_CMD_SPACED "${GCOVR_HTML_FOLDER_CMD}")
|
||||
message(STATUS "${GCOVR_HTML_FOLDER_CMD_SPACED}")
|
||||
|
||||
message(STATUS "Command to generate gcovr HTML coverage data: ")
|
||||
string(REPLACE ";" " " GCOVR_HTML_CMD_SPACED "${GCOVR_HTML_CMD}")
|
||||
message(STATUS "${GCOVR_HTML_CMD_SPACED}")
|
||||
endif()
|
||||
|
||||
add_custom_target(${Coverage_NAME}
|
||||
COMMAND ${GCOVR_HTML_EXEC_TESTS_CMD}
|
||||
COMMAND ${GCOVR_HTML_FOLDER_CMD}
|
||||
COMMAND ${GCOVR_HTML_CMD}
|
||||
|
||||
BYPRODUCTS ${PROJECT_BINARY_DIR}/${Coverage_NAME}/index.html # report directory
|
||||
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
|
||||
DEPENDS ${Coverage_DEPENDENCIES}
|
||||
VERBATIM # Protect arguments to commands
|
||||
COMMENT "Running gcovr to produce HTML code coverage report."
|
||||
)
|
||||
|
||||
# Show info where to find the report
|
||||
add_custom_command(TARGET ${Coverage_NAME} POST_BUILD
|
||||
COMMAND ;
|
||||
COMMENT "Open ./${Coverage_NAME}/index.html in your browser to view the coverage report."
|
||||
)
|
||||
|
||||
endfunction() # setup_target_for_coverage_gcovr_html
|
||||
|
||||
# Defines a target for running and collection code coverage information
|
||||
# Builds dependencies, runs the given executable and outputs reports.
|
||||
# NOTE! The executable should always have a ZERO as exit code otherwise
|
||||
# the coverage generation will not complete.
|
||||
#
|
||||
# setup_target_for_coverage_fastcov(
|
||||
# NAME testrunner_coverage # New target name
|
||||
# EXECUTABLE testrunner -j ${PROCESSOR_COUNT} # Executable in PROJECT_BINARY_DIR
|
||||
# DEPENDENCIES testrunner # Dependencies to build first
|
||||
# BASE_DIRECTORY "../" # Base directory for report
|
||||
# # (defaults to PROJECT_SOURCE_DIR)
|
||||
# EXCLUDE "src/dir1/" "src/dir2/" # Patterns to exclude.
|
||||
# NO_DEMANGLE # Don't demangle C++ symbols
|
||||
# # even if c++filt is found
|
||||
# SKIP_HTML # Don't create html report
|
||||
# POST_CMD perl -i -pe s!${PROJECT_SOURCE_DIR}/!!g ctest_coverage.json # E.g. for stripping source dir from file paths
|
||||
# )
|
||||
function(setup_target_for_coverage_fastcov)
|
||||
|
||||
set(options NO_DEMANGLE SKIP_HTML)
|
||||
set(oneValueArgs BASE_DIRECTORY NAME)
|
||||
set(multiValueArgs EXCLUDE EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES FASTCOV_ARGS GENHTML_ARGS POST_CMD)
|
||||
cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
|
||||
|
||||
if(NOT FASTCOV_PATH)
|
||||
message(FATAL_ERROR "fastcov not found! Aborting...")
|
||||
endif()
|
||||
|
||||
if(NOT Coverage_SKIP_HTML AND NOT GENHTML_PATH)
|
||||
message(FATAL_ERROR "genhtml not found! Aborting...")
|
||||
endif()
|
||||
|
||||
# Set base directory (as absolute path), or default to PROJECT_SOURCE_DIR
|
||||
if(Coverage_BASE_DIRECTORY)
|
||||
get_filename_component(BASEDIR ${Coverage_BASE_DIRECTORY} ABSOLUTE)
|
||||
else()
|
||||
set(BASEDIR ${PROJECT_SOURCE_DIR})
|
||||
endif()
|
||||
|
||||
# Collect excludes (Patterns, not paths, for fastcov)
|
||||
set(FASTCOV_EXCLUDES "")
|
||||
foreach(EXCLUDE ${Coverage_EXCLUDE} ${COVERAGE_EXCLUDES} ${COVERAGE_FASTCOV_EXCLUDES})
|
||||
list(APPEND FASTCOV_EXCLUDES "${EXCLUDE}")
|
||||
endforeach()
|
||||
list(REMOVE_DUPLICATES FASTCOV_EXCLUDES)
|
||||
|
||||
# Conditional arguments
|
||||
if(CPPFILT_PATH AND NOT ${Coverage_NO_DEMANGLE})
|
||||
set(GENHTML_EXTRA_ARGS "--demangle-cpp")
|
||||
endif()
|
||||
|
||||
# Set up commands which will be run to generate coverage data
|
||||
set(FASTCOV_EXEC_TESTS_CMD ${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS})
|
||||
|
||||
set(FASTCOV_CAPTURE_CMD ${FASTCOV_PATH} ${Coverage_FASTCOV_ARGS} --gcov ${GCOV_PATH}
|
||||
--search-directory ${BASEDIR}
|
||||
--process-gcno
|
||||
--output ${Coverage_NAME}.json
|
||||
--exclude ${FASTCOV_EXCLUDES}
|
||||
)
|
||||
|
||||
set(FASTCOV_CONVERT_CMD ${FASTCOV_PATH}
|
||||
-C ${Coverage_NAME}.json --lcov --output ${Coverage_NAME}.info
|
||||
)
|
||||
|
||||
if(Coverage_SKIP_HTML)
|
||||
set(FASTCOV_HTML_CMD ";")
|
||||
else()
|
||||
set(FASTCOV_HTML_CMD ${GENHTML_PATH} ${GENHTML_EXTRA_ARGS} ${Coverage_GENHTML_ARGS}
|
||||
-o ${Coverage_NAME} ${Coverage_NAME}.info
|
||||
)
|
||||
endif()
|
||||
|
||||
set(FASTCOV_POST_CMD ";")
|
||||
if(Coverage_POST_CMD)
|
||||
set(FASTCOV_POST_CMD ${Coverage_POST_CMD})
|
||||
endif()
|
||||
|
||||
if(CODE_COVERAGE_VERBOSE)
|
||||
message(STATUS "Code coverage commands for target ${Coverage_NAME} (fastcov):")
|
||||
|
||||
message(" Running tests:")
|
||||
string(REPLACE ";" " " FASTCOV_EXEC_TESTS_CMD_SPACED "${FASTCOV_EXEC_TESTS_CMD}")
|
||||
message(" ${FASTCOV_EXEC_TESTS_CMD_SPACED}")
|
||||
|
||||
message(" Capturing fastcov counters and generating report:")
|
||||
string(REPLACE ";" " " FASTCOV_CAPTURE_CMD_SPACED "${FASTCOV_CAPTURE_CMD}")
|
||||
message(" ${FASTCOV_CAPTURE_CMD_SPACED}")
|
||||
|
||||
message(" Converting fastcov .json to lcov .info:")
|
||||
string(REPLACE ";" " " FASTCOV_CONVERT_CMD_SPACED "${FASTCOV_CONVERT_CMD}")
|
||||
message(" ${FASTCOV_CONVERT_CMD_SPACED}")
|
||||
|
||||
if(NOT Coverage_SKIP_HTML)
|
||||
message(" Generating HTML report: ")
|
||||
string(REPLACE ";" " " FASTCOV_HTML_CMD_SPACED "${FASTCOV_HTML_CMD}")
|
||||
message(" ${FASTCOV_HTML_CMD_SPACED}")
|
||||
endif()
|
||||
if(Coverage_POST_CMD)
|
||||
message(" Running post command: ")
|
||||
string(REPLACE ";" " " FASTCOV_POST_CMD_SPACED "${FASTCOV_POST_CMD}")
|
||||
message(" ${FASTCOV_POST_CMD_SPACED}")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# Setup target
|
||||
add_custom_target(${Coverage_NAME}
|
||||
|
||||
# Cleanup fastcov
|
||||
COMMAND ${FASTCOV_PATH} ${Coverage_FASTCOV_ARGS} --gcov ${GCOV_PATH}
|
||||
--search-directory ${BASEDIR}
|
||||
--zerocounters
|
||||
|
||||
COMMAND ${FASTCOV_EXEC_TESTS_CMD}
|
||||
COMMAND ${FASTCOV_CAPTURE_CMD}
|
||||
COMMAND ${FASTCOV_CONVERT_CMD}
|
||||
COMMAND ${FASTCOV_HTML_CMD}
|
||||
COMMAND ${FASTCOV_POST_CMD}
|
||||
|
||||
# Set output files as GENERATED (will be removed on 'make clean')
|
||||
BYPRODUCTS
|
||||
${Coverage_NAME}.info
|
||||
${Coverage_NAME}.json
|
||||
${Coverage_NAME}/index.html # report directory
|
||||
|
||||
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
|
||||
DEPENDS ${Coverage_DEPENDENCIES}
|
||||
VERBATIM # Protect arguments to commands
|
||||
COMMENT "Resetting code coverage counters to zero. Processing code coverage counters and generating report."
|
||||
)
|
||||
|
||||
set(INFO_MSG "fastcov code coverage info report saved in ${Coverage_NAME}.info and ${Coverage_NAME}.json.")
|
||||
if(NOT Coverage_SKIP_HTML)
|
||||
string(APPEND INFO_MSG " Open ${PROJECT_BINARY_DIR}/${Coverage_NAME}/index.html in your browser to view the coverage report.")
|
||||
endif()
|
||||
# Show where to find the fastcov info report
|
||||
add_custom_command(TARGET ${Coverage_NAME} POST_BUILD
|
||||
COMMAND ${CMAKE_COMMAND} -E echo ${INFO_MSG}
|
||||
)
|
||||
|
||||
endfunction() # setup_target_for_coverage_fastcov
|
||||
|
||||
function(append_coverage_compiler_flags)
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
|
||||
set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
|
||||
message(STATUS "Appending code coverage compiler flags: ${COVERAGE_COMPILER_FLAGS}")
|
||||
endfunction() # append_coverage_compiler_flags
|
||||
|
||||
# Setup coverage for specific library
|
||||
function(append_coverage_compiler_flags_to_target name)
|
||||
separate_arguments(_flag_list NATIVE_COMMAND "${COVERAGE_COMPILER_FLAGS}")
|
||||
target_compile_options(${name} PRIVATE ${_flag_list})
|
||||
if(CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
|
||||
target_link_libraries(${name} PRIVATE gcov)
|
||||
endif()
|
||||
endfunction()
|
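For context, a minimal sketch of how the coverage helpers above are typically wired together from a consumer CMakeLists.txt. The target name unit_tests is a hypothetical placeholder, and BASE_DIRECTORY is assumed to be accepted as in the stock CodeCoverage.cmake this module derives from:

    include(CodeCoverage)
    append_coverage_compiler_flags()
    setup_target_for_coverage_fastcov(
        NAME coverage                           # custom target; writes coverage.info / coverage.json
        EXECUTABLE ctest                        # command used to run the instrumented tests
        DEPENDENCIES unit_tests                 # hypothetical test target to build first
        BASE_DIRECTORY "${PROJECT_SOURCE_DIR}"  # search root handed to fastcov
    )

Per-target instrumentation can instead go through append_coverage_compiler_flags_to_target(unit_tests), which, as the function above shows, also links gcov when building with GCC.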
@@ -1,22 +0,0 @@
|
||||
if(ENABLE_CLANG_TIDY)
|
||||
find_program(CLANG_TIDY_COMMAND NAMES clang-tidy)
|
||||
|
||||
if(NOT CLANG_TIDY_COMMAND)
|
||||
message(WARNING "🔴 CMake_RUN_CLANG_TIDY is ON but clang-tidy is not found!")
|
||||
set(CMAKE_CXX_CLANG_TIDY "" CACHE STRING "" FORCE)
|
||||
else()
|
||||
|
||||
message(STATUS "🟢 CMake_RUN_CLANG_TIDY is ON")
|
||||
set(CLANGTIDY_EXTRA_ARGS
|
||||
"-extra-arg=-Wno-unknown-warning-option"
|
||||
)
|
||||
set(CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_COMMAND};-p=${CMAKE_BINARY_DIR};${CLANGTIDY_EXTRA_ARGS}" CACHE STRING "" FORCE)
|
||||
|
||||
add_custom_target(clang-tidy
|
||||
COMMAND ${CMAKE_COMMAND} --build ${CMAKE_BINARY_DIR} --target ${CMAKE_PROJECT_NAME}
|
||||
COMMAND ${CMAKE_COMMAND} --build ${CMAKE_BINARY_DIR} --target clang-tidy
|
||||
COMMENT "Running clang-tidy..."
|
||||
)
|
||||
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
|
||||
endif()
|
||||
endif(ENABLE_CLANG_TIDY)
|
10
conandata.yml
Normal file
@@ -0,0 +1,10 @@
|
||||
sources:
|
||||
"1.1.2":
|
||||
url: "https://github.com/rmontanana/BayesNet/archive/v1.1.2.tar.gz"
|
||||
sha256: "placeholder_sha256" # Replace with actual SHA256 when releasing
|
||||
"1.0.7":
|
||||
url: "https://github.com/rmontanana/BayesNet/archive/v1.0.7.tar.gz"
|
||||
sha256: "placeholder_sha256" # Replace with actual SHA256 when releasing
|
||||
|
||||
patches:
|
||||
# Add patches here if needed for specific versions
|
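As a hedged illustration of replacing the placeholder digests at release time, the checksum can be computed with a few lines of standard-library Python (the URL is the one listed above; it only resolves once the tag actually exists):

    # Sketch: compute the sha256 that replaces "placeholder_sha256" in conandata.yml
    import hashlib
    import urllib.request

    url = "https://github.com/rmontanana/BayesNet/archive/v1.1.2.tar.gz"
    data = urllib.request.urlopen(url).read()    # download the release tarball
    print(hashlib.sha256(data).hexdigest())      # digest to paste into conandata.yml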
93
conanfile.py
Normal file
@@ -0,0 +1,93 @@
|
||||
import os, re, pathlib
|
||||
from conan import ConanFile
|
||||
from conan.tools.cmake import CMakeToolchain, CMake, cmake_layout, CMakeDeps
|
||||
from conan.tools.files import copy
|
||||
|
||||
class BayesNetConan(ConanFile):
|
||||
name = "bayesnet"
|
||||
settings = "os", "compiler", "build_type", "arch"
|
||||
options = {
|
||||
"shared": [True, False],
|
||||
"fPIC": [True, False],
|
||||
"enable_testing": [True, False],
|
||||
"enable_coverage": [True, False]
|
||||
}
|
||||
default_options = {
|
||||
"shared": False,
|
||||
"fPIC": True,
|
||||
"enable_testing": False,
|
||||
"enable_coverage": False
|
||||
}
|
||||
|
||||
# Sources are located in the same place as this recipe, copy them to the recipe
|
||||
exports_sources = "CMakeLists.txt", "bayesnet/*", "config/*", "cmake/*", "docs/*", "tests/*", "bayesnetConfig.cmake.in"
|
||||
|
||||
def set_version(self) -> None:
|
||||
cmake = pathlib.Path(self.recipe_folder) / "CMakeLists.txt"
|
||||
text = cmake.read_text(encoding="utf-8")
|
||||
|
||||
# Extract the version from a line like: project(foo VERSION 1.2.3)
|
||||
match = re.search(
|
||||
r"""project\s*\([^\)]*VERSION\s+([0-9]+\.[0-9]+\.[0-9]+)""",
|
||||
text, re.IGNORECASE | re.VERBOSE
|
||||
)
|
||||
if match:
|
||||
self.version = match.group(1)
|
||||
else:
|
||||
raise Exception("Version not found in CMakeLists.txt")
|
||||
|
||||
def config_options(self):
|
||||
if self.settings.os == "Windows":
|
||||
del self.options.fPIC
|
||||
|
||||
def configure(self):
|
||||
if self.options.shared:
|
||||
self.options.rm_safe("fPIC")
|
||||
|
||||
def requirements(self):
|
||||
# Core dependencies
|
||||
self.requires("libtorch/2.7.0")
|
||||
self.requires("nlohmann_json/3.11.3")
|
||||
self.requires("folding/1.1.1") # Custom package
|
||||
self.requires("fimdlp/2.1.0") # Custom package
|
||||
|
||||
def build_requirements(self):
|
||||
self.build_requires("cmake/[>=3.27]")
|
||||
self.test_requires("arff-files/1.2.0") # Custom package
|
||||
self.test_requires("catch2/3.8.1")
|
||||
|
||||
def layout(self):
|
||||
cmake_layout(self)
|
||||
|
||||
def generate(self):
|
||||
deps = CMakeDeps(self)
|
||||
deps.generate()
|
||||
tc = CMakeToolchain(self)
|
||||
tc.variables["ENABLE_TESTING"] = self.options.enable_testing
|
||||
tc.variables["CODE_COVERAGE"] = self.options.enable_coverage
|
||||
tc.generate()
|
||||
|
||||
def build(self):
|
||||
cmake = CMake(self)
|
||||
cmake.configure()
|
||||
cmake.build()
|
||||
|
||||
if self.options.enable_testing:
|
||||
# Run tests only if we're building with testing enabled
|
||||
self.run("ctest --output-on-failure", cwd=self.build_folder)
|
||||
|
||||
def package(self):
|
||||
copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
|
||||
cmake = CMake(self)
|
||||
cmake.install()
|
||||
|
||||
def package_info(self):
|
||||
self.cpp_info.libs = ["bayesnet"]
|
||||
self.cpp_info.includedirs = ["include"]
|
||||
self.cpp_info.set_property("cmake_find_mode", "both")
|
||||
self.cpp_info.set_property("cmake_target_name", "bayesnet::bayesnet")
|
||||
|
||||
# Add compiler flags that might be needed
|
||||
if self.settings.os == "Linux":
|
||||
self.cpp_info.system_libs = ["pthread"]
|
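To make the set_version() logic above concrete, here is a small self-contained sketch of the same regular expression applied to a sample project() line; the sample text is illustrative, not copied from the repository:

    # Sketch: the version-extraction regex from set_version() on a sample line
    import re

    text = "project(bayesnet VERSION 1.1.2 LANGUAGES CXX)"
    match = re.search(
        r"""project\s*\([^\)]*VERSION\s+([0-9]+\.[0-9]+\.[0-9]+)""",
        text, re.IGNORECASE | re.VERBOSE,
    )
    print(match.group(1) if match else "not found")  # -> 1.1.2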
@@ -3,12 +3,8 @@
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
|
||||
#define PROJECT_VERSION_MAJOR @PROJECT_VERSION_MAJOR@
|
||||
#define PROJECT_VERSION_MINOR @PROJECT_VERSION_MINOR@
|
||||
#define PROJECT_VERSION_PATCH @PROJECT_VERSION_PATCH@
|
||||
|
||||
static constexpr std::string_view project_name = "@PROJECT_NAME@";
|
||||
static constexpr std::string_view project_version = "@PROJECT_VERSION@";
|
||||
static constexpr std::string_view project_description = "@PROJECT_DESCRIPTION@";
|
||||
static constexpr std::string_view git_sha = "@GIT_SHA@";
|
||||
static constexpr std::string_view data_path = "@BayesNet_SOURCE_DIR@/tests/data/";
|
||||
static constexpr std::string_view data_path = "@bayesnet_SOURCE_DIR@/tests/data/";
|
@@ -1,36 +1,16 @@
|
||||
@startuml
|
||||
title clang-uml class diagram model
|
||||
class "bayesnet::Metrics" as C_0000736965376885623323
|
||||
class C_0000736965376885623323 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+Metrics() = default : void
|
||||
+Metrics(const torch::Tensor & samples, const std::vector<std::string> & features, const std::string & className, const int classNumStates) : void
|
||||
+Metrics(const std::vector<std::vector<int>> & vsamples, const std::vector<int> & labels, const std::vector<std::string> & features, const std::string & className, const int classNumStates) : void
|
||||
..
|
||||
+SelectKBestWeighted(const torch::Tensor & weights, bool ascending = false, unsigned int k = 0) : std::vector<int>
|
||||
+conditionalEdge(const torch::Tensor & weights) : torch::Tensor
|
||||
+conditionalEdgeWeights(std::vector<float> & weights) : std::vector<float>
|
||||
#doCombinations<T>(const std::vector<T> & source) : std::vector<std::pair<T, T> >
|
||||
#entropy(const torch::Tensor & feature, const torch::Tensor & weights) : double
|
||||
+getScoresKBest() const : std::vector<double>
|
||||
+maximumSpanningTree(const std::vector<std::string> & features, const torch::Tensor & weights, const int root) : std::vector<std::pair<int,int>>
|
||||
+mutualInformation(const torch::Tensor & firstFeature, const torch::Tensor & secondFeature, const torch::Tensor & weights) : double
|
||||
#pop_first<T>(std::vector<T> & v) : T
|
||||
__
|
||||
#className : std::string
|
||||
#features : std::vector<std::string>
|
||||
#samples : torch::Tensor
|
||||
}
|
||||
class "bayesnet::Node" as C_0001303524929067080934
|
||||
class C_0001303524929067080934 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
class "bayesnet::Node" as C_0010428199432536647474
|
||||
class C_0010428199432536647474 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+Node(const std::string &) : void
|
||||
..
|
||||
+addChild(Node *) : void
|
||||
+addParent(Node *) : void
|
||||
+clear() : void
|
||||
+computeCPT(const torch::Tensor & dataset, const std::vector<std::string> & features, const double laplaceSmoothing, const torch::Tensor & weights) : void
|
||||
+computeCPT(const torch::Tensor & dataset, const std::vector<std::string> & features, const double smoothing, const torch::Tensor & weights) : void
|
||||
+getCPT() : torch::Tensor &
|
||||
+getChildren() : std::vector<Node *> &
|
||||
+getFactorValue(std::map<std::string,int> &) : float
|
||||
+getFactorValue(std::map<std::string,int> &) : double
|
||||
+getName() const : std::string
|
||||
+getNumStates() const : int
|
||||
+getParents() : std::vector<Node *> &
|
||||
@@ -41,24 +21,29 @@ class C_0001303524929067080934 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+setNumStates(int) : void
|
||||
__
|
||||
}
|
||||
class "bayesnet::Network" as C_0001186707649890429575
|
||||
class C_0001186707649890429575 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
enum "bayesnet::Smoothing_t" as C_0013393078277439680282
|
||||
enum C_0013393078277439680282 {
|
||||
NONE
|
||||
ORIGINAL
|
||||
LAPLACE
|
||||
CESTNIK
|
||||
}
|
||||
class "bayesnet::Network" as C_0009493661199123436603
|
||||
class C_0009493661199123436603 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+Network() : void
|
||||
+Network(float) : void
|
||||
+Network(const Network &) : void
|
||||
+~Network() = default : void
|
||||
..
|
||||
+addEdge(const std::string &, const std::string &) : void
|
||||
+addNode(const std::string &) : void
|
||||
+dump_cpt() const : std::string
|
||||
+fit(const torch::Tensor & samples, const torch::Tensor & weights, const std::vector<std::string> & featureNames, const std::string & className, const std::map<std::string,std::vector<int>> & states) : void
|
||||
+fit(const torch::Tensor & X, const torch::Tensor & y, const torch::Tensor & weights, const std::vector<std::string> & featureNames, const std::string & className, const std::map<std::string,std::vector<int>> & states) : void
|
||||
+fit(const std::vector<std::vector<int>> & input_data, const std::vector<int> & labels, const std::vector<double> & weights, const std::vector<std::string> & featureNames, const std::string & className, const std::map<std::string,std::vector<int>> & states) : void
|
||||
+fit(const torch::Tensor & samples, const torch::Tensor & weights, const std::vector<std::string> & featureNames, const std::string & className, const std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : void
|
||||
+fit(const torch::Tensor & X, const torch::Tensor & y, const torch::Tensor & weights, const std::vector<std::string> & featureNames, const std::string & className, const std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : void
|
||||
+fit(const std::vector<std::vector<int>> & input_data, const std::vector<int> & labels, const std::vector<double> & weights, const std::vector<std::string> & featureNames, const std::string & className, const std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : void
|
||||
+getClassName() const : std::string
|
||||
+getClassNumStates() const : int
|
||||
+getEdges() const : std::vector<std::pair<std::string,std::string>>
|
||||
+getFeatures() const : std::vector<std::string>
|
||||
+getMaxThreads() const : float
|
||||
+getNodes() : std::map<std::string,std::unique_ptr<Node>> &
|
||||
+getNumEdges() const : int
|
||||
+getSamples() : torch::Tensor &
|
||||
@@ -76,21 +61,21 @@ class C_0001186707649890429575 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+version() : std::string
|
||||
__
|
||||
}
|
||||
enum "bayesnet::status_t" as C_0000738420730783851375
|
||||
enum C_0000738420730783851375 {
|
||||
enum "bayesnet::status_t" as C_0005907365846270811004
|
||||
enum C_0005907365846270811004 {
|
||||
NORMAL
|
||||
WARNING
|
||||
ERROR
|
||||
}
|
||||
abstract "bayesnet::BaseClassifier" as C_0000327135989451974539
|
||||
abstract C_0000327135989451974539 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
abstract "bayesnet::BaseClassifier" as C_0002617087915615796317
|
||||
abstract C_0002617087915615796317 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+~BaseClassifier() = default : void
|
||||
..
|
||||
{abstract} +dump_cpt() const = 0 : std::string
|
||||
{abstract} +fit(torch::Tensor & X, torch::Tensor & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states) = 0 : BaseClassifier &
|
||||
{abstract} +fit(torch::Tensor & dataset, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states) = 0 : BaseClassifier &
|
||||
{abstract} +fit(torch::Tensor & dataset, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const torch::Tensor & weights) = 0 : BaseClassifier &
|
||||
{abstract} +fit(std::vector<std::vector<int>> & X, std::vector<int> & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states) = 0 : BaseClassifier &
|
||||
{abstract} +fit(torch::Tensor & X, torch::Tensor & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) = 0 : BaseClassifier &
|
||||
{abstract} +fit(torch::Tensor & dataset, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) = 0 : BaseClassifier &
|
||||
{abstract} +fit(torch::Tensor & dataset, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const torch::Tensor & weights, const Smoothing_t smoothing) = 0 : BaseClassifier &
|
||||
{abstract} +fit(std::vector<std::vector<int>> & X, std::vector<int> & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) = 0 : BaseClassifier &
|
||||
{abstract} +getClassNumStates() const = 0 : int
|
||||
{abstract} +getNotes() const = 0 : std::vector<std::string>
|
||||
{abstract} +getNumberOfEdges() const = 0 : int
|
||||
@@ -109,12 +94,37 @@ abstract C_0000327135989451974539 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
{abstract} +setHyperparameters(const nlohmann::json & hyperparameters) = 0 : void
|
||||
{abstract} +show() const = 0 : std::vector<std::string>
|
||||
{abstract} +topological_order() = 0 : std::vector<std::string>
|
||||
{abstract} #trainModel(const torch::Tensor & weights) = 0 : void
|
||||
{abstract} #trainModel(const torch::Tensor & weights, const Smoothing_t smoothing) = 0 : void
|
||||
__
|
||||
#notes : std::vector<std::string>
|
||||
#status : status_t
|
||||
#validHyperparameters : std::vector<std::string>
|
||||
}
|
||||
abstract "bayesnet::Classifier" as C_0002043996622900301644
|
||||
abstract C_0002043996622900301644 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
class "bayesnet::Metrics" as C_0005895723015084986588
|
||||
class C_0005895723015084986588 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+Metrics() = default : void
|
||||
+Metrics(const torch::Tensor & samples, const std::vector<std::string> & features, const std::string & className, const int classNumStates) : void
|
||||
+Metrics(const std::vector<std::vector<int>> & vsamples, const std::vector<int> & labels, const std::vector<std::string> & features, const std::string & className, const int classNumStates) : void
|
||||
..
|
||||
+SelectKBestWeighted(const torch::Tensor & weights, bool ascending = false, unsigned int k = 0) : std::vector<int>
|
||||
+SelectKPairs(const torch::Tensor & weights, std::vector<int> & featuresExcluded, bool ascending = false, unsigned int k = 0) : std::vector<std::pair<int,int>>
|
||||
+conditionalEdge(const torch::Tensor & weights) : torch::Tensor
|
||||
+conditionalEntropy(const torch::Tensor & firstFeature, const torch::Tensor & secondFeature, const torch::Tensor & labels, const torch::Tensor & weights) : double
|
||||
+conditionalMutualInformation(const torch::Tensor & firstFeature, const torch::Tensor & secondFeature, const torch::Tensor & labels, const torch::Tensor & weights) : double
|
||||
#doCombinations<T>(const std::vector<T> & source) : std::vector<std::pair<T, T> >
|
||||
+entropy(const torch::Tensor & feature, const torch::Tensor & weights) : double
|
||||
+getScoresKBest() const : std::vector<double>
|
||||
+getScoresKPairs() const : std::vector<std::pair<std::pair<int,int>,double>>
|
||||
+maximumSpanningTree(const std::vector<std::string> & features, const torch::Tensor & weights, const int root) : std::vector<std::pair<int,int>>
|
||||
+mutualInformation(const torch::Tensor & firstFeature, const torch::Tensor & secondFeature, const torch::Tensor & weights) : double
|
||||
#pop_first<T>(std::vector<T> & v) : T
|
||||
__
|
||||
#className : std::string
|
||||
#features : std::vector<std::string>
|
||||
#samples : torch::Tensor
|
||||
}
|
||||
abstract "bayesnet::Classifier" as C_0016351972983202413152
|
||||
abstract C_0016351972983202413152 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+Classifier(Network model) : void
|
||||
+~Classifier() = default : void
|
||||
..
|
||||
@@ -123,10 +133,10 @@ abstract C_0002043996622900301644 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
{abstract} #buildModel(const torch::Tensor & weights) = 0 : void
|
||||
#checkFitParameters() : void
|
||||
+dump_cpt() const : std::string
|
||||
+fit(torch::Tensor & X, torch::Tensor & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states) : Classifier &
|
||||
+fit(std::vector<std::vector<int>> & X, std::vector<int> & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states) : Classifier &
|
||||
+fit(torch::Tensor & dataset, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states) : Classifier &
|
||||
+fit(torch::Tensor & dataset, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const torch::Tensor & weights) : Classifier &
|
||||
+fit(torch::Tensor & X, torch::Tensor & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : Classifier &
|
||||
+fit(std::vector<std::vector<int>> & X, std::vector<int> & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : Classifier &
|
||||
+fit(torch::Tensor & dataset, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : Classifier &
|
||||
+fit(torch::Tensor & dataset, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const torch::Tensor & weights, const Smoothing_t smoothing) : Classifier &
|
||||
+getClassNumStates() const : int
|
||||
+getNotes() const : std::vector<std::string>
|
||||
+getNumberOfEdges() const : int
|
||||
@@ -143,8 +153,9 @@ abstract C_0002043996622900301644 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+setHyperparameters(const nlohmann::json & hyperparameters) : void
|
||||
+show() const : std::vector<std::string>
|
||||
+topological_order() : std::vector<std::string>
|
||||
#trainModel(const torch::Tensor & weights) : void
|
||||
#trainModel(const torch::Tensor & weights, const Smoothing_t smoothing) : void
|
||||
__
|
||||
#CLASSIFIER_NOT_FITTED : const std::string
|
||||
#className : std::string
|
||||
#dataset : torch::Tensor
|
||||
#features : std::vector<std::string>
|
||||
@@ -153,31 +164,10 @@ __
|
||||
#metrics : Metrics
|
||||
#model : Network
|
||||
#n : unsigned int
|
||||
#notes : std::vector<std::string>
|
||||
#states : std::map<std::string,std::vector<int>>
|
||||
#status : status_t
|
||||
}
|
||||
class "bayesnet::KDB" as C_0001112865019015250005
|
||||
class C_0001112865019015250005 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+KDB(int k, float theta = 0.03) : void
|
||||
+~KDB() = default : void
|
||||
..
|
||||
#buildModel(const torch::Tensor & weights) : void
|
||||
+graph(const std::string & name = "KDB") const : std::vector<std::string>
|
||||
+setHyperparameters(const nlohmann::json & hyperparameters_) : void
|
||||
__
|
||||
}
|
||||
class "bayesnet::TAN" as C_0001760994424884323017
|
||||
class C_0001760994424884323017 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+TAN() : void
|
||||
+~TAN() = default : void
|
||||
..
|
||||
#buildModel(const torch::Tensor & weights) : void
|
||||
+graph(const std::string & name = "TAN") const : std::vector<std::string>
|
||||
__
|
||||
}
|
||||
class "bayesnet::Proposal" as C_0002219995589162262979
|
||||
class C_0002219995589162262979 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
class "bayesnet::Proposal" as C_0017759964713298103839
|
||||
class C_0017759964713298103839 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+Proposal(torch::Tensor & pDataset, std::vector<std::string> & features_, std::string & className_) : void
|
||||
+~Proposal() : void
|
||||
..
|
||||
@@ -190,74 +180,140 @@ __
|
||||
#discretizers : map<std::string,mdlp::CPPFImdlp *>
|
||||
#y : torch::Tensor
|
||||
}
|
||||
class "bayesnet::TANLd" as C_0001668829096702037834
|
||||
class C_0001668829096702037834 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+TANLd() : void
|
||||
+~TANLd() = default : void
|
||||
class "bayesnet::KDB" as C_0008902920152122000044
|
||||
class C_0008902920152122000044 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+KDB(int k, float theta = 0.03) : void
|
||||
+~KDB() = default : void
|
||||
..
|
||||
+fit(torch::Tensor & X, torch::Tensor & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states) : TANLd &
|
||||
+graph(const std::string & name = "TAN") const : std::vector<std::string>
|
||||
#add_m_edges(int idx, std::vector<int> & S, torch::Tensor & weights) : void
|
||||
#buildModel(const torch::Tensor & weights) : void
|
||||
+graph(const std::string & name = "KDB") const : std::vector<std::string>
|
||||
+setHyperparameters(const nlohmann::json & hyperparameters_) : void
|
||||
__
|
||||
}
|
||||
class "bayesnet::KDBLd" as C_0002756018222998454702
|
||||
class C_0002756018222998454702 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+KDBLd(int k) : void
|
||||
+~KDBLd() = default : void
|
||||
..
|
||||
+fit(torch::Tensor & X, torch::Tensor & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : KDBLd &
|
||||
+graph(const std::string & name = "KDB") const : std::vector<std::string>
|
||||
+predict(torch::Tensor & X) : torch::Tensor
|
||||
{static} +version() : std::string
|
||||
__
|
||||
}
|
||||
abstract "bayesnet::FeatureSelect" as C_0001695326193250580823
|
||||
abstract C_0001695326193250580823 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+FeatureSelect(const torch::Tensor & samples, const std::vector<std::string> & features, const std::string & className, const int maxFeatures, const int classNumStates, const torch::Tensor & weights) : void
|
||||
+~FeatureSelect() : void
|
||||
..
|
||||
#computeMeritCFS() : double
|
||||
#computeSuFeatures(const int a, const int b) : double
|
||||
#computeSuLabels() : void
|
||||
{abstract} +fit() = 0 : void
|
||||
+getFeatures() const : std::vector<int>
|
||||
+getScores() const : std::vector<double>
|
||||
#initialize() : void
|
||||
#symmetricalUncertainty(int a, int b) : double
|
||||
__
|
||||
#fitted : bool
|
||||
#maxFeatures : int
|
||||
#selectedFeatures : std::vector<int>
|
||||
#selectedScores : std::vector<double>
|
||||
#suFeatures : std::map<std::pair<int,int>,double>
|
||||
#suLabels : std::vector<double>
|
||||
#weights : const torch::Tensor &
|
||||
}
|
||||
class "bayesnet::CFS" as C_0000011627355691342494
|
||||
class C_0000011627355691342494 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+CFS(const torch::Tensor & samples, const std::vector<std::string> & features, const std::string & className, const int maxFeatures, const int classNumStates, const torch::Tensor & weights) : void
|
||||
+~CFS() : void
|
||||
..
|
||||
+fit() : void
|
||||
__
|
||||
}
|
||||
class "bayesnet::FCBF" as C_0000144682015341746929
|
||||
class C_0000144682015341746929 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+FCBF(const torch::Tensor & samples, const std::vector<std::string> & features, const std::string & className, const int maxFeatures, const int classNumStates, const torch::Tensor & weights, const double threshold) : void
|
||||
+~FCBF() : void
|
||||
..
|
||||
+fit() : void
|
||||
__
|
||||
}
|
||||
class "bayesnet::IWSS" as C_0000008268514674428553
|
||||
class C_0000008268514674428553 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+IWSS(const torch::Tensor & samples, const std::vector<std::string> & features, const std::string & className, const int maxFeatures, const int classNumStates, const torch::Tensor & weights, const double threshold) : void
|
||||
+~IWSS() : void
|
||||
..
|
||||
+fit() : void
|
||||
__
|
||||
}
|
||||
class "bayesnet::SPODE" as C_0000512022813807538451
|
||||
class C_0000512022813807538451 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
class "bayesnet::SPODE" as C_0004096182510460307610
|
||||
class C_0004096182510460307610 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+SPODE(int root) : void
|
||||
+~SPODE() = default : void
|
||||
..
|
||||
#buildModel(const torch::Tensor & weights) : void
|
||||
+graph(const std::string & name = "SPODE") const : std::vector<std::string>
|
||||
+setHyperparameters(const nlohmann::json & hyperparameters_) : void
|
||||
__
|
||||
}
|
||||
class "bayesnet::Ensemble" as C_0001985241386355360576
|
||||
class C_0001985241386355360576 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
class "bayesnet::SPODELd" as C_0010957245114062042836
|
||||
class C_0010957245114062042836 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+SPODELd(int root) : void
|
||||
+~SPODELd() = default : void
|
||||
..
|
||||
+commonFit(const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : SPODELd &
|
||||
+fit(torch::Tensor & X, torch::Tensor & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : SPODELd &
|
||||
+fit(torch::Tensor & dataset, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : SPODELd &
|
||||
+graph(const std::string & name = "SPODELd") const : std::vector<std::string>
|
||||
+predict(torch::Tensor & X) : torch::Tensor
|
||||
{static} +version() : std::string
|
||||
__
|
||||
}
|
||||
class "bayesnet::SPnDE" as C_0016268916386101512883
|
||||
class C_0016268916386101512883 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+SPnDE(std::vector<int> parents) : void
|
||||
+~SPnDE() = default : void
|
||||
..
|
||||
#buildModel(const torch::Tensor & weights) : void
|
||||
+graph(const std::string & name = "SPnDE") const : std::vector<std::string>
|
||||
__
|
||||
}
|
||||
class "bayesnet::TAN" as C_0014087955399074584137
|
||||
class C_0014087955399074584137 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+TAN() : void
|
||||
+~TAN() = default : void
|
||||
..
|
||||
#buildModel(const torch::Tensor & weights) : void
|
||||
+graph(const std::string & name = "TAN") const : std::vector<std::string>
|
||||
+setHyperparameters(const nlohmann::json & hyperparameters_) : void
|
||||
__
|
||||
}
|
||||
class "bayesnet::TANLd" as C_0013350632773616302678
|
||||
class C_0013350632773616302678 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+TANLd() : void
|
||||
+~TANLd() = default : void
|
||||
..
|
||||
+fit(torch::Tensor & X, torch::Tensor & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : TANLd &
|
||||
+graph(const std::string & name = "TANLd") const : std::vector<std::string>
|
||||
+predict(torch::Tensor & X) : torch::Tensor
|
||||
__
|
||||
}
|
||||
class "bayesnet::XSp2de" as C_0007640742442325463418
|
||||
class C_0007640742442325463418 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+XSp2de(int spIndex1, int spIndex2) : void
|
||||
..
|
||||
#buildModel(const torch::Tensor & weights) : void
|
||||
+fitx(torch::Tensor & X, torch::Tensor & y, torch::Tensor & weights_, const Smoothing_t smoothing) : void
|
||||
+getClassNumStates() const : int
|
||||
+getNFeatures() const : int
|
||||
+getNumberOfEdges() const : int
|
||||
+getNumberOfNodes() const : int
|
||||
+getNumberOfStates() const : int
|
||||
+graph(const std::string & title) const : std::vector<std::string>
|
||||
+predict(const std::vector<int> & instance) const : int
|
||||
+predict(std::vector<std::vector<int>> & test_data) : std::vector<int>
|
||||
+predict(torch::Tensor & X) : torch::Tensor
|
||||
+predict_proba(const std::vector<int> & instance) const : std::vector<double>
|
||||
+predict_proba(std::vector<std::vector<int>> & test_data) : std::vector<std::vector<double>>
|
||||
+predict_proba(torch::Tensor & X) : torch::Tensor
|
||||
+score(std::vector<std::vector<int>> & X, std::vector<int> & y) : float
|
||||
+score(torch::Tensor & X, torch::Tensor & y) : float
|
||||
+setHyperparameters(const nlohmann::json & hyperparameters_) : void
|
||||
+to_string() const : std::string
|
||||
#trainModel(const torch::Tensor & weights, const bayesnet::Smoothing_t smoothing) : void
|
||||
__
|
||||
}
|
||||
class "bayesnet::XSpode" as C_0015654113248178830206
|
||||
class C_0015654113248178830206 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+XSpode(int spIndex) : void
|
||||
..
|
||||
#buildModel(const torch::Tensor & weights) : void
|
||||
+fitx(torch::Tensor & X, torch::Tensor & y, torch::Tensor & weights_, const Smoothing_t smoothing) : void
|
||||
+getClassNumStates() const : int
|
||||
+getNFeatures() const : int
|
||||
+getNumberOfEdges() const : int
|
||||
+getNumberOfNodes() const : int
|
||||
+getNumberOfStates() const : int
|
||||
+getStates() : std::vector<int> &
|
||||
+graph(const std::string & title) const : std::vector<std::string>
|
||||
+normalize(std::vector<double> & v) const : void
|
||||
+predict(const std::vector<int> & instance) const : int
|
||||
+predict(std::vector<std::vector<int>> & X) : std::vector<int>
|
||||
+predict(torch::Tensor & X) : torch::Tensor
|
||||
+predict_proba(std::vector<std::vector<int>> & X) : std::vector<std::vector<double>>
|
||||
+predict_proba(torch::Tensor & X) : torch::Tensor
|
||||
+predict_proba(const std::vector<int> & instance) const : std::vector<double>
|
||||
+score(torch::Tensor & X, torch::Tensor & y) : float
|
||||
+score(std::vector<std::vector<int>> & X, std::vector<int> & y) : float
|
||||
+setHyperparameters(const nlohmann::json & hyperparameters_) : void
|
||||
+to_string() const : std::string
|
||||
#trainModel(const torch::Tensor & weights, const bayesnet::Smoothing_t smoothing) : void
|
||||
__
|
||||
}
|
||||
class "bayesnet::TensorUtils" as C_0010304804115474100819
|
||||
class C_0010304804115474100819 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
{static} +to_matrix(const torch::Tensor & X) : std::vector<std::vector<int>>
|
||||
{static} +to_vector<T>(const torch::Tensor & y) : std::vector<T>
|
||||
__
|
||||
}
|
||||
class "bayesnet::Ensemble" as C_0015881931090842884611
|
||||
class C_0015881931090842884611 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+Ensemble(bool predict_voting = true) : void
|
||||
+~Ensemble() = default : void
|
||||
..
|
||||
@@ -280,7 +336,7 @@ class C_0001985241386355360576 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+score(torch::Tensor & X, torch::Tensor & y) : float
|
||||
+show() const : std::vector<std::string>
|
||||
+topological_order() : std::vector<std::string>
|
||||
#trainModel(const torch::Tensor & weights) : void
|
||||
#trainModel(const torch::Tensor & weights, const Smoothing_t smoothing) : void
|
||||
#voting(torch::Tensor & votes) : torch::Tensor
|
||||
__
|
||||
#models : std::vector<std::unique_ptr<Classifier>>
|
||||
@@ -288,41 +344,244 @@ __
|
||||
#predict_voting : bool
|
||||
#significanceModels : std::vector<double>
|
||||
}
|
||||
class "bayesnet::(anonymous_45089536)" as C_0001186398587753535158
|
||||
class C_0001186398587753535158 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
class "bayesnet::A2DE" as C_0001410789567057647859
|
||||
class C_0001410789567057647859 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+A2DE(bool predict_voting = false) : void
|
||||
+~A2DE() : void
|
||||
..
|
||||
#buildModel(const torch::Tensor & weights) : void
|
||||
+graph(const std::string & title = "A2DE") const : std::vector<std::string>
|
||||
+setHyperparameters(const nlohmann::json & hyperparameters) : void
|
||||
__
|
||||
}
|
||||
class "bayesnet::AODE" as C_0006288892608974306258
|
||||
class C_0006288892608974306258 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+AODE(bool predict_voting = false) : void
|
||||
+~AODE() : void
|
||||
..
|
||||
#buildModel(const torch::Tensor & weights) : void
|
||||
+graph(const std::string & title = "AODE") const : std::vector<std::string>
|
||||
+setHyperparameters(const nlohmann::json & hyperparameters) : void
|
||||
__
|
||||
}
|
||||
class "bayesnet::AODELd" as C_0003898187834670349177
|
||||
class C_0003898187834670349177 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+AODELd(bool predict_voting = true) : void
|
||||
+~AODELd() = default : void
|
||||
..
|
||||
#buildModel(const torch::Tensor & weights) : void
|
||||
+fit(torch::Tensor & X_, torch::Tensor & y_, const std::vector<std::string> & features_, const std::string & className_, std::map<std::string,std::vector<int>> & states_, const Smoothing_t smoothing) : AODELd &
|
||||
+graph(const std::string & name = "AODELd") const : std::vector<std::string>
|
||||
#trainModel(const torch::Tensor & weights, const Smoothing_t smoothing) : void
|
||||
__
|
||||
}
|
||||
abstract "bayesnet::FeatureSelect" as C_0013562609546004646591
|
||||
abstract C_0013562609546004646591 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+FeatureSelect(const torch::Tensor & samples, const std::vector<std::string> & features, const std::string & className, const int maxFeatures, const int classNumStates, const torch::Tensor & weights) : void
|
||||
+~FeatureSelect() : void
|
||||
..
|
||||
#computeMeritCFS() : double
|
||||
#computeSuFeatures(const int a, const int b) : double
|
||||
#computeSuLabels() : void
|
||||
{abstract} +fit() = 0 : void
|
||||
+getFeatures() const : std::vector<int>
|
||||
+getScores() const : std::vector<double>
|
||||
#initialize() : void
|
||||
#symmetricalUncertainty(int a, int b) : double
|
||||
__
|
||||
#fitted : bool
|
||||
#maxFeatures : int
|
||||
#selectedFeatures : std::vector<int>
|
||||
#selectedScores : std::vector<double>
|
||||
#suFeatures : std::map<std::pair<int,int>,double>
|
||||
#suLabels : std::vector<double>
|
||||
#weights : const torch::Tensor &
|
||||
}
|
||||
class "bayesnet::(anonymous_60357672)" as C_0006397015156479549697
|
||||
class C_0006397015156479549697 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
__
|
||||
+CFS : std::string
|
||||
+FCBF : std::string
|
||||
+IWSS : std::string
|
||||
}
|
||||
class "bayesnet::(anonymous_45090163)" as C_0000602764946063116717
|
||||
class C_0000602764946063116717 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
class "bayesnet::(anonymous_60358326)" as C_0013066254331852347304
|
||||
class C_0013066254331852347304 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
__
|
||||
+ASC : std::string
|
||||
+DESC : std::string
|
||||
+RAND : std::string
|
||||
}
|
||||
class "bayesnet::BoostAODE" as C_0000358471592399852382
|
||||
class C_0000358471592399852382 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
class "bayesnet::Boost" as C_0009819322948617116148
|
||||
class C_0009819322948617116148 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+Boost(bool predict_voting = false) : void
|
||||
+~Boost() = default : void
|
||||
..
|
||||
#add_model(std::unique_ptr<Classifier> model, double significance) : void
|
||||
#buildModel(const torch::Tensor & weights) : void
|
||||
#featureSelection(torch::Tensor & weights_) : std::vector<int>
|
||||
#remove_last_model() : void
|
||||
+setHyperparameters(const nlohmann::json & hyperparameters_) : void
|
||||
#update_weights(torch::Tensor & ytrain, torch::Tensor & ypred, torch::Tensor & weights) : std::tuple<torch::Tensor &,double,bool>
|
||||
#update_weights_block(int k, torch::Tensor & ytrain, torch::Tensor & weights) : std::tuple<torch::Tensor &,double,bool>
|
||||
__
|
||||
#X_test : torch::Tensor
|
||||
#X_train : torch::Tensor
|
||||
#alpha_block : bool
|
||||
#bisection : bool
|
||||
#block_update : bool
|
||||
#convergence : bool
|
||||
#convergence_best : bool
|
||||
#featureSelector : FeatureSelect *
|
||||
#maxTolerance : int
|
||||
#order_algorithm : std::string
|
||||
#selectFeatures : bool
|
||||
#select_features_algorithm : std::string
|
||||
#threshold : double
|
||||
#y_test : torch::Tensor
|
||||
#y_train : torch::Tensor
|
||||
}
|
||||
class "bayesnet::BoostA2DE" as C_0000272055465257861326
|
||||
class C_0000272055465257861326 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+BoostA2DE(bool predict_voting = false) : void
|
||||
+~BoostA2DE() = default : void
|
||||
..
|
||||
+graph(const std::string & title = "BoostA2DE") const : std::vector<std::string>
|
||||
#trainModel(const torch::Tensor & weights, const Smoothing_t smoothing) : void
|
||||
__
|
||||
}
|
||||
class "bayesnet::(anonymous_60425028)" as C_0000461144706913711531
|
||||
class C_0000461144706913711531 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
__
|
||||
+CFS : std::string
|
||||
+FCBF : std::string
|
||||
+IWSS : std::string
|
||||
}
|
||||
class "bayesnet::(anonymous_60425682)" as C_0014849589915262463453
|
||||
class C_0014849589915262463453 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
__
|
||||
+ASC : std::string
|
||||
+DESC : std::string
|
||||
+RAND : std::string
|
||||
}
|
||||
class "bayesnet::BoostAODE" as C_0002867772739198819061
|
||||
class C_0002867772739198819061 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+BoostAODE(bool predict_voting = false) : void
|
||||
+~BoostAODE() = default : void
|
||||
..
|
||||
#buildModel(const torch::Tensor & weights) : void
|
||||
+graph(const std::string & title = "BoostAODE") const : std::vector<std::string>
|
||||
+setHyperparameters(const nlohmann::json & hyperparameters_) : void
|
||||
#trainModel(const torch::Tensor & weights) : void
|
||||
#trainModel(const torch::Tensor & weights, const Smoothing_t smoothing) : void
|
||||
__
|
||||
}
|
||||
class "bayesnet::MST" as C_0000131858426172291700
|
||||
class C_0000131858426172291700 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
class "bayesnet::XBA2DE" as C_0008480973840710001141
|
||||
class C_0008480973840710001141 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+XBA2DE(bool predict_voting = false) : void
|
||||
+~XBA2DE() = default : void
|
||||
..
|
||||
+getVersion() : std::string
|
||||
+graph(const std::string & title = "XBA2DE") const : std::vector<std::string>
|
||||
#trainModel(const torch::Tensor & weights, const Smoothing_t smoothing) : void
|
||||
__
|
||||
}
|
||||
class "bayesnet::(anonymous_60414016)" as C_0008746994658440620779
|
||||
class C_0008746994658440620779 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
__
|
||||
+CFS : std::string
|
||||
+FCBF : std::string
|
||||
+IWSS : std::string
|
||||
}
|
||||
class "bayesnet::(anonymous_60414670)" as C_0008030559132212449356
|
||||
class C_0008030559132212449356 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
__
|
||||
+ASC : std::string
|
||||
+DESC : std::string
|
||||
+RAND : std::string
|
||||
}
|
||||
class "bayesnet::XBAODE" as C_0005198482342493966768
|
||||
class C_0005198482342493966768 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+XBAODE() : void
|
||||
..
|
||||
+getVersion() : std::string
|
||||
#trainModel(const torch::Tensor & weights, const bayesnet::Smoothing_t smoothing) : void
|
||||
__
|
||||
}
|
||||
class "bayesnet::CFS" as C_0000093018845530739957
|
||||
class C_0000093018845530739957 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+CFS(const torch::Tensor & samples, const std::vector<std::string> & features, const std::string & className, const int maxFeatures, const int classNumStates, const torch::Tensor & weights) : void
|
||||
+~CFS() : void
|
||||
..
|
||||
+fit() : void
|
||||
__
|
||||
}
|
||||
class "bayesnet::FCBF" as C_0001157456122733975432
|
||||
class C_0001157456122733975432 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+FCBF(const torch::Tensor & samples, const std::vector<std::string> & features, const std::string & className, const int maxFeatures, const int classNumStates, const torch::Tensor & weights, const double threshold) : void
|
||||
+~FCBF() : void
|
||||
..
|
||||
+fit() : void
|
||||
__
|
||||
}
|
||||
class "bayesnet::IWSS" as C_0000066148117395428429
|
||||
class C_0000066148117395428429 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+IWSS(const torch::Tensor & samples, const std::vector<std::string> & features, const std::string & className, const int maxFeatures, const int classNumStates, const torch::Tensor & weights, const double threshold) : void
|
||||
+~IWSS() : void
|
||||
..
|
||||
+fit() : void
|
||||
__
|
||||
}
|
||||
class "bayesnet::(anonymous_60810808)" as C_0012002108046995621535
|
||||
class C_0012002108046995621535 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
__
|
||||
+CFS : std::string
|
||||
+FCBF : std::string
|
||||
+IWSS : std::string
|
||||
}
|
||||
class "bayesnet::(anonymous_60811462)" as C_0004735044229422764240
|
||||
class C_0004735044229422764240 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
__
|
||||
+ASC : std::string
|
||||
+DESC : std::string
|
||||
+RAND : std::string
|
||||
}
|
||||
class "bayesnet::(anonymous_60804220)" as C_0007082100550474633839
|
||||
class C_0007082100550474633839 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
__
|
||||
+CFS : std::string
|
||||
+FCBF : std::string
|
||||
+IWSS : std::string
|
||||
}
|
||||
class "bayesnet::(anonymous_60804874)" as C_0003669430095936529648
|
||||
class C_0003669430095936529648 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
__
|
||||
+ASC : std::string
|
||||
+DESC : std::string
|
||||
+RAND : std::string
|
||||
}
|
||||
class "bayesnet::(anonymous_60809706)" as C_0012336951062058157227
|
||||
class C_0012336951062058157227 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
__
|
||||
+CFS : std::string
|
||||
+FCBF : std::string
|
||||
+IWSS : std::string
|
||||
}
|
||||
class "bayesnet::(anonymous_60810360)" as C_0002435892998884329673
|
||||
class C_0002435892998884329673 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
__
|
||||
+ASC : std::string
|
||||
+DESC : std::string
|
||||
+RAND : std::string
|
||||
}
|
||||
class "bayesnet::MST" as C_0001054867409378333602
|
||||
class C_0001054867409378333602 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+MST() = default : void
|
||||
+MST(const std::vector<std::string> & features, const torch::Tensor & weights, const int root) : void
|
||||
..
|
||||
+insertElement(std::list<int> & variables, int variable) : void
|
||||
+maximumSpanningTree() : std::vector<std::pair<int,int>>
|
||||
+reorder(std::vector<std::pair<float,std::pair<int,int>>> T, int root_original) : std::vector<std::pair<int,int>>
|
||||
__
|
||||
}
|
||||
class "bayesnet::Graph" as C_0001197041682001898467
|
||||
class C_0001197041682001898467 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
class "bayesnet::Graph" as C_0009576333456015187741
|
||||
class C_0009576333456015187741 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+Graph(int V) : void
|
||||
..
|
||||
+addEdge(int u, int v, float wt) : void
|
||||
@@ -332,81 +591,86 @@ class C_0001197041682001898467 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+union_set(int u, int v) : void
|
||||
__
|
||||
}
|
||||
class "bayesnet::KDBLd" as C_0000344502277874806837
|
||||
class C_0000344502277874806837 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+KDBLd(int k) : void
|
||||
+~KDBLd() = default : void
|
||||
..
|
||||
+fit(torch::Tensor & X, torch::Tensor & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states) : KDBLd &
|
||||
+graph(const std::string & name = "KDB") const : std::vector<std::string>
|
||||
+predict(torch::Tensor & X) : torch::Tensor
|
||||
{static} +version() : std::string
|
||||
__
|
||||
}
|
||||
class "bayesnet::AODE" as C_0000786111576121788282
|
||||
class C_0000786111576121788282 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+AODE(bool predict_voting = false) : void
|
||||
+~AODE() : void
|
||||
..
|
||||
#buildModel(const torch::Tensor & weights) : void
|
||||
+graph(const std::string & title = "AODE") const : std::vector<std::string>
|
||||
+setHyperparameters(const nlohmann::json & hyperparameters) : void
|
||||
__
|
||||
}
|
||||
class "bayesnet::SPODELd" as C_0001369655639257755354
|
||||
class C_0001369655639257755354 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+SPODELd(int root) : void
|
||||
+~SPODELd() = default : void
|
||||
..
|
||||
+commonFit(const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states) : SPODELd &
|
||||
+fit(torch::Tensor & X, torch::Tensor & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states) : SPODELd &
|
||||
+fit(torch::Tensor & dataset, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states) : SPODELd &
|
||||
+graph(const std::string & name = "SPODE") const : std::vector<std::string>
|
||||
+predict(torch::Tensor & X) : torch::Tensor
|
||||
{static} +version() : std::string
|
||||
__
|
||||
}
|
||||
class "bayesnet::AODELd" as C_0000487273479333793647
|
||||
class C_0000487273479333793647 #aliceblue;line:blue;line.dotted;text:blue {
|
||||
+AODELd(bool predict_voting = true) : void
|
||||
+~AODELd() = default : void
|
||||
..
|
||||
#buildModel(const torch::Tensor & weights) : void
|
||||
+fit(torch::Tensor & X_, torch::Tensor & y_, const std::vector<std::string> & features_, const std::string & className_, std::map<std::string,std::vector<int>> & states_) : AODELd &
|
||||
+graph(const std::string & name = "AODELd") const : std::vector<std::string>
|
||||
#trainModel(const torch::Tensor & weights) : void
|
||||
__
|
||||
}
|
||||
C_0001303524929067080934 --> C_0001303524929067080934 : -parents
|
||||
C_0001303524929067080934 --> C_0001303524929067080934 : -children
|
||||
C_0001186707649890429575 o-- C_0001303524929067080934 : -nodes
|
||||
C_0000327135989451974539 ..> C_0000738420730783851375
|
||||
C_0002043996622900301644 o-- C_0001186707649890429575 : #model
|
||||
C_0002043996622900301644 o-- C_0000736965376885623323 : #metrics
|
||||
C_0002043996622900301644 o-- C_0000738420730783851375 : #status
|
||||
C_0000327135989451974539 <|-- C_0002043996622900301644
|
||||
C_0002043996622900301644 <|-- C_0001112865019015250005
|
||||
C_0002043996622900301644 <|-- C_0001760994424884323017
|
||||
C_0002219995589162262979 ..> C_0001186707649890429575
|
||||
C_0001760994424884323017 <|-- C_0001668829096702037834
|
||||
C_0002219995589162262979 <|-- C_0001668829096702037834
|
||||
C_0000736965376885623323 <|-- C_0001695326193250580823
|
||||
C_0001695326193250580823 <|-- C_0000011627355691342494
|
||||
C_0001695326193250580823 <|-- C_0000144682015341746929
|
||||
C_0001695326193250580823 <|-- C_0000008268514674428553
|
||||
C_0002043996622900301644 <|-- C_0000512022813807538451
|
||||
C_0001985241386355360576 o-- C_0002043996622900301644 : #models
|
||||
C_0002043996622900301644 <|-- C_0001985241386355360576
|
||||
C_0000358471592399852382 --> C_0001695326193250580823 : -featureSelector
|
||||
C_0001985241386355360576 <|-- C_0000358471592399852382
|
||||
C_0001112865019015250005 <|-- C_0000344502277874806837
|
||||
C_0002219995589162262979 <|-- C_0000344502277874806837
|
||||
C_0001985241386355360576 <|-- C_0000786111576121788282
|
||||
C_0000512022813807538451 <|-- C_0001369655639257755354
|
||||
C_0002219995589162262979 <|-- C_0001369655639257755354
|
||||
C_0001985241386355360576 <|-- C_0000487273479333793647
|
||||
C_0002219995589162262979 <|-- C_0000487273479333793647
|
||||
C_0010428199432536647474 --> C_0010428199432536647474 : -parents
|
||||
C_0010428199432536647474 --> C_0010428199432536647474 : -children
|
||||
C_0009493661199123436603 ..> C_0013393078277439680282
|
||||
C_0009493661199123436603 o-- C_0010428199432536647474 : -nodes
|
||||
C_0002617087915615796317 ..> C_0013393078277439680282
|
||||
C_0002617087915615796317 o-- C_0005907365846270811004 : #status
|
||||
C_0016351972983202413152 ..> C_0013393078277439680282
|
||||
C_0016351972983202413152 ..> C_0005907365846270811004
|
||||
C_0016351972983202413152 o-- C_0009493661199123436603 : #model
|
||||
C_0016351972983202413152 o-- C_0005895723015084986588 : #metrics
|
||||
C_0002617087915615796317 <|-- C_0016351972983202413152
|
||||
|
||||
'Generated with clang-uml, version 0.5.1
|
||||
'LLVM version clang version 17.0.6 (Fedora 17.0.6-2.fc39)
|
||||
C_0017759964713298103839 ..> C_0009493661199123436603
|
||||
C_0016351972983202413152 <|-- C_0008902920152122000044
|
||||
|
||||
C_0002756018222998454702 ..> C_0013393078277439680282
|
||||
C_0008902920152122000044 <|-- C_0002756018222998454702
|
||||
|
||||
C_0017759964713298103839 <|-- C_0002756018222998454702
|
||||
|
||||
C_0016351972983202413152 <|-- C_0004096182510460307610
|
||||
|
||||
C_0010957245114062042836 ..> C_0013393078277439680282
|
||||
C_0004096182510460307610 <|-- C_0010957245114062042836
|
||||
|
||||
C_0017759964713298103839 <|-- C_0010957245114062042836
|
||||
|
||||
C_0016351972983202413152 <|-- C_0016268916386101512883
|
||||
|
||||
C_0016351972983202413152 <|-- C_0014087955399074584137
|
||||
|
||||
C_0013350632773616302678 ..> C_0013393078277439680282
|
||||
C_0014087955399074584137 <|-- C_0013350632773616302678
|
||||
|
||||
C_0017759964713298103839 <|-- C_0013350632773616302678
|
||||
|
||||
C_0007640742442325463418 ..> C_0013393078277439680282
|
||||
C_0016351972983202413152 <|-- C_0007640742442325463418
|
||||
|
||||
C_0015654113248178830206 ..> C_0013393078277439680282
|
||||
C_0016351972983202413152 <|-- C_0015654113248178830206
|
||||
|
||||
C_0015881931090842884611 ..> C_0013393078277439680282
|
||||
C_0015881931090842884611 o-- C_0016351972983202413152 : #models
|
||||
C_0016351972983202413152 <|-- C_0015881931090842884611
|
||||
|
||||
C_0015881931090842884611 <|-- C_0001410789567057647859
|
||||
|
||||
C_0015881931090842884611 <|-- C_0006288892608974306258
|
||||
|
||||
C_0003898187834670349177 ..> C_0013393078277439680282
|
||||
C_0015881931090842884611 <|-- C_0003898187834670349177
|
||||
|
||||
C_0017759964713298103839 <|-- C_0003898187834670349177
|
||||
|
||||
C_0005895723015084986588 <|-- C_0013562609546004646591
|
||||
|
||||
C_0009819322948617116148 ..> C_0016351972983202413152
|
||||
C_0009819322948617116148 --> C_0013562609546004646591 : #featureSelector
|
||||
C_0015881931090842884611 <|-- C_0009819322948617116148
|
||||
|
||||
C_0000272055465257861326 ..> C_0013393078277439680282
|
||||
C_0009819322948617116148 <|-- C_0000272055465257861326
|
||||
|
||||
C_0002867772739198819061 ..> C_0013393078277439680282
|
||||
C_0009819322948617116148 <|-- C_0002867772739198819061
|
||||
|
||||
C_0008480973840710001141 ..> C_0013393078277439680282
|
||||
C_0009819322948617116148 <|-- C_0008480973840710001141
|
||||
|
||||
C_0005198482342493966768 ..> C_0013393078277439680282
|
||||
C_0009819322948617116148 <|-- C_0005198482342493966768
|
||||
|
||||
C_0013562609546004646591 <|-- C_0000093018845530739957
|
||||
|
||||
C_0013562609546004646591 <|-- C_0001157456122733975432
|
||||
|
||||
C_0013562609546004646591 <|-- C_0000066148117395428429
|
||||
|
||||
|
||||
'Generated with clang-uml, version 0.5.5
|
||||
'LLVM version clang version 18.1.8 (Fedora 18.1.8-5.fc41)
|
||||
@enduml
|
||||
|
File diff suppressed because one or more lines are too long
Before Width: | Height: | Size: 139 KiB After Width: | Height: | Size: 229 KiB |
@@ -1,128 +1,314 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
|
||||
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
|
||||
<!-- Generated by graphviz version 8.1.0 (20230707.0739)
|
||||
<!-- Generated by graphviz version 12.1.0 (20240811.2233)
|
||||
-->
|
||||
<!-- Title: BayesNet Pages: 1 -->
|
||||
<svg width="1632pt" height="288pt"
|
||||
viewBox="0.00 0.00 1631.95 287.80" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
|
||||
<g id="graph0" class="graph" transform="scale(1 1) rotate(0) translate(4 283.8)">
|
||||
<svg width="3725pt" height="432pt"
|
||||
viewBox="0.00 0.00 3724.84 431.80" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
|
||||
<g id="graph0" class="graph" transform="scale(1 1) rotate(0) translate(4 427.8)">
|
||||
<title>BayesNet</title>
|
||||
<polygon fill="white" stroke="none" points="-4,4 -4,-283.8 1627.95,-283.8 1627.95,4 -4,4"/>
|
||||
<!-- node1 -->
|
||||
<polygon fill="white" stroke="none" points="-4,4 -4,-427.8 3720.84,-427.8 3720.84,4 -4,4"/>
|
||||
<!-- node0 -->
|
||||
<g id="node1" class="node">
|
||||
<title>node0</title>
|
||||
<polygon fill="none" stroke="black" points="1655.43,-398.35 1655.43,-413.26 1625.69,-423.8 1583.63,-423.8 1553.89,-413.26 1553.89,-398.35 1583.63,-387.8 1625.69,-387.8 1655.43,-398.35"/>
|
||||
<text text-anchor="middle" x="1604.66" y="-401.53" font-family="Times,serif" font-size="12.00">BayesNet</text>
|
||||
</g>
|
||||
<!-- node1 -->
|
||||
<g id="node2" class="node">
|
||||
<title>node1</title>
|
||||
<polygon fill="none" stroke="black" points="826.43,-254.35 826.43,-269.26 796.69,-279.8 754.63,-279.8 724.89,-269.26 724.89,-254.35 754.63,-243.8 796.69,-243.8 826.43,-254.35"/>
|
||||
<text text-anchor="middle" x="775.66" y="-257.53" font-family="Times,serif" font-size="12.00">BayesNet</text>
|
||||
<polygon fill="none" stroke="black" points="413.32,-257.8 372.39,-273.03 206.66,-279.8 40.93,-273.03 0,-257.8 114.69,-245.59 298.64,-245.59 413.32,-257.8"/>
|
||||
<text text-anchor="middle" x="206.66" y="-257.53" font-family="Times,serif" font-size="12.00">/home/rmontanana/Code/libtorch/lib/libc10.so</text>
|
||||
</g>
|
||||
<!-- node0->node1 -->
|
||||
<g id="edge1" class="edge">
|
||||
<title>node0->node1</title>
|
||||
<path fill="none" stroke="black" d="M1553.59,-400.53C1451.65,-391.91 1215.69,-371.61 1017.66,-351.8 773.36,-327.37 488.07,-295.22 329.31,-277.01"/>
|
||||
<polygon fill="black" stroke="black" points="329.93,-273.56 319.6,-275.89 329.14,-280.51 329.93,-273.56"/>
|
||||
</g>
|
||||
<!-- node2 -->
|
||||
<g id="node2" class="node">
|
||||
<g id="node3" class="node">
|
||||
<title>node2</title>
|
||||
<polygon fill="none" stroke="black" points="413.32,-185.8 372.39,-201.03 206.66,-207.8 40.93,-201.03 0,-185.8 114.69,-173.59 298.64,-173.59 413.32,-185.8"/>
|
||||
<text text-anchor="middle" x="206.66" y="-185.53" font-family="Times,serif" font-size="12.00">/home/rmontanana/Code/libtorch/lib/libc10.so</text>
|
||||
<polygon fill="none" stroke="black" points="894.21,-257.8 848.35,-273.03 662.66,-279.8 476.98,-273.03 431.12,-257.8 559.61,-245.59 765.71,-245.59 894.21,-257.8"/>
|
||||
<text text-anchor="middle" x="662.66" y="-257.53" font-family="Times,serif" font-size="12.00">/home/rmontanana/Code/libtorch/lib/libc10_cuda.so</text>
|
||||
</g>
|
||||
<!-- node1->node2 -->
|
||||
<g id="edge1" class="edge">
|
||||
<title>node1->node2</title>
|
||||
<path fill="none" stroke="black" d="M724.41,-254.5C634.7,-243.46 447.04,-220.38 324.01,-205.24"/>
|
||||
<polygon fill="black" stroke="black" points="324.77,-201.69 314.42,-203.94 323.92,-208.63 324.77,-201.69"/>
|
||||
<!-- node0->node2 -->
|
||||
<g id="edge2" class="edge">
|
||||
<title>node0->node2</title>
|
||||
<path fill="none" stroke="black" d="M1555.34,-397.37C1408.12,-375.18 969.52,-309.06 767.13,-278.55"/>
|
||||
<polygon fill="black" stroke="black" points="767.81,-275.12 757.4,-277.09 766.77,-282.04 767.81,-275.12"/>
|
||||
</g>
|
||||
<!-- node3 -->
|
||||
<g id="node3" class="node">
|
||||
<g id="node4" class="node">
|
||||
<title>node3</title>
|
||||
<polygon fill="none" stroke="black" points="857.68,-185.8 815.49,-201.03 644.66,-207.8 473.84,-201.03 431.65,-185.8 549.86,-173.59 739.46,-173.59 857.68,-185.8"/>
|
||||
<text text-anchor="middle" x="644.66" y="-185.53" font-family="Times,serif" font-size="12.00">/home/rmontanana/Code/libtorch/lib/libkineto.a</text>
|
||||
<polygon fill="none" stroke="black" points="1338.68,-257.8 1296.49,-273.03 1125.66,-279.8 954.84,-273.03 912.65,-257.8 1030.86,-245.59 1220.46,-245.59 1338.68,-257.8"/>
|
||||
<text text-anchor="middle" x="1125.66" y="-257.53" font-family="Times,serif" font-size="12.00">/home/rmontanana/Code/libtorch/lib/libkineto.a</text>
|
||||
</g>
|
||||
<!-- node1->node3 -->
|
||||
<g id="edge2" class="edge">
|
||||
<title>node1->node3</title>
|
||||
<path fill="none" stroke="black" d="M747.56,-245.79C729.21,-235.98 704.97,-223.03 684.63,-212.16"/>
|
||||
<polygon fill="black" stroke="black" points="686.47,-208.64 676,-207.02 683.17,-214.82 686.47,-208.64"/>
|
||||
<!-- node0->node3 -->
|
||||
<g id="edge3" class="edge">
|
||||
<title>node0->node3</title>
|
||||
<path fill="none" stroke="black" d="M1566.68,-393.54C1484.46,-369.17 1289.3,-311.32 1188.44,-281.41"/>
|
||||
<polygon fill="black" stroke="black" points="1189.53,-278.09 1178.95,-278.6 1187.54,-284.8 1189.53,-278.09"/>
|
||||
</g>
|
||||
<!-- node4 -->
|
||||
<g id="node4" class="node">
|
||||
<title>node4</title>
|
||||
<polygon fill="none" stroke="black" points="939.33,-182.35 939.33,-197.26 920.78,-207.8 894.54,-207.8 875.99,-197.26 875.99,-182.35 894.54,-171.8 920.78,-171.8 939.33,-182.35"/>
|
||||
<text text-anchor="middle" x="907.66" y="-185.53" font-family="Times,serif" font-size="12.00">mdlp</text>
|
||||
</g>
|
||||
<!-- node1->node4 -->
|
||||
<g id="edge3" class="edge">
|
||||
<title>node1->node4</title>
|
||||
<path fill="none" stroke="black" d="M803.66,-245.96C824.66,-234.82 853.45,-219.56 875.41,-207.91"/>
|
||||
<polygon fill="black" stroke="black" points="876.78,-210.61 883.97,-202.84 873.5,-204.43 876.78,-210.61"/>
|
||||
</g>
|
||||
<!-- node9 -->
|
||||
<g id="node5" class="node">
|
||||
<title>node9</title>
|
||||
<polygon fill="none" stroke="black" points="1107.74,-195.37 1032.66,-207.8 957.58,-195.37 986.26,-175.24 1079.06,-175.24 1107.74,-195.37"/>
|
||||
<text text-anchor="middle" x="1032.66" y="-185.53" font-family="Times,serif" font-size="12.00">torch_library</text>
|
||||
<title>node4</title>
|
||||
<polygon fill="none" stroke="black" points="1552.26,-257.8 1532.93,-273.03 1454.66,-279.8 1376.4,-273.03 1357.07,-257.8 1411.23,-245.59 1498.1,-245.59 1552.26,-257.8"/>
|
||||
<text text-anchor="middle" x="1454.66" y="-257.53" font-family="Times,serif" font-size="12.00">/usr/lib64/libcuda.so</text>
|
||||
</g>
|
||||
<!-- node1->node9 -->
|
||||
<!-- node0->node4 -->
|
||||
<g id="edge4" class="edge">
|
||||
<title>node1->node9</title>
|
||||
<path fill="none" stroke="black" d="M815.25,-250.02C860.25,-237.77 933.77,-217.74 982.68,-204.42"/>
|
||||
<polygon fill="black" stroke="black" points="983.3,-207.61 992.02,-201.6 981.46,-200.85 983.3,-207.61"/>
|
||||
</g>
|
||||
<!-- node10 -->
|
||||
<g id="node6" class="node">
|
||||
<title>node10</title>
|
||||
<polygon fill="none" stroke="black" points="1159.81,-113.8 1086.89,-129.03 791.66,-135.8 496.43,-129.03 423.52,-113.8 627.82,-101.59 955.5,-101.59 1159.81,-113.8"/>
|
||||
<text text-anchor="middle" x="791.66" y="-113.53" font-family="Times,serif" font-size="12.00">-Wl,--no-as-needed,"/home/rmontanana/Code/libtorch/lib/libtorch.so" -Wl,--as-needed</text>
|
||||
</g>
|
||||
<!-- node9->node10 -->
|
||||
<g id="edge5" class="edge">
|
||||
<title>node9->node10</title>
|
||||
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M985.62,-175.14C949.2,-164.56 898.31,-149.78 857.79,-138.01"/>
|
||||
<polygon fill="black" stroke="black" points="859.04,-134.44 848.46,-135.01 857.09,-141.16 859.04,-134.44"/>
|
||||
<title>node0->node4</title>
|
||||
<path fill="none" stroke="black" d="M1586.27,-387.39C1559.5,-362.05 1509.72,-314.92 1479.65,-286.46"/>
|
||||
<polygon fill="black" stroke="black" points="1482.13,-283.99 1472.46,-279.65 1477.31,-289.07 1482.13,-283.99"/>
|
||||
</g>
|
||||
<!-- node5 -->
|
||||
<g id="node7" class="node">
|
||||
<g id="node6" class="node">
|
||||
<title>node5</title>
|
||||
<polygon fill="none" stroke="black" points="1371.56,-123.37 1274.66,-135.8 1177.77,-123.37 1214.78,-103.24 1334.55,-103.24 1371.56,-123.37"/>
|
||||
<text text-anchor="middle" x="1274.66" y="-113.53" font-family="Times,serif" font-size="12.00">torch_cpu_library</text>
|
||||
<polygon fill="none" stroke="black" points="1873.26,-257.8 1843.23,-273.03 1721.66,-279.8 1600.09,-273.03 1570.06,-257.8 1654.19,-245.59 1789.13,-245.59 1873.26,-257.8"/>
|
||||
<text text-anchor="middle" x="1721.66" y="-257.53" font-family="Times,serif" font-size="12.00">/usr/local/cuda/lib64/libcudart.so</text>
|
||||
</g>
|
||||
<!-- node9->node5 -->
|
||||
<g id="edge6" class="edge">
|
||||
<title>node9->node5</title>
|
||||
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M1079.61,-175.22C1120.66,-163.35 1180.2,-146.13 1222.68,-133.84"/>
|
||||
<polygon fill="black" stroke="black" points="1223.46,-136.97 1232.09,-130.83 1221.51,-130.24 1223.46,-136.97"/>
|
||||
<!-- node0->node5 -->
|
||||
<g id="edge5" class="edge">
|
||||
<title>node0->node5</title>
|
||||
<path fill="none" stroke="black" d="M1619.76,-387.77C1628.83,-377.46 1640.53,-363.98 1650.66,-351.8 1668.32,-330.59 1687.84,-306.03 1701.94,-288.1"/>
|
||||
<polygon fill="black" stroke="black" points="1704.43,-290.59 1707.84,-280.56 1698.92,-286.27 1704.43,-290.59"/>
|
||||
</g>
|
||||
<!-- node6 -->
|
||||
<g id="node8" class="node">
|
||||
<g id="node7" class="node">
|
||||
<title>node6</title>
|
||||
<polygon fill="none" stroke="black" points="1191.4,-27.9 1114.6,-43.12 803.66,-49.9 492.72,-43.12 415.93,-27.9 631.1,-15.68 976.22,-15.68 1191.4,-27.9"/>
|
||||
<text text-anchor="middle" x="803.66" y="-27.63" font-family="Times,serif" font-size="12.00">-Wl,--no-as-needed,"/home/rmontanana/Code/libtorch/lib/libtorch_cpu.so" -Wl,--as-needed</text>
|
||||
<polygon fill="none" stroke="black" points="2231.79,-257.8 2198.1,-273.03 2061.66,-279.8 1925.23,-273.03 1891.53,-257.8 1985.95,-245.59 2137.38,-245.59 2231.79,-257.8"/>
|
||||
<text text-anchor="middle" x="2061.66" y="-257.53" font-family="Times,serif" font-size="12.00">/usr/local/cuda/lib64/libnvToolsExt.so</text>
|
||||
</g>
|
||||
<!-- node5->node6 -->
|
||||
<g id="edge7" class="edge">
|
||||
<title>node5->node6</title>
|
||||
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M1210.16,-105.31C1130.55,-91.13 994.37,-66.87 901.77,-50.38"/>
|
||||
<polygon fill="black" stroke="black" points="902.44,-46.77 891.98,-48.46 901.22,-53.66 902.44,-46.77"/>
|
||||
<!-- node0->node6 -->
|
||||
<g id="edge6" class="edge">
|
||||
<title>node0->node6</title>
|
||||
<path fill="none" stroke="black" d="M1642.06,-393.18C1721.31,-368.56 1906.71,-310.95 2002.32,-281.24"/>
|
||||
<polygon fill="black" stroke="black" points="2003.28,-284.61 2011.79,-278.3 2001.21,-277.92 2003.28,-284.61"/>
|
||||
</g>
|
||||
<!-- node7 -->
|
||||
<g id="node9" class="node">
|
||||
<g id="node8" class="node">
|
||||
<title>node7</title>
|
||||
<polygon fill="none" stroke="black" points="1339.72,-37.46 1274.66,-49.9 1209.61,-37.46 1234.46,-17.34 1314.87,-17.34 1339.72,-37.46"/>
|
||||
<text text-anchor="middle" x="1274.66" y="-27.63" font-family="Times,serif" font-size="12.00">caffe2::mkl</text>
|
||||
<polygon fill="none" stroke="black" points="2541.44,-257.8 2512.56,-273.03 2395.66,-279.8 2278.76,-273.03 2249.89,-257.8 2330.79,-245.59 2460.54,-245.59 2541.44,-257.8"/>
|
||||
<text text-anchor="middle" x="2395.66" y="-257.53" font-family="Times,serif" font-size="12.00">/usr/local/cuda/lib64/libnvrtc.so</text>
|
||||
</g>
|
||||
<!-- node5->node7 -->
|
||||
<g id="edge8" class="edge">
|
||||
<title>node5->node7</title>
|
||||
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M1274.66,-102.95C1274.66,-91.56 1274.66,-75.07 1274.66,-60.95"/>
|
||||
<polygon fill="black" stroke="black" points="1278.16,-61.27 1274.66,-51.27 1271.16,-61.27 1278.16,-61.27"/>
|
||||
<!-- node0->node7 -->
|
||||
<g id="edge7" class="edge">
|
||||
<title>node0->node7</title>
|
||||
<path fill="none" stroke="black" d="M1651.19,-396.45C1780.36,-373.26 2144.76,-307.85 2311.05,-277.99"/>
|
||||
<polygon fill="black" stroke="black" points="2311.47,-281.47 2320.7,-276.26 2310.24,-274.58 2311.47,-281.47"/>
|
||||
</g>
|
||||
<!-- node8 -->
|
||||
<g id="node10" class="node">
|
||||
<g id="node9" class="node">
|
||||
<title>node8</title>
|
||||
<polygon fill="none" stroke="black" points="1623.95,-41.76 1490.66,-63.8 1357.37,-41.76 1408.28,-6.09 1573.04,-6.09 1623.95,-41.76"/>
|
||||
<text text-anchor="middle" x="1490.66" y="-34.75" font-family="Times,serif" font-size="12.00">dummy</text>
|
||||
<text text-anchor="middle" x="1490.66" y="-20.5" font-family="Times,serif" font-size="12.00">(protobuf::libprotobuf)</text>
|
||||
<polygon fill="none" stroke="black" points="1642.01,-326.35 1642.01,-341.26 1620.13,-351.8 1589.19,-351.8 1567.31,-341.26 1567.31,-326.35 1589.19,-315.8 1620.13,-315.8 1642.01,-326.35"/>
|
||||
<text text-anchor="middle" x="1604.66" y="-329.53" font-family="Times,serif" font-size="12.00">fimdlp</text>
|
||||
</g>
|
||||
<!-- node5->node8 -->
|
||||
<!-- node0->node8 -->
|
||||
<g id="edge8" class="edge">
|
||||
<title>node0->node8</title>
|
||||
<path fill="none" stroke="black" d="M1604.66,-387.5C1604.66,-380.21 1604.66,-371.53 1604.66,-363.34"/>
|
||||
<polygon fill="black" stroke="black" points="1608.16,-363.42 1604.66,-353.42 1601.16,-363.42 1608.16,-363.42"/>
|
||||
</g>
|
||||
<!-- node19 -->
|
||||
<g id="node10" class="node">
|
||||
<title>node19</title>
|
||||
<polygon fill="none" stroke="black" points="2709.74,-267.37 2634.66,-279.8 2559.58,-267.37 2588.26,-247.24 2681.06,-247.24 2709.74,-267.37"/>
|
||||
<text text-anchor="middle" x="2634.66" y="-257.53" font-family="Times,serif" font-size="12.00">torch_library</text>
|
||||
</g>
|
||||
<!-- node0->node19 -->
|
||||
<g id="edge29" class="edge">
|
||||
<title>node0->node19</title>
|
||||
<path fill="none" stroke="black" d="M1655.87,-399.32C1798.23,-383.79 2210.64,-336.94 2550.66,-279.8 2559.43,-278.33 2568.68,-276.62 2577.72,-274.86"/>
|
||||
<polygon fill="black" stroke="black" points="2578.38,-278.3 2587.5,-272.92 2577.01,-271.43 2578.38,-278.3"/>
|
||||
</g>
|
||||
<!-- node8->node1 -->
|
||||
<g id="edge9" class="edge">
|
||||
<title>node5->node8</title>
|
||||
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M1310.82,-102.76C1341.68,-90.77 1386.88,-73.21 1424.25,-58.7"/>
|
||||
<polygon fill="black" stroke="black" points="1425.01,-61.77 1433.06,-54.89 1422.47,-55.25 1425.01,-61.77"/>
|
||||
<title>node8->node1</title>
|
||||
<path fill="none" stroke="black" d="M1566.84,-331.58C1419.81,-326.72 872.06,-307.69 421.66,-279.8 401.07,-278.53 379.38,-277.02 358.03,-275.43"/>
|
||||
<polygon fill="black" stroke="black" points="358.3,-271.94 348.06,-274.67 357.77,-278.92 358.3,-271.94"/>
|
||||
</g>
|
||||
<!-- node8->node2 -->
|
||||
<g id="edge10" class="edge">
|
||||
<title>node8->node2</title>
|
||||
<path fill="none" stroke="black" d="M1566.86,-330C1445.11,-320.95 1057.97,-292.18 831.67,-275.36"/>
|
||||
<polygon fill="black" stroke="black" points="832.09,-271.89 821.86,-274.63 831.57,-278.87 832.09,-271.89"/>
|
||||
</g>
|
||||
<!-- node8->node3 -->
|
||||
<g id="edge11" class="edge">
|
||||
<title>node8->node3</title>
|
||||
<path fill="none" stroke="black" d="M1567.08,-327.31C1495.4,-316.84 1336.86,-293.67 1230.62,-278.14"/>
|
||||
<polygon fill="black" stroke="black" points="1231.44,-274.72 1221.04,-276.74 1230.42,-281.65 1231.44,-274.72"/>
|
||||
</g>
|
||||
<!-- node8->node4 -->
|
||||
<g id="edge12" class="edge">
|
||||
<title>node8->node4</title>
|
||||
<path fill="none" stroke="black" d="M1578.53,-320.61C1555.96,-310.08 1522.92,-294.66 1496.64,-282.4"/>
|
||||
<polygon fill="black" stroke="black" points="1498.12,-279.22 1487.58,-278.17 1495.16,-285.57 1498.12,-279.22"/>
|
||||
</g>
|
||||
<!-- node8->node5 -->
|
||||
<g id="edge13" class="edge">
|
||||
<title>node8->node5</title>
|
||||
<path fill="none" stroke="black" d="M1627.78,-318.97C1644.15,-309.18 1666.44,-295.84 1685.2,-284.62"/>
|
||||
<polygon fill="black" stroke="black" points="1686.83,-287.73 1693.61,-279.59 1683.23,-281.72 1686.83,-287.73"/>
|
||||
</g>
|
||||
<!-- node8->node6 -->
|
||||
<g id="edge14" class="edge">
|
||||
<title>node8->node6</title>
|
||||
<path fill="none" stroke="black" d="M1642.45,-327.02C1712.36,-316.31 1863.89,-293.1 1964.32,-277.71"/>
|
||||
<polygon fill="black" stroke="black" points="1964.84,-281.18 1974.2,-276.2 1963.78,-274.26 1964.84,-281.18"/>
|
||||
</g>
|
||||
<!-- node8->node7 -->
|
||||
<g id="edge15" class="edge">
|
||||
<title>node8->node7</title>
|
||||
<path fill="none" stroke="black" d="M1642.33,-330.01C1740.75,-322.64 2013.75,-301.7 2240.66,-279.8 2254.16,-278.5 2268.32,-277.06 2282.35,-275.58"/>
|
||||
<polygon fill="black" stroke="black" points="2282.49,-279.08 2292.06,-274.54 2281.75,-272.12 2282.49,-279.08"/>
|
||||
</g>
|
||||
<!-- node8->node19 -->
|
||||
<g id="edge16" class="edge">
|
||||
<title>node8->node19</title>
|
||||
<path fill="none" stroke="black" d="M1642.25,-332.63C1770.06,-331.64 2199.48,-324.94 2550.66,-279.8 2560.1,-278.59 2570.07,-276.92 2579.71,-275.1"/>
|
||||
<polygon fill="black" stroke="black" points="2580.21,-278.57 2589.34,-273.21 2578.86,-271.7 2580.21,-278.57"/>
|
||||
</g>
|
||||
<!-- node20 -->
|
||||
<g id="node11" class="node">
|
||||
<title>node20</title>
|
||||
<polygon fill="none" stroke="black" points="2606.81,-185.8 2533.89,-201.03 2238.66,-207.8 1943.43,-201.03 1870.52,-185.8 2074.82,-173.59 2402.5,-173.59 2606.81,-185.8"/>
|
||||
<text text-anchor="middle" x="2238.66" y="-185.53" font-family="Times,serif" font-size="12.00">-Wl,--no-as-needed,"/home/rmontanana/Code/libtorch/lib/libtorch.so" -Wl,--as-needed</text>
|
||||
</g>
|
||||
<!-- node19->node20 -->
|
||||
<g id="edge17" class="edge">
|
||||
<title>node19->node20</title>
|
||||
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M2583.63,-250.21C2572.76,-248.03 2561.34,-245.79 2550.66,-243.8 2482.14,-231.05 2404.92,-217.93 2344.44,-207.93"/>
|
||||
<polygon fill="black" stroke="black" points="2345.28,-204.52 2334.84,-206.34 2344.14,-211.42 2345.28,-204.52"/>
|
||||
</g>
|
||||
<!-- node9 -->
|
||||
<g id="node12" class="node">
|
||||
<title>node9</title>
|
||||
<polygon fill="none" stroke="black" points="2542.56,-123.37 2445.66,-135.8 2348.77,-123.37 2385.78,-103.24 2505.55,-103.24 2542.56,-123.37"/>
|
||||
<text text-anchor="middle" x="2445.66" y="-113.53" font-family="Times,serif" font-size="12.00">torch_cpu_library</text>
|
||||
</g>
|
||||
<!-- node19->node9 -->
|
||||
<g id="edge18" class="edge">
|
||||
<title>node19->node9</title>
|
||||
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M2635.72,-246.84C2636.4,-227.49 2634.61,-192.58 2615.66,-171.8 2601.13,-155.87 2551.93,-141.56 2510.18,-131.84"/>
|
||||
<polygon fill="black" stroke="black" points="2511.2,-128.48 2500.67,-129.68 2509.65,-135.31 2511.2,-128.48"/>
|
||||
</g>
|
||||
<!-- node13 -->
|
||||
<g id="node16" class="node">
|
||||
<title>node13</title>
|
||||
<polygon fill="none" stroke="black" points="3056.45,-195.37 2953.66,-207.8 2850.87,-195.37 2890.13,-175.24 3017.19,-175.24 3056.45,-195.37"/>
|
||||
<text text-anchor="middle" x="2953.66" y="-185.53" font-family="Times,serif" font-size="12.00">torch_cuda_library</text>
|
||||
</g>
|
||||
<!-- node19->node13 -->
|
||||
<g id="edge22" class="edge">
|
||||
<title>node19->node13</title>
|
||||
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M2685.21,-249.71C2741.11,-237.45 2831.21,-217.67 2891.42,-204.46"/>
|
||||
<polygon fill="black" stroke="black" points="2891.8,-207.96 2900.82,-202.4 2890.3,-201.13 2891.8,-207.96"/>
|
||||
</g>
|
||||
<!-- node10 -->
|
||||
<g id="node13" class="node">
|
||||
<title>node10</title>
|
||||
<polygon fill="none" stroke="black" points="2362.4,-27.9 2285.6,-43.12 1974.66,-49.9 1663.72,-43.12 1586.93,-27.9 1802.1,-15.68 2147.22,-15.68 2362.4,-27.9"/>
|
||||
<text text-anchor="middle" x="1974.66" y="-27.63" font-family="Times,serif" font-size="12.00">-Wl,--no-as-needed,"/home/rmontanana/Code/libtorch/lib/libtorch_cpu.so" -Wl,--as-needed</text>
|
||||
</g>
|
||||
<!-- node9->node10 -->
|
||||
<g id="edge19" class="edge">
|
||||
<title>node9->node10</title>
|
||||
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M2381.16,-105.31C2301.63,-91.15 2165.65,-66.92 2073.05,-50.43"/>
|
||||
<polygon fill="black" stroke="black" points="2073.93,-47.03 2063.48,-48.72 2072.71,-53.92 2073.93,-47.03"/>
|
||||
</g>
|
||||
<!-- node11 -->
|
||||
<g id="node14" class="node">
|
||||
<title>node11</title>
|
||||
<polygon fill="none" stroke="black" points="2510.72,-37.46 2445.66,-49.9 2380.61,-37.46 2405.46,-17.34 2485.87,-17.34 2510.72,-37.46"/>
|
||||
<text text-anchor="middle" x="2445.66" y="-27.63" font-family="Times,serif" font-size="12.00">caffe2::mkl</text>
|
||||
</g>
|
||||
<!-- node9->node11 -->
|
||||
<g id="edge20" class="edge">
|
||||
<title>node9->node11</title>
|
||||
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M2445.66,-102.95C2445.66,-91.68 2445.66,-75.4 2445.66,-61.37"/>
|
||||
<polygon fill="black" stroke="black" points="2449.16,-61.78 2445.66,-51.78 2442.16,-61.78 2449.16,-61.78"/>
|
||||
</g>
|
||||
<!-- node12 -->
|
||||
<g id="node15" class="node">
|
||||
<title>node12</title>
|
||||
<polygon fill="none" stroke="black" points="2794.95,-41.76 2661.66,-63.8 2528.37,-41.76 2579.28,-6.09 2744.04,-6.09 2794.95,-41.76"/>
|
||||
<text text-anchor="middle" x="2661.66" y="-34.75" font-family="Times,serif" font-size="12.00">dummy</text>
|
||||
<text text-anchor="middle" x="2661.66" y="-20.5" font-family="Times,serif" font-size="12.00">(protobuf::libprotobuf)</text>
|
||||
</g>
|
||||
<!-- node9->node12 -->
|
||||
<g id="edge21" class="edge">
|
||||
<title>node9->node12</title>
|
||||
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M2481.82,-102.76C2512.55,-90.82 2557.5,-73.36 2594.77,-58.89"/>
|
||||
<polygon fill="black" stroke="black" points="2595.6,-62.32 2603.65,-55.44 2593.06,-55.79 2595.6,-62.32"/>
|
||||
</g>
|
||||
<!-- node13->node9 -->
|
||||
<g id="edge28" class="edge">
|
||||
<title>node13->node9</title>
|
||||
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M2880.59,-179.79C2799.97,-169.71 2666.42,-152.57 2551.66,-135.8 2540.2,-134.13 2528.06,-132.27 2516.24,-130.41"/>
|
||||
<polygon fill="black" stroke="black" points="2516.96,-126.98 2506.54,-128.86 2515.87,-133.89 2516.96,-126.98"/>
|
||||
</g>
|
||||
<!-- node14 -->
|
||||
<g id="node17" class="node">
|
||||
<title>node14</title>
|
||||
<polygon fill="none" stroke="black" points="3346.69,-113.8 3268.85,-129.03 2953.66,-135.8 2638.48,-129.03 2560.63,-113.8 2778.75,-101.59 3128.58,-101.59 3346.69,-113.8"/>
|
||||
<text text-anchor="middle" x="2953.66" y="-113.53" font-family="Times,serif" font-size="12.00">-Wl,--no-as-needed,"/home/rmontanana/Code/libtorch/lib/libtorch_cuda.so" -Wl,--as-needed</text>
|
||||
</g>
|
||||
<!-- node13->node14 -->
|
||||
<g id="edge23" class="edge">
|
||||
<title>node13->node14</title>
|
||||
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M2953.66,-174.97C2953.66,-167.13 2953.66,-157.01 2953.66,-147.53"/>
|
||||
<polygon fill="black" stroke="black" points="2957.16,-147.59 2953.66,-137.59 2950.16,-147.59 2957.16,-147.59"/>
|
||||
</g>
|
||||
<!-- node15 -->
|
||||
<g id="node18" class="node">
|
||||
<title>node15</title>
|
||||
<polygon fill="none" stroke="black" points="3514.74,-123.37 3439.66,-135.8 3364.58,-123.37 3393.26,-103.24 3486.06,-103.24 3514.74,-123.37"/>
|
||||
<text text-anchor="middle" x="3439.66" y="-113.53" font-family="Times,serif" font-size="12.00">torch::cudart</text>
|
||||
</g>
|
||||
<!-- node13->node15 -->
|
||||
<g id="edge24" class="edge">
|
||||
<title>node13->node15</title>
|
||||
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M3028.35,-180.51C3109.24,-171.17 3241.96,-154.78 3355.66,-135.8 3364.43,-134.34 3373.69,-132.63 3382.72,-130.88"/>
|
||||
<polygon fill="black" stroke="black" points="3383.38,-134.31 3392.51,-128.93 3382.02,-127.45 3383.38,-134.31"/>
|
||||
</g>
|
||||
<!-- node17 -->
|
||||
<g id="node20" class="node">
|
||||
<title>node17</title>
|
||||
<polygon fill="none" stroke="black" points="3716.84,-123.37 3624.66,-135.8 3532.48,-123.37 3567.69,-103.24 3681.63,-103.24 3716.84,-123.37"/>
|
||||
<text text-anchor="middle" x="3624.66" y="-113.53" font-family="Times,serif" font-size="12.00">torch::nvtoolsext</text>
|
||||
</g>
|
||||
<!-- node13->node17 -->
|
||||
<g id="edge26" class="edge">
|
||||
<title>node13->node17</title>
|
||||
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M3033.64,-183.25C3144.1,-175.14 3349.47,-158.53 3523.66,-135.8 3534.84,-134.35 3546.67,-132.57 3558.15,-130.72"/>
|
||||
<polygon fill="black" stroke="black" points="3558.68,-134.18 3567.98,-129.1 3557.54,-127.27 3558.68,-134.18"/>
|
||||
</g>
|
||||
<!-- node16 -->
|
||||
<g id="node19" class="node">
|
||||
<title>node16</title>
|
||||
<polygon fill="none" stroke="black" points="3510.78,-27.9 3496.7,-43.12 3439.66,-49.9 3382.63,-43.12 3368.54,-27.9 3408.01,-15.68 3471.31,-15.68 3510.78,-27.9"/>
|
||||
<text text-anchor="middle" x="3439.66" y="-27.63" font-family="Times,serif" font-size="12.00">CUDA::cudart</text>
|
||||
</g>
|
||||
<!-- node15->node16 -->
|
||||
<g id="edge25" class="edge">
|
||||
<title>node15->node16</title>
|
||||
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M3439.66,-102.95C3439.66,-91.68 3439.66,-75.4 3439.66,-61.37"/>
|
||||
<polygon fill="black" stroke="black" points="3443.16,-61.78 3439.66,-51.78 3436.16,-61.78 3443.16,-61.78"/>
|
||||
</g>
|
||||
<!-- node18 -->
|
||||
<g id="node21" class="node">
|
||||
<title>node18</title>
|
||||
<polygon fill="none" stroke="black" points="3714.32,-27.9 3696.56,-43.12 3624.66,-49.9 3552.77,-43.12 3535.01,-27.9 3584.76,-15.68 3664.56,-15.68 3714.32,-27.9"/>
|
||||
<text text-anchor="middle" x="3624.66" y="-27.63" font-family="Times,serif" font-size="12.00">CUDA::nvToolsExt</text>
|
||||
</g>
|
||||
<!-- node17->node18 -->
|
||||
<g id="edge27" class="edge">
|
||||
<title>node17->node18</title>
|
||||
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M3624.66,-102.95C3624.66,-91.68 3624.66,-75.4 3624.66,-61.37"/>
|
||||
<polygon fill="black" stroke="black" points="3628.16,-61.78 3624.66,-51.78 3621.16,-61.78 3628.16,-61.78"/>
|
||||
</g>
|
||||
</g>
|
||||
</svg>
|
||||
|
Before Width: | Height: | Size: 7.1 KiB After Width: | Height: | Size: 18 KiB |
Submodule lib/catch2 deleted from 029fe3b460
Submodule lib/folding deleted from 2ac43e32ac
1
lib/json
Submodule lib/json deleted from 960b763ecd
2009
lib/log/loguru.cpp
File diff suppressed because it is too large
1475
lib/log/loguru.hpp
File diff suppressed because it is too large
1
lib/mdlp
Submodule lib/mdlp deleted from 2db60e007d
sample/CMakeLists.txt
@@ -1,19 +1,22 @@
 cmake_minimum_required(VERSION 3.20)
 
-project(bayesnet_sample)
+project(bayesnet_sample VERSION 0.1.0 LANGUAGES CXX)
 
 set(CMAKE_CXX_STANDARD 17)
 
-find_package(Torch REQUIRED)
-find_library(BayesNet NAMES BayesNet.a libBayesNet.a REQUIRED)
-
-include_directories(
-    ../tests/lib/Files
-    lib/mdlp
-    lib/json/include
-    /usr/local/include
-)
-
-add_subdirectory(lib/mdlp)
+find_package(Torch CONFIG REQUIRED)
+find_package(fimdlp CONFIG REQUIRED)
+find_package(folding CONFIG REQUIRED)
+find_package(arff-files CONFIG REQUIRED)
+find_package(nlohmann_json REQUIRED)
+find_package(bayesnet CONFIG REQUIRED)
+
 add_executable(bayesnet_sample sample.cc)
-target_link_libraries(bayesnet_sample mdlp "${TORCH_LIBRARIES}" "${BayesNet}")
+target_link_libraries(bayesnet_sample PRIVATE
+    fimdlp::fimdlp
+    arff-files::arff-files
+    torch::torch
+    bayesnet::bayesnet
+    folding::folding
+    nlohmann_json::nlohmann_json
+)
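Note on the rewritten build script: it relies entirely on namespaced imported targets (fimdlp::fimdlp, bayesnet::bayesnet, and so on). Each target found via find_package() carries its own include directories and transitive link requirements, which is why the hand-maintained include_directories() list and the vendored add_subdirectory(lib/mdlp) step can disappear; target_link_libraries() alone is enough.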
9
sample/CMakeUserPresets.json
Normal file
@@ -0,0 +1,9 @@
{
    "version": 4,
    "vendor": {
        "conan": {}
    },
    "include": [
        "build/CMakePresets.json"
    ]
}
14
sample/conanfile.txt
Normal file
@@ -0,0 +1,14 @@
[requires]
libtorch/2.7.0
arff-files/1.2.0
fimdlp/2.1.0
folding/1.1.1
bayesnet/1.2.0
nlohmann_json/3.11.3

[generators]
CMakeToolchain
CMakeDeps

[options]
libtorch/2.7.0:shared=True
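With these two files the sample pulls every dependency from Conan instead of vendored submodules. A typical Conan 2 workflow (assuming a default profile; the exact preset name comes from the generated file) would be: run conan install . --build=missing in sample/ to resolve the [requires] above and emit build/CMakePresets.json, which the CMakeUserPresets.json above includes, then configure and build with the generated preset, e.g. cmake --preset conan-release followed by cmake --build --preset conan-release.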
@@ -1,55 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <utility>

#include <nlohmann/detail/abi_macros.hpp>
#include <nlohmann/detail/conversions/from_json.hpp>
#include <nlohmann/detail/conversions/to_json.hpp>
#include <nlohmann/detail/meta/identity_tag.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN

/// @sa https://json.nlohmann.me/api/adl_serializer/
template<typename ValueType, typename>
struct adl_serializer
{
    /// @brief convert a JSON value to any value type
    /// @sa https://json.nlohmann.me/api/adl_serializer/from_json/
    template<typename BasicJsonType, typename TargetType = ValueType>
    static auto from_json(BasicJsonType && j, TargetType& val) noexcept(
        noexcept(::nlohmann::from_json(std::forward<BasicJsonType>(j), val)))
    -> decltype(::nlohmann::from_json(std::forward<BasicJsonType>(j), val), void())
    {
        ::nlohmann::from_json(std::forward<BasicJsonType>(j), val);
    }

    /// @brief convert a JSON value to any value type
    /// @sa https://json.nlohmann.me/api/adl_serializer/from_json/
    template<typename BasicJsonType, typename TargetType = ValueType>
    static auto from_json(BasicJsonType && j) noexcept(
        noexcept(::nlohmann::from_json(std::forward<BasicJsonType>(j), detail::identity_tag<TargetType> {})))
    -> decltype(::nlohmann::from_json(std::forward<BasicJsonType>(j), detail::identity_tag<TargetType> {}))
    {
        return ::nlohmann::from_json(std::forward<BasicJsonType>(j), detail::identity_tag<TargetType> {});
    }

    /// @brief convert any value type to a JSON value
    /// @sa https://json.nlohmann.me/api/adl_serializer/to_json/
    template<typename BasicJsonType, typename TargetType = ValueType>
    static auto to_json(BasicJsonType& j, TargetType && val) noexcept(
        noexcept(::nlohmann::to_json(j, std::forward<TargetType>(val))))
    -> decltype(::nlohmann::to_json(j, std::forward<TargetType>(val)), void())
    {
        ::nlohmann::to_json(j, std::forward<TargetType>(val));
    }
};

NLOHMANN_JSON_NAMESPACE_END
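Note: the adl_serializer deleted above is the customization point that lets user-defined types round-trip through ADL-found to_json/from_json overloads; the vendored copy goes away, but the same mechanism remains available from the nlohmann_json Conan package. A minimal sketch of that mechanism (the Point type and its field names are illustrative only):

#include <iostream>
#include <nlohmann/json.hpp>

namespace demo {
    struct Point { double x; double y; };

    // Found by ADL from adl_serializer's default from_json/to_json.
    void to_json(nlohmann::json& j, const Point& p) { j = {{"x", p.x}, {"y", p.y}}; }
    void from_json(const nlohmann::json& j, Point& p)
    {
        j.at("x").get_to(p.x);
        j.at("y").get_to(p.y);
    }
}

int main()
{
    nlohmann::json j = demo::Point{1.0, 2.0};  // serializes via to_json
    auto p = j.get<demo::Point>();             // deserializes via from_json
    std::cout << j.dump() << " -> " << p.x << ", " << p.y << "\n";
}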
@@ -1,103 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <cstdint> // uint8_t, uint64_t
#include <tuple> // tie
#include <utility> // move

#include <nlohmann/detail/abi_macros.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN

/// @brief an internal type for a backed binary type
/// @sa https://json.nlohmann.me/api/byte_container_with_subtype/
template<typename BinaryType>
class byte_container_with_subtype : public BinaryType
{
  public:
    using container_type = BinaryType;
    using subtype_type = std::uint64_t;

    /// @sa https://json.nlohmann.me/api/byte_container_with_subtype/byte_container_with_subtype/
    byte_container_with_subtype() noexcept(noexcept(container_type()))
        : container_type()
    {}

    /// @sa https://json.nlohmann.me/api/byte_container_with_subtype/byte_container_with_subtype/
    byte_container_with_subtype(const container_type& b) noexcept(noexcept(container_type(b)))
        : container_type(b)
    {}

    /// @sa https://json.nlohmann.me/api/byte_container_with_subtype/byte_container_with_subtype/
    byte_container_with_subtype(container_type&& b) noexcept(noexcept(container_type(std::move(b))))
        : container_type(std::move(b))
    {}

    /// @sa https://json.nlohmann.me/api/byte_container_with_subtype/byte_container_with_subtype/
    byte_container_with_subtype(const container_type& b, subtype_type subtype_) noexcept(noexcept(container_type(b)))
        : container_type(b)
        , m_subtype(subtype_)
        , m_has_subtype(true)
    {}

    /// @sa https://json.nlohmann.me/api/byte_container_with_subtype/byte_container_with_subtype/
    byte_container_with_subtype(container_type&& b, subtype_type subtype_) noexcept(noexcept(container_type(std::move(b))))
        : container_type(std::move(b))
        , m_subtype(subtype_)
        , m_has_subtype(true)
    {}

    bool operator==(const byte_container_with_subtype& rhs) const
    {
        return std::tie(static_cast<const BinaryType&>(*this), m_subtype, m_has_subtype) ==
               std::tie(static_cast<const BinaryType&>(rhs), rhs.m_subtype, rhs.m_has_subtype);
    }

    bool operator!=(const byte_container_with_subtype& rhs) const
    {
        return !(rhs == *this);
    }

    /// @brief sets the binary subtype
    /// @sa https://json.nlohmann.me/api/byte_container_with_subtype/set_subtype/
    void set_subtype(subtype_type subtype_) noexcept
    {
        m_subtype = subtype_;
        m_has_subtype = true;
    }

    /// @brief return the binary subtype
    /// @sa https://json.nlohmann.me/api/byte_container_with_subtype/subtype/
    constexpr subtype_type subtype() const noexcept
    {
        return m_has_subtype ? m_subtype : static_cast<subtype_type>(-1);
    }

    /// @brief return whether the value has a subtype
    /// @sa https://json.nlohmann.me/api/byte_container_with_subtype/has_subtype/
    constexpr bool has_subtype() const noexcept
    {
        return m_has_subtype;
    }

    /// @brief clears the binary subtype
    /// @sa https://json.nlohmann.me/api/byte_container_with_subtype/clear_subtype/
    void clear_subtype() noexcept
    {
        m_subtype = 0;
        m_has_subtype = false;
    }

  private:
    subtype_type m_subtype = 0;
    bool m_has_subtype = false;
};

NLOHMANN_JSON_NAMESPACE_END
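Note: byte_container_with_subtype surfaces in the public API as json::binary_t; the subtype is what the CBOR/MessagePack writers use to tag binary payloads. A small sketch of the behavior the deleted class implements (the byte values and subtype 42 are illustrative):

#include <cassert>
#include <nlohmann/json.hpp>
using nlohmann::json;

int main()
{
    // binary value carrying an application-defined subtype
    json j = json::binary({0xCA, 0xFE, 0xBA, 0xBE}, 42);
    assert(j.is_binary());
    assert(j.get_binary().has_subtype());
    assert(j.get_binary().subtype() == 42);

    j.get_binary().clear_subtype();  // drops the tag, keeps the bytes
    assert(!j.get_binary().has_subtype());
}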
@@ -1,100 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

// This file contains all macro definitions affecting or depending on the ABI

#ifndef JSON_SKIP_LIBRARY_VERSION_CHECK
    #if defined(NLOHMANN_JSON_VERSION_MAJOR) && defined(NLOHMANN_JSON_VERSION_MINOR) && defined(NLOHMANN_JSON_VERSION_PATCH)
        #if NLOHMANN_JSON_VERSION_MAJOR != 3 || NLOHMANN_JSON_VERSION_MINOR != 11 || NLOHMANN_JSON_VERSION_PATCH != 3
            #warning "Already included a different version of the library!"
        #endif
    #endif
#endif

#define NLOHMANN_JSON_VERSION_MAJOR 3   // NOLINT(modernize-macro-to-enum)
#define NLOHMANN_JSON_VERSION_MINOR 11  // NOLINT(modernize-macro-to-enum)
#define NLOHMANN_JSON_VERSION_PATCH 3   // NOLINT(modernize-macro-to-enum)

#ifndef JSON_DIAGNOSTICS
    #define JSON_DIAGNOSTICS 0
#endif

#ifndef JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON
    #define JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON 0
#endif

#if JSON_DIAGNOSTICS
    #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS _diag
#else
    #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS
#endif

#if JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON
    #define NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON _ldvcmp
#else
    #define NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON
#endif

#ifndef NLOHMANN_JSON_NAMESPACE_NO_VERSION
    #define NLOHMANN_JSON_NAMESPACE_NO_VERSION 0
#endif

// Construct the namespace ABI tags component
#define NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b) json_abi ## a ## b
#define NLOHMANN_JSON_ABI_TAGS_CONCAT(a, b) \
    NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b)

#define NLOHMANN_JSON_ABI_TAGS                                       \
    NLOHMANN_JSON_ABI_TAGS_CONCAT(                                   \
            NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS,                       \
            NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON)

// Construct the namespace version component
#define NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT_EX(major, minor, patch) \
    _v ## major ## _ ## minor ## _ ## patch
#define NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT(major, minor, patch) \
    NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT_EX(major, minor, patch)

#if NLOHMANN_JSON_NAMESPACE_NO_VERSION
#define NLOHMANN_JSON_NAMESPACE_VERSION
#else
#define NLOHMANN_JSON_NAMESPACE_VERSION                                 \
    NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT(NLOHMANN_JSON_VERSION_MAJOR, \
                                           NLOHMANN_JSON_VERSION_MINOR, \
                                           NLOHMANN_JSON_VERSION_PATCH)
#endif

// Combine namespace components
#define NLOHMANN_JSON_NAMESPACE_CONCAT_EX(a, b) a ## b
#define NLOHMANN_JSON_NAMESPACE_CONCAT(a, b) \
    NLOHMANN_JSON_NAMESPACE_CONCAT_EX(a, b)

#ifndef NLOHMANN_JSON_NAMESPACE
#define NLOHMANN_JSON_NAMESPACE               \
    nlohmann::NLOHMANN_JSON_NAMESPACE_CONCAT( \
            NLOHMANN_JSON_ABI_TAGS,           \
            NLOHMANN_JSON_NAMESPACE_VERSION)
#endif

#ifndef NLOHMANN_JSON_NAMESPACE_BEGIN
#define NLOHMANN_JSON_NAMESPACE_BEGIN                \
    namespace nlohmann                               \
    {                                                \
    inline namespace NLOHMANN_JSON_NAMESPACE_CONCAT( \
                NLOHMANN_JSON_ABI_TAGS,              \
                NLOHMANN_JSON_NAMESPACE_VERSION)     \
    {
#endif

#ifndef NLOHMANN_JSON_NAMESPACE_END
#define NLOHMANN_JSON_NAMESPACE_END                                     \
    }  /* namespace (inline namespace) NOLINT(readability/namespace) */ \
    }  // namespace nlohmann
#endif
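Note: the net effect of these macros is that the whole library lives in an inline, ABI-tagged namespace, so builds with different JSON_DIAGNOSTICS or legacy-comparison settings can coexist in one binary. A sketch of what that means for user code, assuming the default configuration (no diagnostics, no legacy comparison), where the tag expands to json_abi_v3_11_3:

#include <type_traits>
#include <nlohmann/json.hpp>

// nlohmann::json is an alias inside the inline namespace; with the
// default ABI tags that namespace is nlohmann::json_abi_v3_11_3.
static_assert(std::is_same<nlohmann::json,
                           nlohmann::json_abi_v3_11_3::json>::value,
              "default ABI-tagged namespace");

int main() {}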
@@ -1,497 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <algorithm> // transform
#include <array> // array
#include <forward_list> // forward_list
#include <iterator> // inserter, front_inserter, end
#include <map> // map
#include <string> // string
#include <tuple> // tuple, make_tuple
#include <type_traits> // is_arithmetic, is_same, is_enum, underlying_type, is_convertible
#include <unordered_map> // unordered_map
#include <utility> // pair, declval
#include <valarray> // valarray

#include <nlohmann/detail/exceptions.hpp>
#include <nlohmann/detail/macro_scope.hpp>
#include <nlohmann/detail/meta/cpp_future.hpp>
#include <nlohmann/detail/meta/identity_tag.hpp>
#include <nlohmann/detail/meta/std_fs.hpp>
#include <nlohmann/detail/meta/type_traits.hpp>
#include <nlohmann/detail/string_concat.hpp>
#include <nlohmann/detail/value_t.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

template<typename BasicJsonType>
inline void from_json(const BasicJsonType& j, typename std::nullptr_t& n)
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_null()))
    {
        JSON_THROW(type_error::create(302, concat("type must be null, but is ", j.type_name()), &j));
    }
    n = nullptr;
}

// overloads for basic_json template parameters
template < typename BasicJsonType, typename ArithmeticType,
           enable_if_t < std::is_arithmetic<ArithmeticType>::value&&
                         !std::is_same<ArithmeticType, typename BasicJsonType::boolean_t>::value,
                         int > = 0 >
void get_arithmetic_value(const BasicJsonType& j, ArithmeticType& val)
{
    switch (static_cast<value_t>(j))
    {
        case value_t::number_unsigned:
        {
            val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_unsigned_t*>());
            break;
        }
        case value_t::number_integer:
        {
            val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_integer_t*>());
            break;
        }
        case value_t::number_float:
        {
            val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_float_t*>());
            break;
        }

        case value_t::null:
        case value_t::object:
        case value_t::array:
        case value_t::string:
        case value_t::boolean:
        case value_t::binary:
        case value_t::discarded:
        default:
            JSON_THROW(type_error::create(302, concat("type must be number, but is ", j.type_name()), &j));
    }
}

template<typename BasicJsonType>
inline void from_json(const BasicJsonType& j, typename BasicJsonType::boolean_t& b)
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_boolean()))
    {
        JSON_THROW(type_error::create(302, concat("type must be boolean, but is ", j.type_name()), &j));
    }
    b = *j.template get_ptr<const typename BasicJsonType::boolean_t*>();
}

template<typename BasicJsonType>
inline void from_json(const BasicJsonType& j, typename BasicJsonType::string_t& s)
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_string()))
    {
        JSON_THROW(type_error::create(302, concat("type must be string, but is ", j.type_name()), &j));
    }
    s = *j.template get_ptr<const typename BasicJsonType::string_t*>();
}

template <
    typename BasicJsonType, typename StringType,
    enable_if_t <
        std::is_assignable<StringType&, const typename BasicJsonType::string_t>::value
        && is_detected_exact<typename BasicJsonType::string_t::value_type, value_type_t, StringType>::value
        && !std::is_same<typename BasicJsonType::string_t, StringType>::value
        && !is_json_ref<StringType>::value, int > = 0 >
inline void from_json(const BasicJsonType& j, StringType& s)
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_string()))
    {
        JSON_THROW(type_error::create(302, concat("type must be string, but is ", j.type_name()), &j));
    }

    s = *j.template get_ptr<const typename BasicJsonType::string_t*>();
}

template<typename BasicJsonType>
inline void from_json(const BasicJsonType& j, typename BasicJsonType::number_float_t& val)
{
    get_arithmetic_value(j, val);
}

template<typename BasicJsonType>
inline void from_json(const BasicJsonType& j, typename BasicJsonType::number_unsigned_t& val)
{
    get_arithmetic_value(j, val);
}

template<typename BasicJsonType>
inline void from_json(const BasicJsonType& j, typename BasicJsonType::number_integer_t& val)
{
    get_arithmetic_value(j, val);
}

#if !JSON_DISABLE_ENUM_SERIALIZATION
template<typename BasicJsonType, typename EnumType,
         enable_if_t<std::is_enum<EnumType>::value, int> = 0>
inline void from_json(const BasicJsonType& j, EnumType& e)
{
    typename std::underlying_type<EnumType>::type val;
    get_arithmetic_value(j, val);
    e = static_cast<EnumType>(val);
}
#endif  // JSON_DISABLE_ENUM_SERIALIZATION

// forward_list doesn't have an insert method
template<typename BasicJsonType, typename T, typename Allocator,
         enable_if_t<is_getable<BasicJsonType, T>::value, int> = 0>
inline void from_json(const BasicJsonType& j, std::forward_list<T, Allocator>& l)
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
    {
        JSON_THROW(type_error::create(302, concat("type must be array, but is ", j.type_name()), &j));
    }
    l.clear();
    std::transform(j.rbegin(), j.rend(),
                   std::front_inserter(l), [](const BasicJsonType & i)
    {
        return i.template get<T>();
    });
}

// valarray doesn't have an insert method
template<typename BasicJsonType, typename T,
         enable_if_t<is_getable<BasicJsonType, T>::value, int> = 0>
inline void from_json(const BasicJsonType& j, std::valarray<T>& l)
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
    {
        JSON_THROW(type_error::create(302, concat("type must be array, but is ", j.type_name()), &j));
    }
    l.resize(j.size());
    std::transform(j.begin(), j.end(), std::begin(l),
                   [](const BasicJsonType & elem)
    {
        return elem.template get<T>();
    });
}

template<typename BasicJsonType, typename T, std::size_t N>
auto from_json(const BasicJsonType& j, T (&arr)[N])  // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays)
-> decltype(j.template get<T>(), void())
{
    for (std::size_t i = 0; i < N; ++i)
    {
        arr[i] = j.at(i).template get<T>();
    }
}

template<typename BasicJsonType>
inline void from_json_array_impl(const BasicJsonType& j, typename BasicJsonType::array_t& arr, priority_tag<3> /*unused*/)
{
    arr = *j.template get_ptr<const typename BasicJsonType::array_t*>();
}

template<typename BasicJsonType, typename T, std::size_t N>
auto from_json_array_impl(const BasicJsonType& j, std::array<T, N>& arr,
                          priority_tag<2> /*unused*/)
-> decltype(j.template get<T>(), void())
{
    for (std::size_t i = 0; i < N; ++i)
    {
        arr[i] = j.at(i).template get<T>();
    }
}

template<typename BasicJsonType, typename ConstructibleArrayType,
         enable_if_t<
             std::is_assignable<ConstructibleArrayType&, ConstructibleArrayType>::value,
             int> = 0>
auto from_json_array_impl(const BasicJsonType& j, ConstructibleArrayType& arr, priority_tag<1> /*unused*/)
-> decltype(
    arr.reserve(std::declval<typename ConstructibleArrayType::size_type>()),
    j.template get<typename ConstructibleArrayType::value_type>(),
    void())
{
    using std::end;

    ConstructibleArrayType ret;
    ret.reserve(j.size());
    std::transform(j.begin(), j.end(),
                   std::inserter(ret, end(ret)), [](const BasicJsonType & i)
    {
        // get<BasicJsonType>() returns *this, this won't call a from_json
        // method when value_type is BasicJsonType
        return i.template get<typename ConstructibleArrayType::value_type>();
    });
    arr = std::move(ret);
}

template<typename BasicJsonType, typename ConstructibleArrayType,
         enable_if_t<
             std::is_assignable<ConstructibleArrayType&, ConstructibleArrayType>::value,
             int> = 0>
inline void from_json_array_impl(const BasicJsonType& j, ConstructibleArrayType& arr,
                                 priority_tag<0> /*unused*/)
{
    using std::end;

    ConstructibleArrayType ret;
    std::transform(
        j.begin(), j.end(), std::inserter(ret, end(ret)),
        [](const BasicJsonType & i)
    {
        // get<BasicJsonType>() returns *this, this won't call a from_json
        // method when value_type is BasicJsonType
        return i.template get<typename ConstructibleArrayType::value_type>();
    });
    arr = std::move(ret);
}

template < typename BasicJsonType, typename ConstructibleArrayType,
           enable_if_t <
               is_constructible_array_type<BasicJsonType, ConstructibleArrayType>::value&&
               !is_constructible_object_type<BasicJsonType, ConstructibleArrayType>::value&&
               !is_constructible_string_type<BasicJsonType, ConstructibleArrayType>::value&&
               !std::is_same<ConstructibleArrayType, typename BasicJsonType::binary_t>::value&&
               !is_basic_json<ConstructibleArrayType>::value,
               int > = 0 >
auto from_json(const BasicJsonType& j, ConstructibleArrayType& arr)
-> decltype(from_json_array_impl(j, arr, priority_tag<3> {}),
            j.template get<typename ConstructibleArrayType::value_type>(),
            void())
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
    {
        JSON_THROW(type_error::create(302, concat("type must be array, but is ", j.type_name()), &j));
    }

    from_json_array_impl(j, arr, priority_tag<3> {});
}

template < typename BasicJsonType, typename T, std::size_t... Idx >
std::array<T, sizeof...(Idx)> from_json_inplace_array_impl(BasicJsonType&& j,
        identity_tag<std::array<T, sizeof...(Idx)>> /*unused*/, index_sequence<Idx...> /*unused*/)
{
    return { { std::forward<BasicJsonType>(j).at(Idx).template get<T>()... } };
}

template < typename BasicJsonType, typename T, std::size_t N >
auto from_json(BasicJsonType&& j, identity_tag<std::array<T, N>> tag)
-> decltype(from_json_inplace_array_impl(std::forward<BasicJsonType>(j), tag, make_index_sequence<N> {}))
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
    {
        JSON_THROW(type_error::create(302, concat("type must be array, but is ", j.type_name()), &j));
    }

    return from_json_inplace_array_impl(std::forward<BasicJsonType>(j), tag, make_index_sequence<N> {});
}

template<typename BasicJsonType>
inline void from_json(const BasicJsonType& j, typename BasicJsonType::binary_t& bin)
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_binary()))
    {
        JSON_THROW(type_error::create(302, concat("type must be binary, but is ", j.type_name()), &j));
    }

    bin = *j.template get_ptr<const typename BasicJsonType::binary_t*>();
}

template<typename BasicJsonType, typename ConstructibleObjectType,
         enable_if_t<is_constructible_object_type<BasicJsonType, ConstructibleObjectType>::value, int> = 0>
inline void from_json(const BasicJsonType& j, ConstructibleObjectType& obj)
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_object()))
    {
        JSON_THROW(type_error::create(302, concat("type must be object, but is ", j.type_name()), &j));
    }

    ConstructibleObjectType ret;
    const auto* inner_object = j.template get_ptr<const typename BasicJsonType::object_t*>();
    using value_type = typename ConstructibleObjectType::value_type;
    std::transform(
        inner_object->begin(), inner_object->end(),
        std::inserter(ret, ret.begin()),
        [](typename BasicJsonType::object_t::value_type const & p)
    {
        return value_type(p.first, p.second.template get<typename ConstructibleObjectType::mapped_type>());
    });
    obj = std::move(ret);
}

// overload for arithmetic types, not chosen for basic_json template arguments
// (BooleanType, etc..); note: Is it really necessary to provide explicit
// overloads for boolean_t etc. in case of a custom BooleanType which is not
// an arithmetic type?
template < typename BasicJsonType, typename ArithmeticType,
           enable_if_t <
               std::is_arithmetic<ArithmeticType>::value&&
               !std::is_same<ArithmeticType, typename BasicJsonType::number_unsigned_t>::value&&
               !std::is_same<ArithmeticType, typename BasicJsonType::number_integer_t>::value&&
               !std::is_same<ArithmeticType, typename BasicJsonType::number_float_t>::value&&
               !std::is_same<ArithmeticType, typename BasicJsonType::boolean_t>::value,
               int > = 0 >
inline void from_json(const BasicJsonType& j, ArithmeticType& val)
{
    switch (static_cast<value_t>(j))
    {
        case value_t::number_unsigned:
        {
            val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_unsigned_t*>());
            break;
        }
        case value_t::number_integer:
        {
            val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_integer_t*>());
            break;
        }
        case value_t::number_float:
        {
            val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_float_t*>());
            break;
        }
        case value_t::boolean:
        {
            val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::boolean_t*>());
            break;
        }

        case value_t::null:
        case value_t::object:
        case value_t::array:
        case value_t::string:
        case value_t::binary:
        case value_t::discarded:
        default:
            JSON_THROW(type_error::create(302, concat("type must be number, but is ", j.type_name()), &j));
    }
}

template<typename BasicJsonType, typename... Args, std::size_t... Idx>
std::tuple<Args...> from_json_tuple_impl_base(BasicJsonType&& j, index_sequence<Idx...> /*unused*/)
{
    return std::make_tuple(std::forward<BasicJsonType>(j).at(Idx).template get<Args>()...);
}

template < typename BasicJsonType, class A1, class A2 >
std::pair<A1, A2> from_json_tuple_impl(BasicJsonType&& j, identity_tag<std::pair<A1, A2>> /*unused*/, priority_tag<0> /*unused*/)
{
    return {std::forward<BasicJsonType>(j).at(0).template get<A1>(),
            std::forward<BasicJsonType>(j).at(1).template get<A2>()};
}

template<typename BasicJsonType, typename A1, typename A2>
inline void from_json_tuple_impl(BasicJsonType&& j, std::pair<A1, A2>& p, priority_tag<1> /*unused*/)
{
    p = from_json_tuple_impl(std::forward<BasicJsonType>(j), identity_tag<std::pair<A1, A2>> {}, priority_tag<0> {});
}

template<typename BasicJsonType, typename... Args>
std::tuple<Args...> from_json_tuple_impl(BasicJsonType&& j, identity_tag<std::tuple<Args...>> /*unused*/, priority_tag<2> /*unused*/)
{
    return from_json_tuple_impl_base<BasicJsonType, Args...>(std::forward<BasicJsonType>(j), index_sequence_for<Args...> {});
}

template<typename BasicJsonType, typename... Args>
inline void from_json_tuple_impl(BasicJsonType&& j, std::tuple<Args...>& t, priority_tag<3> /*unused*/)
{
    t = from_json_tuple_impl_base<BasicJsonType, Args...>(std::forward<BasicJsonType>(j), index_sequence_for<Args...> {});
}

template<typename BasicJsonType, typename TupleRelated>
auto from_json(BasicJsonType&& j, TupleRelated&& t)
-> decltype(from_json_tuple_impl(std::forward<BasicJsonType>(j), std::forward<TupleRelated>(t), priority_tag<3> {}))
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
    {
        JSON_THROW(type_error::create(302, concat("type must be array, but is ", j.type_name()), &j));
    }

    return from_json_tuple_impl(std::forward<BasicJsonType>(j), std::forward<TupleRelated>(t), priority_tag<3> {});
}

template < typename BasicJsonType, typename Key, typename Value, typename Compare, typename Allocator,
           typename = enable_if_t < !std::is_constructible <
                                        typename BasicJsonType::string_t, Key >::value >>
inline void from_json(const BasicJsonType& j, std::map<Key, Value, Compare, Allocator>& m)
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
    {
        JSON_THROW(type_error::create(302, concat("type must be array, but is ", j.type_name()), &j));
    }
    m.clear();
    for (const auto& p : j)
    {
        if (JSON_HEDLEY_UNLIKELY(!p.is_array()))
        {
            JSON_THROW(type_error::create(302, concat("type must be array, but is ", p.type_name()), &j));
        }
        m.emplace(p.at(0).template get<Key>(), p.at(1).template get<Value>());
    }
}

template < typename BasicJsonType, typename Key, typename Value, typename Hash, typename KeyEqual, typename Allocator,
           typename = enable_if_t < !std::is_constructible <
                                        typename BasicJsonType::string_t, Key >::value >>
inline void from_json(const BasicJsonType& j, std::unordered_map<Key, Value, Hash, KeyEqual, Allocator>& m)
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
    {
        JSON_THROW(type_error::create(302, concat("type must be array, but is ", j.type_name()), &j));
    }
    m.clear();
    for (const auto& p : j)
    {
        if (JSON_HEDLEY_UNLIKELY(!p.is_array()))
        {
            JSON_THROW(type_error::create(302, concat("type must be array, but is ", p.type_name()), &j));
        }
        m.emplace(p.at(0).template get<Key>(), p.at(1).template get<Value>());
    }
}

#if JSON_HAS_FILESYSTEM || JSON_HAS_EXPERIMENTAL_FILESYSTEM
template<typename BasicJsonType>
inline void from_json(const BasicJsonType& j, std_fs::path& p)
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_string()))
    {
        JSON_THROW(type_error::create(302, concat("type must be string, but is ", j.type_name()), &j));
    }
    p = *j.template get_ptr<const typename BasicJsonType::string_t*>();
}
#endif

struct from_json_fn
{
    template<typename BasicJsonType, typename T>
    auto operator()(const BasicJsonType& j, T&& val) const
    noexcept(noexcept(from_json(j, std::forward<T>(val))))
    -> decltype(from_json(j, std::forward<T>(val)))
    {
        return from_json(j, std::forward<T>(val));
    }
};

}  // namespace detail

#ifndef JSON_HAS_CPP_17
/// namespace to hold default `from_json` function
/// to see why this is required:
/// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4381.html
namespace // NOLINT(cert-dcl59-cpp,fuchsia-header-anon-namespaces,google-build-namespaces)
{
#endif
JSON_INLINE_VARIABLE constexpr const auto& from_json = // NOLINT(misc-definitions-in-headers)
    detail::static_const<detail::from_json_fn>::value;
#ifndef JSON_HAS_CPP_17
}  // namespace
#endif

NLOHMANN_JSON_NAMESPACE_END
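Note: these detail overloads are what back j.get<T>() and j.get_to(...) for the standard types above; for instance, maps with non-string keys deserialize from an array of [key, value] pairs (see the std::map overload), and enums go through their underlying arithmetic type. A small sketch (the Level enum is illustrative):

#include <cassert>
#include <map>
#include <string>
#include <nlohmann/json.hpp>
using nlohmann::json;

enum class Level { low = 0, high = 1 };

int main()
{
    // non-string keys arrive as an array of [key, value] pairs
    json j = json::parse(R"([[1, "one"], [2, "two"]])");
    auto m = j.get<std::map<int, std::string>>();
    assert(m.at(2) == "two");

    // enums deserialize via their underlying type by default
    auto lv = json(1).get<Level>();
    assert(lv == Level::high);
}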
File diff suppressed because it is too large
@@ -1,447 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <algorithm> // copy
#include <iterator> // begin, end
#include <string> // string
#include <tuple> // tuple, get
#include <type_traits> // is_same, is_constructible, is_floating_point, is_enum, underlying_type
#include <utility> // move, forward, declval, pair
#include <valarray> // valarray
#include <vector> // vector

#include <nlohmann/detail/iterators/iteration_proxy.hpp>
#include <nlohmann/detail/macro_scope.hpp>
#include <nlohmann/detail/meta/cpp_future.hpp>
#include <nlohmann/detail/meta/std_fs.hpp>
#include <nlohmann/detail/meta/type_traits.hpp>
#include <nlohmann/detail/value_t.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

//////////////////
// constructors //
//////////////////

/*
 * Note all external_constructor<>::construct functions need to call
 * j.m_data.m_value.destroy(j.m_data.m_type) to avoid a memory leak in case j contains an
 * allocated value (e.g., a string). See bug issue
 * https://github.com/nlohmann/json/issues/2865 for more information.
 */

template<value_t> struct external_constructor;

template<>
struct external_constructor<value_t::boolean>
{
    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, typename BasicJsonType::boolean_t b) noexcept
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::boolean;
        j.m_data.m_value = b;
        j.assert_invariant();
    }
};

template<>
struct external_constructor<value_t::string>
{
    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, const typename BasicJsonType::string_t& s)
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::string;
        j.m_data.m_value = s;
        j.assert_invariant();
    }

    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, typename BasicJsonType::string_t&& s)
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::string;
        j.m_data.m_value = std::move(s);
        j.assert_invariant();
    }

    template < typename BasicJsonType, typename CompatibleStringType,
               enable_if_t < !std::is_same<CompatibleStringType, typename BasicJsonType::string_t>::value,
                             int > = 0 >
    static void construct(BasicJsonType& j, const CompatibleStringType& str)
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::string;
        j.m_data.m_value.string = j.template create<typename BasicJsonType::string_t>(str);
        j.assert_invariant();
    }
};

template<>
struct external_constructor<value_t::binary>
{
    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, const typename BasicJsonType::binary_t& b)
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::binary;
        j.m_data.m_value = typename BasicJsonType::binary_t(b);
        j.assert_invariant();
    }

    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, typename BasicJsonType::binary_t&& b)
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::binary;
        j.m_data.m_value = typename BasicJsonType::binary_t(std::move(b));
        j.assert_invariant();
    }
};

template<>
struct external_constructor<value_t::number_float>
{
    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, typename BasicJsonType::number_float_t val) noexcept
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::number_float;
        j.m_data.m_value = val;
        j.assert_invariant();
    }
};

template<>
struct external_constructor<value_t::number_unsigned>
{
    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, typename BasicJsonType::number_unsigned_t val) noexcept
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::number_unsigned;
        j.m_data.m_value = val;
        j.assert_invariant();
    }
};

template<>
struct external_constructor<value_t::number_integer>
{
    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, typename BasicJsonType::number_integer_t val) noexcept
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::number_integer;
        j.m_data.m_value = val;
        j.assert_invariant();
    }
};

template<>
struct external_constructor<value_t::array>
{
    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, const typename BasicJsonType::array_t& arr)
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::array;
        j.m_data.m_value = arr;
        j.set_parents();
        j.assert_invariant();
    }

    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, typename BasicJsonType::array_t&& arr)
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::array;
        j.m_data.m_value = std::move(arr);
        j.set_parents();
        j.assert_invariant();
    }

    template < typename BasicJsonType, typename CompatibleArrayType,
               enable_if_t < !std::is_same<CompatibleArrayType, typename BasicJsonType::array_t>::value,
                             int > = 0 >
    static void construct(BasicJsonType& j, const CompatibleArrayType& arr)
    {
        using std::begin;
        using std::end;

        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::array;
        j.m_data.m_value.array = j.template create<typename BasicJsonType::array_t>(begin(arr), end(arr));
        j.set_parents();
        j.assert_invariant();
    }

    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, const std::vector<bool>& arr)
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::array;
        j.m_data.m_value = value_t::array;
        j.m_data.m_value.array->reserve(arr.size());
        for (const bool x : arr)
        {
            j.m_data.m_value.array->push_back(x);
            j.set_parent(j.m_data.m_value.array->back());
        }
        j.assert_invariant();
    }

    template<typename BasicJsonType, typename T,
             enable_if_t<std::is_convertible<T, BasicJsonType>::value, int> = 0>
    static void construct(BasicJsonType& j, const std::valarray<T>& arr)
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::array;
        j.m_data.m_value = value_t::array;
        j.m_data.m_value.array->resize(arr.size());
        if (arr.size() > 0)
        {
            std::copy(std::begin(arr), std::end(arr), j.m_data.m_value.array->begin());
        }
        j.set_parents();
        j.assert_invariant();
    }
};

template<>
struct external_constructor<value_t::object>
{
    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, const typename BasicJsonType::object_t& obj)
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::object;
        j.m_data.m_value = obj;
        j.set_parents();
        j.assert_invariant();
    }

    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, typename BasicJsonType::object_t&& obj)
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::object;
        j.m_data.m_value = std::move(obj);
        j.set_parents();
        j.assert_invariant();
    }

    template < typename BasicJsonType, typename CompatibleObjectType,
               enable_if_t < !std::is_same<CompatibleObjectType, typename BasicJsonType::object_t>::value, int > = 0 >
    static void construct(BasicJsonType& j, const CompatibleObjectType& obj)
    {
        using std::begin;
        using std::end;

        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::object;
        j.m_data.m_value.object = j.template create<typename BasicJsonType::object_t>(begin(obj), end(obj));
        j.set_parents();
        j.assert_invariant();
    }
};

/////////////
// to_json //
/////////////

template<typename BasicJsonType, typename T,
         enable_if_t<std::is_same<T, typename BasicJsonType::boolean_t>::value, int> = 0>
inline void to_json(BasicJsonType& j, T b) noexcept
{
    external_constructor<value_t::boolean>::construct(j, b);
}

template < typename BasicJsonType, typename BoolRef,
           enable_if_t <
               ((std::is_same<std::vector<bool>::reference, BoolRef>::value
                 && !std::is_same <std::vector<bool>::reference, typename BasicJsonType::boolean_t&>::value)
                || (std::is_same<std::vector<bool>::const_reference, BoolRef>::value
                    && !std::is_same <detail::uncvref_t<std::vector<bool>::const_reference>,
                                      typename BasicJsonType::boolean_t >::value))
               && std::is_convertible<const BoolRef&, typename BasicJsonType::boolean_t>::value, int > = 0 >
inline void to_json(BasicJsonType& j, const BoolRef& b) noexcept
{
    external_constructor<value_t::boolean>::construct(j, static_cast<typename BasicJsonType::boolean_t>(b));
}

template<typename BasicJsonType, typename CompatibleString,
         enable_if_t<std::is_constructible<typename BasicJsonType::string_t, CompatibleString>::value, int> = 0>
inline void to_json(BasicJsonType& j, const CompatibleString& s)
{
    external_constructor<value_t::string>::construct(j, s);
}

template<typename BasicJsonType>
inline void to_json(BasicJsonType& j, typename BasicJsonType::string_t&& s)
{
    external_constructor<value_t::string>::construct(j, std::move(s));
}

template<typename BasicJsonType, typename FloatType,
         enable_if_t<std::is_floating_point<FloatType>::value, int> = 0>
inline void to_json(BasicJsonType& j, FloatType val) noexcept
{
    external_constructor<value_t::number_float>::construct(j, static_cast<typename BasicJsonType::number_float_t>(val));
}

template<typename BasicJsonType, typename CompatibleNumberUnsignedType,
         enable_if_t<is_compatible_integer_type<typename BasicJsonType::number_unsigned_t, CompatibleNumberUnsignedType>::value, int> = 0>
inline void to_json(BasicJsonType& j, CompatibleNumberUnsignedType val) noexcept
{
    external_constructor<value_t::number_unsigned>::construct(j, static_cast<typename BasicJsonType::number_unsigned_t>(val));
}

template<typename BasicJsonType, typename CompatibleNumberIntegerType,
         enable_if_t<is_compatible_integer_type<typename BasicJsonType::number_integer_t, CompatibleNumberIntegerType>::value, int> = 0>
inline void to_json(BasicJsonType& j, CompatibleNumberIntegerType val) noexcept
{
    external_constructor<value_t::number_integer>::construct(j, static_cast<typename BasicJsonType::number_integer_t>(val));
}

#if !JSON_DISABLE_ENUM_SERIALIZATION
template<typename BasicJsonType, typename EnumType,
         enable_if_t<std::is_enum<EnumType>::value, int> = 0>
inline void to_json(BasicJsonType& j, EnumType e) noexcept
{
    using underlying_type = typename std::underlying_type<EnumType>::type;
    static constexpr value_t integral_value_t = std::is_unsigned<underlying_type>::value ? value_t::number_unsigned : value_t::number_integer;
    external_constructor<integral_value_t>::construct(j, static_cast<underlying_type>(e));
}
#endif // JSON_DISABLE_ENUM_SERIALIZATION

template<typename BasicJsonType>
inline void to_json(BasicJsonType& j, const std::vector<bool>& e)
{
    external_constructor<value_t::array>::construct(j, e);
}

template < typename BasicJsonType, typename CompatibleArrayType,
           enable_if_t < is_compatible_array_type<BasicJsonType,
                         CompatibleArrayType>::value&&
                         !is_compatible_object_type<BasicJsonType, CompatibleArrayType>::value&&
                         !is_compatible_string_type<BasicJsonType, CompatibleArrayType>::value&&
                         !std::is_same<typename BasicJsonType::binary_t, CompatibleArrayType>::value&&
                         !is_basic_json<CompatibleArrayType>::value,
                         int > = 0 >
inline void to_json(BasicJsonType& j, const CompatibleArrayType& arr)
{
    external_constructor<value_t::array>::construct(j, arr);
}

template<typename BasicJsonType>
inline void to_json(BasicJsonType& j, const typename BasicJsonType::binary_t& bin)
{
    external_constructor<value_t::binary>::construct(j, bin);
}

template<typename BasicJsonType, typename T,
         enable_if_t<std::is_convertible<T, BasicJsonType>::value, int> = 0>
inline void to_json(BasicJsonType& j, const std::valarray<T>& arr)
{
    external_constructor<value_t::array>::construct(j, std::move(arr));
}

template<typename BasicJsonType>
inline void to_json(BasicJsonType& j, typename BasicJsonType::array_t&& arr)
{
    external_constructor<value_t::array>::construct(j, std::move(arr));
}

template < typename BasicJsonType, typename CompatibleObjectType,
           enable_if_t < is_compatible_object_type<BasicJsonType, CompatibleObjectType>::value&& !is_basic_json<CompatibleObjectType>::value, int > = 0 >
inline void to_json(BasicJsonType& j, const CompatibleObjectType& obj)
{
    external_constructor<value_t::object>::construct(j, obj);
}

template<typename BasicJsonType>
inline void to_json(BasicJsonType& j, typename BasicJsonType::object_t&& obj)
{
    external_constructor<value_t::object>::construct(j, std::move(obj));
}

template <
    typename BasicJsonType, typename T, std::size_t N,
    enable_if_t < !std::is_constructible<typename BasicJsonType::string_t,
                  const T(&)[N]>::value, // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays)
                  int > = 0 >
inline void to_json(BasicJsonType& j, const T(&arr)[N]) // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays)
{
    external_constructor<value_t::array>::construct(j, arr);
}

template < typename BasicJsonType, typename T1, typename T2, enable_if_t < std::is_constructible<BasicJsonType, T1>::value&& std::is_constructible<BasicJsonType, T2>::value, int > = 0 >
inline void to_json(BasicJsonType& j, const std::pair<T1, T2>& p)
{
    j = { p.first, p.second };
}

// for https://github.com/nlohmann/json/pull/1134
template<typename BasicJsonType, typename T,
         enable_if_t<std::is_same<T, iteration_proxy_value<typename BasicJsonType::iterator>>::value, int> = 0>
inline void to_json(BasicJsonType& j, const T& b)
{
    j = { {b.key(), b.value()} };
}

template<typename BasicJsonType, typename Tuple, std::size_t... Idx>
inline void to_json_tuple_impl(BasicJsonType& j, const Tuple& t, index_sequence<Idx...> /*unused*/)
{
    j = { std::get<Idx>(t)... };
}

template<typename BasicJsonType, typename T, enable_if_t<is_constructible_tuple<BasicJsonType, T>::value, int > = 0>
inline void to_json(BasicJsonType& j, const T& t)
{
    to_json_tuple_impl(j, t, make_index_sequence<std::tuple_size<T>::value> {});
}

#if JSON_HAS_FILESYSTEM || JSON_HAS_EXPERIMENTAL_FILESYSTEM
template<typename BasicJsonType>
inline void to_json(BasicJsonType& j, const std_fs::path& p)
{
    j = p.string();
}
#endif

struct to_json_fn
{
    template<typename BasicJsonType, typename T>
    auto operator()(BasicJsonType& j, T&& val) const noexcept(noexcept(to_json(j, std::forward<T>(val))))
    -> decltype(to_json(j, std::forward<T>(val)), void())
    {
        return to_json(j, std::forward<T>(val));
    }
};
} // namespace detail

#ifndef JSON_HAS_CPP_17
/// namespace to hold default `to_json` function
/// to see why this is required:
/// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4381.html
namespace // NOLINT(cert-dcl59-cpp,fuchsia-header-anon-namespaces,google-build-namespaces)
{
#endif
JSON_INLINE_VARIABLE constexpr const auto& to_json = // NOLINT(misc-definitions-in-headers)
    detail::static_const<detail::to_json_fn>::value;
#ifndef JSON_HAS_CPP_17
} // namespace
#endif

NLOHMANN_JSON_NAMESPACE_END
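
The `to_json_fn` object above dispatches through argument-dependent lookup, which is what lets user code serialize its own types by providing a free `to_json` in the type's namespace. A minimal sketch (hypothetical `app::point` type, not part of this repository; assumes the single-header `nlohmann/json.hpp`):

#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

namespace app
{
    struct point { double x; double y; };

    // found via ADL by detail::to_json_fn when a point is assigned to a json
    void to_json(json& j, const point& p)
    {
        j = json{{"x", p.x}, {"y", p.y}};
    }
} // namespace app

int main()
{
    const app::point p {1.0, 2.0};
    const json j = p;              // routes through the machinery above
    std::cout << j.dump() << '\n'; // {"x":1.0,"y":2.0}
}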
@@ -1,257 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <cstddef> // nullptr_t
#include <exception> // exception
#if JSON_DIAGNOSTICS
    #include <numeric> // accumulate
#endif
#include <stdexcept> // runtime_error
#include <string> // to_string
#include <vector> // vector

#include <nlohmann/detail/value_t.hpp>
#include <nlohmann/detail/string_escape.hpp>
#include <nlohmann/detail/input/position_t.hpp>
#include <nlohmann/detail/macro_scope.hpp>
#include <nlohmann/detail/meta/cpp_future.hpp>
#include <nlohmann/detail/meta/type_traits.hpp>
#include <nlohmann/detail/string_concat.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

////////////////
// exceptions //
////////////////

/// @brief general exception of the @ref basic_json class
/// @sa https://json.nlohmann.me/api/basic_json/exception/
class exception : public std::exception
{
  public:
    /// returns the explanatory string
    const char* what() const noexcept override
    {
        return m.what();
    }

    /// the id of the exception
    const int id; // NOLINT(cppcoreguidelines-non-private-member-variables-in-classes)

  protected:
    JSON_HEDLEY_NON_NULL(3)
    exception(int id_, const char* what_arg) : id(id_), m(what_arg) {} // NOLINT(bugprone-throw-keyword-missing)

    static std::string name(const std::string& ename, int id_)
    {
        return concat("[json.exception.", ename, '.', std::to_string(id_), "] ");
    }

    static std::string diagnostics(std::nullptr_t /*leaf_element*/)
    {
        return "";
    }

    template<typename BasicJsonType>
    static std::string diagnostics(const BasicJsonType* leaf_element)
    {
#if JSON_DIAGNOSTICS
        std::vector<std::string> tokens;
        for (const auto* current = leaf_element; current != nullptr && current->m_parent != nullptr; current = current->m_parent)
        {
            switch (current->m_parent->type())
            {
                case value_t::array:
                {
                    for (std::size_t i = 0; i < current->m_parent->m_data.m_value.array->size(); ++i)
                    {
                        if (&current->m_parent->m_data.m_value.array->operator[](i) == current)
                        {
                            tokens.emplace_back(std::to_string(i));
                            break;
                        }
                    }
                    break;
                }

                case value_t::object:
                {
                    for (const auto& element : *current->m_parent->m_data.m_value.object)
                    {
                        if (&element.second == current)
                        {
                            tokens.emplace_back(element.first.c_str());
                            break;
                        }
                    }
                    break;
                }

                case value_t::null: // LCOV_EXCL_LINE
                case value_t::string: // LCOV_EXCL_LINE
                case value_t::boolean: // LCOV_EXCL_LINE
                case value_t::number_integer: // LCOV_EXCL_LINE
                case value_t::number_unsigned: // LCOV_EXCL_LINE
                case value_t::number_float: // LCOV_EXCL_LINE
                case value_t::binary: // LCOV_EXCL_LINE
                case value_t::discarded: // LCOV_EXCL_LINE
                default:   // LCOV_EXCL_LINE
                    break; // LCOV_EXCL_LINE
            }
        }

        if (tokens.empty())
        {
            return "";
        }

        auto str = std::accumulate(tokens.rbegin(), tokens.rend(), std::string{},
                                   [](const std::string & a, const std::string & b)
        {
            return concat(a, '/', detail::escape(b));
        });
        return concat('(', str, ") ");
#else
        static_cast<void>(leaf_element);
        return "";
#endif
    }

  private:
    /// an exception object as storage for error messages
    std::runtime_error m;
};

/// @brief exception indicating a parse error
/// @sa https://json.nlohmann.me/api/basic_json/parse_error/
class parse_error : public exception
{
  public:
    /*!
    @brief create a parse error exception
    @param[in] id_       the id of the exception
    @param[in] pos       the position where the error occurred (or with
                         chars_read_total=0 if the position cannot be
                         determined)
    @param[in] what_arg  the explanatory string
    @return parse_error object
    */
    template<typename BasicJsonContext, enable_if_t<is_basic_json_context<BasicJsonContext>::value, int> = 0>
    static parse_error create(int id_, const position_t& pos, const std::string& what_arg, BasicJsonContext context)
    {
        const std::string w = concat(exception::name("parse_error", id_), "parse error",
                                     position_string(pos), ": ", exception::diagnostics(context), what_arg);
        return {id_, pos.chars_read_total, w.c_str()};
    }

    template<typename BasicJsonContext, enable_if_t<is_basic_json_context<BasicJsonContext>::value, int> = 0>
    static parse_error create(int id_, std::size_t byte_, const std::string& what_arg, BasicJsonContext context)
    {
        const std::string w = concat(exception::name("parse_error", id_), "parse error",
                                     (byte_ != 0 ? (concat(" at byte ", std::to_string(byte_))) : ""),
                                     ": ", exception::diagnostics(context), what_arg);
        return {id_, byte_, w.c_str()};
    }

    /*!
    @brief byte index of the parse error

    The byte index of the last read character in the input file.

    @note For an input with n bytes, 1 is the index of the first character and
          n+1 is the index of the terminating null byte or the end of file.
          This also holds true when reading a byte vector (CBOR or MessagePack).
    */
    const std::size_t byte;

  private:
    parse_error(int id_, std::size_t byte_, const char* what_arg)
        : exception(id_, what_arg), byte(byte_) {}

    static std::string position_string(const position_t& pos)
    {
        return concat(" at line ", std::to_string(pos.lines_read + 1),
                      ", column ", std::to_string(pos.chars_read_current_line));
    }
};

/// @brief exception indicating errors with iterators
/// @sa https://json.nlohmann.me/api/basic_json/invalid_iterator/
class invalid_iterator : public exception
{
  public:
    template<typename BasicJsonContext, enable_if_t<is_basic_json_context<BasicJsonContext>::value, int> = 0>
    static invalid_iterator create(int id_, const std::string& what_arg, BasicJsonContext context)
    {
        const std::string w = concat(exception::name("invalid_iterator", id_), exception::diagnostics(context), what_arg);
        return {id_, w.c_str()};
    }

  private:
    JSON_HEDLEY_NON_NULL(3)
    invalid_iterator(int id_, const char* what_arg)
        : exception(id_, what_arg) {}
};

/// @brief exception indicating executing a member function with a wrong type
/// @sa https://json.nlohmann.me/api/basic_json/type_error/
class type_error : public exception
{
  public:
    template<typename BasicJsonContext, enable_if_t<is_basic_json_context<BasicJsonContext>::value, int> = 0>
    static type_error create(int id_, const std::string& what_arg, BasicJsonContext context)
    {
        const std::string w = concat(exception::name("type_error", id_), exception::diagnostics(context), what_arg);
        return {id_, w.c_str()};
    }

  private:
    JSON_HEDLEY_NON_NULL(3)
    type_error(int id_, const char* what_arg) : exception(id_, what_arg) {}
};

/// @brief exception indicating access out of the defined range
/// @sa https://json.nlohmann.me/api/basic_json/out_of_range/
class out_of_range : public exception
{
  public:
    template<typename BasicJsonContext, enable_if_t<is_basic_json_context<BasicJsonContext>::value, int> = 0>
    static out_of_range create(int id_, const std::string& what_arg, BasicJsonContext context)
    {
        const std::string w = concat(exception::name("out_of_range", id_), exception::diagnostics(context), what_arg);
        return {id_, w.c_str()};
    }

  private:
    JSON_HEDLEY_NON_NULL(3)
    out_of_range(int id_, const char* what_arg) : exception(id_, what_arg) {}
};

/// @brief exception indicating other library errors
/// @sa https://json.nlohmann.me/api/basic_json/other_error/
class other_error : public exception
{
  public:
    template<typename BasicJsonContext, enable_if_t<is_basic_json_context<BasicJsonContext>::value, int> = 0>
    static other_error create(int id_, const std::string& what_arg, BasicJsonContext context)
    {
        const std::string w = concat(exception::name("other_error", id_), exception::diagnostics(context), what_arg);
        return {id_, w.c_str()};
    }

  private:
    JSON_HEDLEY_NON_NULL(3)
    other_error(int id_, const char* what_arg) : exception(id_, what_arg) {}
};

} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
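
Each `create` factory above folds the exception class name and numeric id into the `what()` text, so callers can match either programmatically on `id` or textually on the message. A minimal sketch of catching one of these (standalone example, assumes the single-header `nlohmann/json.hpp`):

#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main()
{
    const json j = "not a number";
    try
    {
        const int n = j.get<int>(); // raises type_error 302 on a string
        std::cout << n << '\n';
    }
    catch (const json::type_error& e)
    {
        // e.id holds the numeric code; what() carries the
        // "[json.exception.type_error.302] ..." message built by name()
        std::cout << "error " << e.id << ": " << e.what() << '\n';
    }
}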
@@ -1,129 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <cstdint> // uint8_t
#include <cstddef> // size_t
#include <functional> // hash

#include <nlohmann/detail/abi_macros.hpp>
#include <nlohmann/detail/value_t.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

// boost::hash_combine
inline std::size_t combine(std::size_t seed, std::size_t h) noexcept
{
    seed ^= h + 0x9e3779b9 + (seed << 6U) + (seed >> 2U);
    return seed;
}

/*!
@brief hash a JSON value

The hash function tries to rely on std::hash where possible. Furthermore, the
type of the JSON value is taken into account to have different hash values for
null, 0, 0U, and false, etc.

@tparam BasicJsonType basic_json specialization
@param j JSON value to hash
@return hash value of j
*/
template<typename BasicJsonType>
std::size_t hash(const BasicJsonType& j)
{
    using string_t = typename BasicJsonType::string_t;
    using number_integer_t = typename BasicJsonType::number_integer_t;
    using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
    using number_float_t = typename BasicJsonType::number_float_t;

    const auto type = static_cast<std::size_t>(j.type());
    switch (j.type())
    {
        case BasicJsonType::value_t::null:
        case BasicJsonType::value_t::discarded:
        {
            return combine(type, 0);
        }

        case BasicJsonType::value_t::object:
        {
            auto seed = combine(type, j.size());
            for (const auto& element : j.items())
            {
                const auto h = std::hash<string_t> {}(element.key());
                seed = combine(seed, h);
                seed = combine(seed, hash(element.value()));
            }
            return seed;
        }

        case BasicJsonType::value_t::array:
        {
            auto seed = combine(type, j.size());
            for (const auto& element : j)
            {
                seed = combine(seed, hash(element));
            }
            return seed;
        }

        case BasicJsonType::value_t::string:
        {
            const auto h = std::hash<string_t> {}(j.template get_ref<const string_t&>());
            return combine(type, h);
        }

        case BasicJsonType::value_t::boolean:
        {
            const auto h = std::hash<bool> {}(j.template get<bool>());
            return combine(type, h);
        }

        case BasicJsonType::value_t::number_integer:
        {
            const auto h = std::hash<number_integer_t> {}(j.template get<number_integer_t>());
            return combine(type, h);
        }

        case BasicJsonType::value_t::number_unsigned:
        {
            const auto h = std::hash<number_unsigned_t> {}(j.template get<number_unsigned_t>());
            return combine(type, h);
        }

        case BasicJsonType::value_t::number_float:
        {
            const auto h = std::hash<number_float_t> {}(j.template get<number_float_t>());
            return combine(type, h);
        }

        case BasicJsonType::value_t::binary:
        {
            auto seed = combine(type, j.get_binary().size());
            const auto h = std::hash<bool> {}(j.get_binary().has_subtype());
            seed = combine(seed, h);
            seed = combine(seed, static_cast<std::size_t>(j.get_binary().subtype()));
            for (const auto byte : j.get_binary())
            {
                seed = combine(seed, std::hash<std::uint8_t> {}(byte));
            }
            return seed;
        }

        default:                // LCOV_EXCL_LINE
            JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE
            return 0;           // LCOV_EXCL_LINE
    }
}

} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
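
`detail::hash` above is what backs the library's `std::hash<nlohmann::json>` specialization, so JSON values can key unordered containers directly; mixing the type tag into the seed is what keeps null, 0, 0U, and false distinct. A minimal sketch (standalone example, assumes the single-header `nlohmann/json.hpp`):

#include <iostream>
#include <unordered_set>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main()
{
    std::unordered_set<json> seen;
    seen.insert(json{{"id", 1}});
    seen.insert(json::parse(R"({"id": 1})")); // equal value, equal hash
    seen.insert(json(nullptr));               // different type tag -> different bucket
    std::cout << seen.size() << '\n';         // prints 2
}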
File diff suppressed because it is too large
@@ -1,492 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <array> // array
#include <cstddef> // size_t
#include <cstring> // strlen
#include <iterator> // begin, end, iterator_traits, random_access_iterator_tag, distance, next
#include <memory> // shared_ptr, make_shared, addressof
#include <numeric> // accumulate
#include <string> // string, char_traits
#include <type_traits> // enable_if, is_base_of, is_pointer, is_integral, remove_pointer
#include <utility> // pair, declval

#ifndef JSON_NO_IO
    #include <cstdio> // FILE *
    #include <istream> // istream
#endif // JSON_NO_IO

#include <nlohmann/detail/iterators/iterator_traits.hpp>
#include <nlohmann/detail/macro_scope.hpp>
#include <nlohmann/detail/meta/type_traits.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

/// the supported input formats
enum class input_format_t { json, cbor, msgpack, ubjson, bson, bjdata };

////////////////////
// input adapters //
////////////////////

#ifndef JSON_NO_IO
/*!
Input adapter for stdio file access. This adapter reads only 1 byte at a time and
does not use any buffer. It is a very low-level adapter.
*/
class file_input_adapter
{
  public:
    using char_type = char;

    JSON_HEDLEY_NON_NULL(2)
    explicit file_input_adapter(std::FILE* f) noexcept
        : m_file(f)
    {
        JSON_ASSERT(m_file != nullptr);
    }

    // make class move-only
    file_input_adapter(const file_input_adapter&) = delete;
    file_input_adapter(file_input_adapter&&) noexcept = default;
    file_input_adapter& operator=(const file_input_adapter&) = delete;
    file_input_adapter& operator=(file_input_adapter&&) = delete;
    ~file_input_adapter() = default;

    std::char_traits<char>::int_type get_character() noexcept
    {
        return std::fgetc(m_file);
    }

  private:
    /// the file pointer to read from
    std::FILE* m_file;
};

/*!
Input adapter for a (caching) istream. Ignores a UTF Byte Order Mark at the
beginning of input. Does not support changing the underlying std::streambuf
in mid-input. Maintains underlying std::istream and std::streambuf to support
subsequent use of standard std::istream operations to process any input
characters following those used in parsing the JSON input. Clears the
std::istream flags; any input errors (e.g., EOF) will be detected by the first
subsequent call for input from the std::istream.
*/
class input_stream_adapter
{
  public:
    using char_type = char;

    ~input_stream_adapter()
    {
        // clear stream flags; we use underlying streambuf I/O, do not
        // maintain ifstream flags, except eof
        if (is != nullptr)
        {
            is->clear(is->rdstate() & std::ios::eofbit);
        }
    }

    explicit input_stream_adapter(std::istream& i)
        : is(&i), sb(i.rdbuf())
    {}

    // delete because of pointer members
    input_stream_adapter(const input_stream_adapter&) = delete;
    input_stream_adapter& operator=(input_stream_adapter&) = delete;
    input_stream_adapter& operator=(input_stream_adapter&&) = delete;

    input_stream_adapter(input_stream_adapter&& rhs) noexcept
        : is(rhs.is), sb(rhs.sb)
    {
        rhs.is = nullptr;
        rhs.sb = nullptr;
    }

    // std::istream/std::streambuf use std::char_traits<char>::to_int_type, to
    // ensure that std::char_traits<char>::eof() and the character 0xFF do not
    // end up as the same value, e.g. 0xFFFFFFFF.
    std::char_traits<char>::int_type get_character()
    {
        auto res = sb->sbumpc();
        // set eof manually, as we don't use the istream interface.
        if (JSON_HEDLEY_UNLIKELY(res == std::char_traits<char>::eof()))
        {
            is->clear(is->rdstate() | std::ios::eofbit);
        }
        return res;
    }

  private:
    /// the associated input stream
    std::istream* is = nullptr;
    std::streambuf* sb = nullptr;
};
#endif // JSON_NO_IO

// General-purpose iterator-based adapter. It might not be as fast as
// theoretically possible for some containers, but it is extremely versatile.
template<typename IteratorType>
class iterator_input_adapter
{
  public:
    using char_type = typename std::iterator_traits<IteratorType>::value_type;

    iterator_input_adapter(IteratorType first, IteratorType last)
        : current(std::move(first)), end(std::move(last))
    {}

    typename char_traits<char_type>::int_type get_character()
    {
        if (JSON_HEDLEY_LIKELY(current != end))
        {
            auto result = char_traits<char_type>::to_int_type(*current);
            std::advance(current, 1);
            return result;
        }

        return char_traits<char_type>::eof();
    }

  private:
    IteratorType current;
    IteratorType end;

    template<typename BaseInputAdapter, size_t T>
    friend struct wide_string_input_helper;

    bool empty() const
    {
        return current == end;
    }
};

template<typename BaseInputAdapter, size_t T>
struct wide_string_input_helper;

template<typename BaseInputAdapter>
struct wide_string_input_helper<BaseInputAdapter, 4>
{
    // UTF-32
    static void fill_buffer(BaseInputAdapter& input,
                            std::array<std::char_traits<char>::int_type, 4>& utf8_bytes,
                            size_t& utf8_bytes_index,
                            size_t& utf8_bytes_filled)
    {
        utf8_bytes_index = 0;

        if (JSON_HEDLEY_UNLIKELY(input.empty()))
        {
            utf8_bytes[0] = std::char_traits<char>::eof();
            utf8_bytes_filled = 1;
        }
        else
        {
            // get the current character
            const auto wc = input.get_character();

            // UTF-32 to UTF-8 encoding
            if (wc < 0x80)
            {
                utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(wc);
                utf8_bytes_filled = 1;
            }
            else if (wc <= 0x7FF)
            {
                utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xC0u | ((static_cast<unsigned int>(wc) >> 6u) & 0x1Fu));
                utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | (static_cast<unsigned int>(wc) & 0x3Fu));
                utf8_bytes_filled = 2;
            }
            else if (wc <= 0xFFFF)
            {
                utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xE0u | ((static_cast<unsigned int>(wc) >> 12u) & 0x0Fu));
                utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | ((static_cast<unsigned int>(wc) >> 6u) & 0x3Fu));
                utf8_bytes[2] = static_cast<std::char_traits<char>::int_type>(0x80u | (static_cast<unsigned int>(wc) & 0x3Fu));
                utf8_bytes_filled = 3;
            }
            else if (wc <= 0x10FFFF)
            {
                utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xF0u | ((static_cast<unsigned int>(wc) >> 18u) & 0x07u));
                utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | ((static_cast<unsigned int>(wc) >> 12u) & 0x3Fu));
                utf8_bytes[2] = static_cast<std::char_traits<char>::int_type>(0x80u | ((static_cast<unsigned int>(wc) >> 6u) & 0x3Fu));
                utf8_bytes[3] = static_cast<std::char_traits<char>::int_type>(0x80u | (static_cast<unsigned int>(wc) & 0x3Fu));
                utf8_bytes_filled = 4;
            }
            else
            {
                // unknown character
                utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(wc);
                utf8_bytes_filled = 1;
            }
        }
    }
};

template<typename BaseInputAdapter>
struct wide_string_input_helper<BaseInputAdapter, 2>
{
    // UTF-16
    static void fill_buffer(BaseInputAdapter& input,
                            std::array<std::char_traits<char>::int_type, 4>& utf8_bytes,
                            size_t& utf8_bytes_index,
                            size_t& utf8_bytes_filled)
    {
        utf8_bytes_index = 0;

        if (JSON_HEDLEY_UNLIKELY(input.empty()))
        {
            utf8_bytes[0] = std::char_traits<char>::eof();
            utf8_bytes_filled = 1;
        }
        else
        {
            // get the current character
            const auto wc = input.get_character();

            // UTF-16 to UTF-8 encoding
            if (wc < 0x80)
            {
                utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(wc);
                utf8_bytes_filled = 1;
            }
            else if (wc <= 0x7FF)
            {
                utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xC0u | ((static_cast<unsigned int>(wc) >> 6u)));
                utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | (static_cast<unsigned int>(wc) & 0x3Fu));
                utf8_bytes_filled = 2;
            }
            else if (0xD800 > wc || wc >= 0xE000)
            {
                utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xE0u | ((static_cast<unsigned int>(wc) >> 12u)));
                utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | ((static_cast<unsigned int>(wc) >> 6u) & 0x3Fu));
                utf8_bytes[2] = static_cast<std::char_traits<char>::int_type>(0x80u | (static_cast<unsigned int>(wc) & 0x3Fu));
                utf8_bytes_filled = 3;
            }
            else
            {
                if (JSON_HEDLEY_UNLIKELY(!input.empty()))
                {
                    const auto wc2 = static_cast<unsigned int>(input.get_character());
                    const auto charcode = 0x10000u + (((static_cast<unsigned int>(wc) & 0x3FFu) << 10u) | (wc2 & 0x3FFu));
                    utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xF0u | (charcode >> 18u));
                    utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | ((charcode >> 12u) & 0x3Fu));
                    utf8_bytes[2] = static_cast<std::char_traits<char>::int_type>(0x80u | ((charcode >> 6u) & 0x3Fu));
                    utf8_bytes[3] = static_cast<std::char_traits<char>::int_type>(0x80u | (charcode & 0x3Fu));
                    utf8_bytes_filled = 4;
                }
                else
                {
                    utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(wc);
                    utf8_bytes_filled = 1;
                }
            }
        }
    }
};

// Wraps another input adapter to convert wide character types into individual bytes.
template<typename BaseInputAdapter, typename WideCharType>
class wide_string_input_adapter
{
  public:
    using char_type = char;

    wide_string_input_adapter(BaseInputAdapter base)
        : base_adapter(base) {}

    typename std::char_traits<char>::int_type get_character() noexcept
    {
        // check if buffer needs to be filled
        if (utf8_bytes_index == utf8_bytes_filled)
        {
            fill_buffer<sizeof(WideCharType)>();

            JSON_ASSERT(utf8_bytes_filled > 0);
            JSON_ASSERT(utf8_bytes_index == 0);
        }

        // use buffer
        JSON_ASSERT(utf8_bytes_filled > 0);
        JSON_ASSERT(utf8_bytes_index < utf8_bytes_filled);
        return utf8_bytes[utf8_bytes_index++];
    }

  private:
    BaseInputAdapter base_adapter;

    template<size_t T>
    void fill_buffer()
    {
        wide_string_input_helper<BaseInputAdapter, T>::fill_buffer(base_adapter, utf8_bytes, utf8_bytes_index, utf8_bytes_filled);
    }

    /// a buffer for UTF-8 bytes
    std::array<std::char_traits<char>::int_type, 4> utf8_bytes = {{0, 0, 0, 0}};

    /// index to the utf8_codes array for the next valid byte
    std::size_t utf8_bytes_index = 0;
    /// number of valid bytes in the utf8_codes array
    std::size_t utf8_bytes_filled = 0;
};

template<typename IteratorType, typename Enable = void>
struct iterator_input_adapter_factory
{
    using iterator_type = IteratorType;
    using char_type = typename std::iterator_traits<iterator_type>::value_type;
    using adapter_type = iterator_input_adapter<iterator_type>;

    static adapter_type create(IteratorType first, IteratorType last)
    {
        return adapter_type(std::move(first), std::move(last));
    }
};

template<typename T>
struct is_iterator_of_multibyte
{
    using value_type = typename std::iterator_traits<T>::value_type;
    enum
    {
        value = sizeof(value_type) > 1
    };
};

template<typename IteratorType>
struct iterator_input_adapter_factory<IteratorType, enable_if_t<is_iterator_of_multibyte<IteratorType>::value>>
{
    using iterator_type = IteratorType;
    using char_type = typename std::iterator_traits<iterator_type>::value_type;
    using base_adapter_type = iterator_input_adapter<iterator_type>;
    using adapter_type = wide_string_input_adapter<base_adapter_type, char_type>;

    static adapter_type create(IteratorType first, IteratorType last)
    {
        return adapter_type(base_adapter_type(std::move(first), std::move(last)));
    }
};

// General purpose iterator-based input
template<typename IteratorType>
typename iterator_input_adapter_factory<IteratorType>::adapter_type input_adapter(IteratorType first, IteratorType last)
{
    using factory_type = iterator_input_adapter_factory<IteratorType>;
    return factory_type::create(first, last);
}

// Convenience shorthand from container to iterator
// Enables ADL on begin(container) and end(container)
// Encloses the using declarations in a namespace so they do not leak into the outside scope

namespace container_input_adapter_factory_impl
{

using std::begin;
using std::end;

template<typename ContainerType, typename Enable = void>
struct container_input_adapter_factory {};

template<typename ContainerType>
struct container_input_adapter_factory< ContainerType,
       void_t<decltype(begin(std::declval<ContainerType>()), end(std::declval<ContainerType>()))>>
{
    using adapter_type = decltype(input_adapter(begin(std::declval<ContainerType>()), end(std::declval<ContainerType>())));

    static adapter_type create(const ContainerType& container)
    {
        return input_adapter(begin(container), end(container));
    }
};

} // namespace container_input_adapter_factory_impl

template<typename ContainerType>
typename container_input_adapter_factory_impl::container_input_adapter_factory<ContainerType>::adapter_type input_adapter(const ContainerType& container)
{
    return container_input_adapter_factory_impl::container_input_adapter_factory<ContainerType>::create(container);
}

#ifndef JSON_NO_IO
// Special cases with fast paths
inline file_input_adapter input_adapter(std::FILE* file)
{
    return file_input_adapter(file);
}

inline input_stream_adapter input_adapter(std::istream& stream)
{
    return input_stream_adapter(stream);
}

inline input_stream_adapter input_adapter(std::istream&& stream)
{
    return input_stream_adapter(stream);
}
#endif // JSON_NO_IO

using contiguous_bytes_input_adapter = decltype(input_adapter(std::declval<const char*>(), std::declval<const char*>()));

// Null-delimited strings, and the like.
template < typename CharT,
           typename std::enable_if <
               std::is_pointer<CharT>::value&&
               !std::is_array<CharT>::value&&
               std::is_integral<typename std::remove_pointer<CharT>::type>::value&&
               sizeof(typename std::remove_pointer<CharT>::type) == 1,
               int >::type = 0 >
contiguous_bytes_input_adapter input_adapter(CharT b)
{
    auto length = std::strlen(reinterpret_cast<const char*>(b));
    const auto* ptr = reinterpret_cast<const char*>(b);
    return input_adapter(ptr, ptr + length);
}

template<typename T, std::size_t N>
auto input_adapter(T (&array)[N]) -> decltype(input_adapter(array, array + N)) // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays)
{
    return input_adapter(array, array + N);
}

// This class only handles inputs of input_buffer_adapter type.
// It's required so that expressions like {ptr, len} can be implicitly cast
// to the correct adapter.
class span_input_adapter
{
  public:
    template < typename CharT,
               typename std::enable_if <
                   std::is_pointer<CharT>::value&&
                   std::is_integral<typename std::remove_pointer<CharT>::type>::value&&
                   sizeof(typename std::remove_pointer<CharT>::type) == 1,
                   int >::type = 0 >
    span_input_adapter(CharT b, std::size_t l)
        : ia(reinterpret_cast<const char*>(b), reinterpret_cast<const char*>(b) + l) {}

    template<class IteratorType,
             typename std::enable_if<
                 std::is_same<typename iterator_traits<IteratorType>::iterator_category, std::random_access_iterator_tag>::value,
                 int>::type = 0>
    span_input_adapter(IteratorType first, IteratorType last)
        : ia(input_adapter(first, last)) {}

    contiguous_bytes_input_adapter&& get()
    {
        return std::move(ia); // NOLINT(hicpp-move-const-arg,performance-move-const-arg)
    }

  private:
    contiguous_bytes_input_adapter ia;
};

} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
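
Every public `parse` entry point funnels its argument through the `input_adapter` overload set above, which is why one call signature accepts C strings, streams, containers, and iterator pairs alike. A minimal sketch (standalone example, assumes the single-header `nlohmann/json.hpp`):

#include <cassert>
#include <sstream>
#include <vector>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main()
{
    // null-terminated string -> contiguous_bytes_input_adapter
    const json a = json::parse("[1, 2, 3]");

    // istream -> input_stream_adapter
    std::istringstream stream(R"({"k": true})");
    const json b = json::parse(stream);

    // iterator pair over bytes -> iterator_input_adapter
    const std::vector<char> bytes {'4', '2'};
    const json c = json::parse(bytes.begin(), bytes.end());

    assert(a.size() == 3);
    assert(b.at("k") == true);
    assert(c == 42);
}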
@@ -1,727 +0,0 @@
|
||||
// __ _____ _____ _____
|
||||
// __| | __| | | | JSON for Modern C++
|
||||
// | | |__ | | | | | | version 3.11.3
|
||||
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
|
||||
//
|
||||
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <cstddef>
|
||||
#include <string> // string
|
||||
#include <utility> // move
|
||||
#include <vector> // vector
|
||||
|
||||
#include <nlohmann/detail/exceptions.hpp>
|
||||
#include <nlohmann/detail/macro_scope.hpp>
|
||||
#include <nlohmann/detail/string_concat.hpp>
|
||||
|
||||
NLOHMANN_JSON_NAMESPACE_BEGIN
|
||||
|
||||
/*!
|
||||
@brief SAX interface
|
||||
|
||||
This class describes the SAX interface used by @ref nlohmann::json::sax_parse.
|
||||
Each function is called in different situations while the input is parsed. The
|
||||
boolean return value informs the parser whether to continue processing the
|
||||
input.
|
||||
*/
|
||||
template<typename BasicJsonType>
|
||||
struct json_sax
|
||||
{
|
||||
using number_integer_t = typename BasicJsonType::number_integer_t;
|
||||
using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
|
||||
using number_float_t = typename BasicJsonType::number_float_t;
|
||||
using string_t = typename BasicJsonType::string_t;
|
||||
using binary_t = typename BasicJsonType::binary_t;
|
||||
|
||||
/*!
|
||||
@brief a null value was read
|
||||
@return whether parsing should proceed
|
||||
*/
|
||||
virtual bool null() = 0;
|
||||
|
||||
/*!
|
||||
@brief a boolean value was read
|
||||
@param[in] val boolean value
|
||||
@return whether parsing should proceed
|
||||
*/
|
||||
virtual bool boolean(bool val) = 0;
|
||||
|
||||
/*!
|
||||
@brief an integer number was read
|
||||
@param[in] val integer value
|
||||
@return whether parsing should proceed
|
||||
*/
|
||||
virtual bool number_integer(number_integer_t val) = 0;
|
||||
|
||||
/*!
|
||||
@brief an unsigned integer number was read
|
||||
@param[in] val unsigned integer value
|
||||
@return whether parsing should proceed
|
||||
*/
|
||||
virtual bool number_unsigned(number_unsigned_t val) = 0;
|
||||
|
||||
/*!
|
||||
@brief a floating-point number was read
|
||||
@param[in] val floating-point value
|
||||
@param[in] s raw token value
|
||||
@return whether parsing should proceed
|
||||
*/
|
||||
virtual bool number_float(number_float_t val, const string_t& s) = 0;
|
||||
|
||||
/*!
|
||||
@brief a string value was read
|
||||
@param[in] val string value
|
||||
@return whether parsing should proceed
|
||||
@note It is safe to move the passed string value.
|
||||
*/
|
||||
virtual bool string(string_t& val) = 0;
|
||||
|
||||
/*!
|
||||
@brief a binary value was read
|
||||
@param[in] val binary value
|
||||
@return whether parsing should proceed
|
||||
@note It is safe to move the passed binary value.
|
||||
*/
|
||||
virtual bool binary(binary_t& val) = 0;
|
||||
|
||||
/*!
|
||||
@brief the beginning of an object was read
|
||||
@param[in] elements number of object elements or -1 if unknown
|
||||
@return whether parsing should proceed
|
||||
@note binary formats may report the number of elements
|
||||
*/
|
||||
virtual bool start_object(std::size_t elements) = 0;
|
||||
|
||||
/*!
|
||||
@brief an object key was read
|
||||
@param[in] val object key
|
||||
@return whether parsing should proceed
|
||||
@note It is safe to move the passed string.
|
||||
*/
|
||||
virtual bool key(string_t& val) = 0;
|
||||
|
||||
/*!
|
||||
@brief the end of an object was read
|
||||
@return whether parsing should proceed
|
||||
*/
|
||||
virtual bool end_object() = 0;
|
||||
|
||||
/*!
|
||||
@brief the beginning of an array was read
|
||||
@param[in] elements number of array elements or -1 if unknown
|
||||
@return whether parsing should proceed
|
||||
@note binary formats may report the number of elements
|
||||
*/
|
||||
virtual bool start_array(std::size_t elements) = 0;
|
||||
|
||||
/*!
|
||||
@brief the end of an array was read
|
||||
@return whether parsing should proceed
|
||||
*/
|
||||
virtual bool end_array() = 0;
|
||||
|
||||
/*!
|
||||
@brief a parse error occurred
|
||||
@param[in] position the position in the input where the error occurs
|
||||
@param[in] last_token the last read token
|
||||
@param[in] ex an exception object describing the error
|
||||
@return whether parsing should proceed (must return false)
|
||||
*/
|
||||
virtual bool parse_error(std::size_t position,
|
||||
const std::string& last_token,
|
||||
const detail::exception& ex) = 0;
|
||||
|
||||
json_sax() = default;
|
||||
json_sax(const json_sax&) = default;
|
||||
json_sax(json_sax&&) noexcept = default;
|
||||
json_sax& operator=(const json_sax&) = default;
|
||||
json_sax& operator=(json_sax&&) noexcept = default;
|
||||
virtual ~json_sax() = default;
|
||||
};

namespace detail
{
/*!
@brief SAX implementation to create a JSON value from SAX events

This class implements the @ref json_sax interface and processes the SAX events
to create a JSON value which makes it basically a DOM parser. The structure or
hierarchy of the JSON value is managed by the stack `ref_stack` which contains
a pointer to the respective array or object for each recursion depth.

After successful parsing, the value that is passed by reference to the
constructor contains the parsed value.

@tparam BasicJsonType  the JSON type
*/
template<typename BasicJsonType>
class json_sax_dom_parser
{
  public:
    using number_integer_t = typename BasicJsonType::number_integer_t;
    using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
    using number_float_t = typename BasicJsonType::number_float_t;
    using string_t = typename BasicJsonType::string_t;
    using binary_t = typename BasicJsonType::binary_t;

    /*!
    @param[in,out] r  reference to a JSON value that is manipulated while
                      parsing
    @param[in] allow_exceptions_  whether parse errors yield exceptions
    */
    explicit json_sax_dom_parser(BasicJsonType& r, const bool allow_exceptions_ = true)
        : root(r), allow_exceptions(allow_exceptions_)
    {}

    // make class move-only
    json_sax_dom_parser(const json_sax_dom_parser&) = delete;
    json_sax_dom_parser(json_sax_dom_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor)
    json_sax_dom_parser& operator=(const json_sax_dom_parser&) = delete;
    json_sax_dom_parser& operator=(json_sax_dom_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor)
    ~json_sax_dom_parser() = default;

    bool null()
    {
        handle_value(nullptr);
        return true;
    }

    bool boolean(bool val)
    {
        handle_value(val);
        return true;
    }

    bool number_integer(number_integer_t val)
    {
        handle_value(val);
        return true;
    }

    bool number_unsigned(number_unsigned_t val)
    {
        handle_value(val);
        return true;
    }

    bool number_float(number_float_t val, const string_t& /*unused*/)
    {
        handle_value(val);
        return true;
    }

    bool string(string_t& val)
    {
        handle_value(val);
        return true;
    }

    bool binary(binary_t& val)
    {
        handle_value(std::move(val));
        return true;
    }

    bool start_object(std::size_t len)
    {
        ref_stack.push_back(handle_value(BasicJsonType::value_t::object));

        if (JSON_HEDLEY_UNLIKELY(len != static_cast<std::size_t>(-1) && len > ref_stack.back()->max_size()))
        {
            JSON_THROW(out_of_range::create(408, concat("excessive object size: ", std::to_string(len)), ref_stack.back()));
        }

        return true;
    }

    bool key(string_t& val)
    {
        JSON_ASSERT(!ref_stack.empty());
        JSON_ASSERT(ref_stack.back()->is_object());

        // add null at given key and store the reference for later
        object_element = &(ref_stack.back()->m_data.m_value.object->operator[](val));
        return true;
    }

    bool end_object()
    {
        JSON_ASSERT(!ref_stack.empty());
        JSON_ASSERT(ref_stack.back()->is_object());

        ref_stack.back()->set_parents();
        ref_stack.pop_back();
        return true;
    }

    bool start_array(std::size_t len)
    {
        ref_stack.push_back(handle_value(BasicJsonType::value_t::array));

        if (JSON_HEDLEY_UNLIKELY(len != static_cast<std::size_t>(-1) && len > ref_stack.back()->max_size()))
        {
            JSON_THROW(out_of_range::create(408, concat("excessive array size: ", std::to_string(len)), ref_stack.back()));
        }

        return true;
    }

    bool end_array()
    {
        JSON_ASSERT(!ref_stack.empty());
        JSON_ASSERT(ref_stack.back()->is_array());

        ref_stack.back()->set_parents();
        ref_stack.pop_back();
        return true;
    }

    template<class Exception>
    bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/,
                     const Exception& ex)
    {
        errored = true;
        static_cast<void>(ex);
        if (allow_exceptions)
        {
            JSON_THROW(ex);
        }
        return false;
    }

    constexpr bool is_errored() const
    {
        return errored;
    }

  private:
    /*!
    @invariant If the ref stack is empty, then the passed value will be the new
               root.
    @invariant If the ref stack contains a value, then it is an array or an
               object to which we can add elements
    */
    template<typename Value>
    JSON_HEDLEY_RETURNS_NON_NULL
    BasicJsonType* handle_value(Value&& v)
    {
        if (ref_stack.empty())
        {
            root = BasicJsonType(std::forward<Value>(v));
            return &root;
        }

        JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object());

        if (ref_stack.back()->is_array())
        {
            ref_stack.back()->m_data.m_value.array->emplace_back(std::forward<Value>(v));
            return &(ref_stack.back()->m_data.m_value.array->back());
        }

        JSON_ASSERT(ref_stack.back()->is_object());
        JSON_ASSERT(object_element);
        *object_element = BasicJsonType(std::forward<Value>(v));
        return object_element;
    }

    /// the parsed JSON value
    BasicJsonType& root;
    /// stack to model hierarchy of values
    std::vector<BasicJsonType*> ref_stack {};
    /// helper to hold the reference for the next object element
    BasicJsonType* object_element = nullptr;
    /// whether a syntax error occurred
    bool errored = false;
    /// whether to throw exceptions in case of errors
    const bool allow_exceptions = true;
};

template<typename BasicJsonType>
class json_sax_dom_callback_parser
{
  public:
    using number_integer_t = typename BasicJsonType::number_integer_t;
    using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
    using number_float_t = typename BasicJsonType::number_float_t;
    using string_t = typename BasicJsonType::string_t;
    using binary_t = typename BasicJsonType::binary_t;
    using parser_callback_t = typename BasicJsonType::parser_callback_t;
    using parse_event_t = typename BasicJsonType::parse_event_t;

    json_sax_dom_callback_parser(BasicJsonType& r,
                                 const parser_callback_t cb,
                                 const bool allow_exceptions_ = true)
        : root(r), callback(cb), allow_exceptions(allow_exceptions_)
    {
        keep_stack.push_back(true);
    }

    // make class move-only
    json_sax_dom_callback_parser(const json_sax_dom_callback_parser&) = delete;
    json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor)
    json_sax_dom_callback_parser& operator=(const json_sax_dom_callback_parser&) = delete;
    json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor)
    ~json_sax_dom_callback_parser() = default;

    bool null()
    {
        handle_value(nullptr);
        return true;
    }

    bool boolean(bool val)
    {
        handle_value(val);
        return true;
    }

    bool number_integer(number_integer_t val)
    {
        handle_value(val);
        return true;
    }

    bool number_unsigned(number_unsigned_t val)
    {
        handle_value(val);
        return true;
    }

    bool number_float(number_float_t val, const string_t& /*unused*/)
    {
        handle_value(val);
        return true;
    }

    bool string(string_t& val)
    {
        handle_value(val);
        return true;
    }

    bool binary(binary_t& val)
    {
        handle_value(std::move(val));
        return true;
    }

    bool start_object(std::size_t len)
    {
        // check callback for object start
        const bool keep = callback(static_cast<int>(ref_stack.size()), parse_event_t::object_start, discarded);
        keep_stack.push_back(keep);

        auto val = handle_value(BasicJsonType::value_t::object, true);
        ref_stack.push_back(val.second);

        // check object limit
        if (ref_stack.back() && JSON_HEDLEY_UNLIKELY(len != static_cast<std::size_t>(-1) && len > ref_stack.back()->max_size()))
        {
            JSON_THROW(out_of_range::create(408, concat("excessive object size: ", std::to_string(len)), ref_stack.back()));
        }

        return true;
    }

    bool key(string_t& val)
    {
        BasicJsonType k = BasicJsonType(val);

        // check callback for key
        const bool keep = callback(static_cast<int>(ref_stack.size()), parse_event_t::key, k);
        key_keep_stack.push_back(keep);

        // add discarded value at given key and store the reference for later
        if (keep && ref_stack.back())
        {
            object_element = &(ref_stack.back()->m_data.m_value.object->operator[](val) = discarded);
        }

        return true;
    }

    bool end_object()
    {
        if (ref_stack.back())
        {
            if (!callback(static_cast<int>(ref_stack.size()) - 1, parse_event_t::object_end, *ref_stack.back()))
            {
                // discard object
                *ref_stack.back() = discarded;
            }
            else
            {
                ref_stack.back()->set_parents();
            }
        }

        JSON_ASSERT(!ref_stack.empty());
        JSON_ASSERT(!keep_stack.empty());
        ref_stack.pop_back();
        keep_stack.pop_back();

        if (!ref_stack.empty() && ref_stack.back() && ref_stack.back()->is_structured())
        {
            // remove discarded value
            for (auto it = ref_stack.back()->begin(); it != ref_stack.back()->end(); ++it)
            {
                if (it->is_discarded())
                {
                    ref_stack.back()->erase(it);
                    break;
                }
            }
        }

        return true;
    }

    bool start_array(std::size_t len)
    {
        const bool keep = callback(static_cast<int>(ref_stack.size()), parse_event_t::array_start, discarded);
        keep_stack.push_back(keep);

        auto val = handle_value(BasicJsonType::value_t::array, true);
        ref_stack.push_back(val.second);

        // check array limit
        if (ref_stack.back() && JSON_HEDLEY_UNLIKELY(len != static_cast<std::size_t>(-1) && len > ref_stack.back()->max_size()))
        {
            JSON_THROW(out_of_range::create(408, concat("excessive array size: ", std::to_string(len)), ref_stack.back()));
        }

        return true;
    }

    bool end_array()
    {
        bool keep = true;

        if (ref_stack.back())
        {
            keep = callback(static_cast<int>(ref_stack.size()) - 1, parse_event_t::array_end, *ref_stack.back());
            if (keep)
            {
                ref_stack.back()->set_parents();
            }
            else
            {
                // discard array
                *ref_stack.back() = discarded;
            }
        }

        JSON_ASSERT(!ref_stack.empty());
        JSON_ASSERT(!keep_stack.empty());
        ref_stack.pop_back();
        keep_stack.pop_back();

        // remove discarded value
        if (!keep && !ref_stack.empty() && ref_stack.back()->is_array())
        {
            ref_stack.back()->m_data.m_value.array->pop_back();
        }

        return true;
    }

    template<class Exception>
    bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/,
                     const Exception& ex)
    {
        errored = true;
        static_cast<void>(ex);
        if (allow_exceptions)
        {
            JSON_THROW(ex);
        }
        return false;
    }

    constexpr bool is_errored() const
    {
        return errored;
    }

  private:
    /*!
    @param[in] v  value to add to the JSON value we build during parsing
    @param[in] skip_callback  whether we should skip calling the callback
               function; this is required after start_array() and
               start_object() SAX events, because otherwise we would call the
               callback function with an empty array or object, respectively.

    @invariant If the ref stack is empty, then the passed value will be the new
               root.
    @invariant If the ref stack contains a value, then it is an array or an
               object to which we can add elements

    @return pair of boolean (whether value should be kept) and pointer (to the
            passed value in the ref_stack hierarchy; nullptr if not kept)
    */
    template<typename Value>
    std::pair<bool, BasicJsonType*> handle_value(Value&& v, const bool skip_callback = false)
    {
        JSON_ASSERT(!keep_stack.empty());

        // do not handle this value if we know it would be added to a discarded
        // container
        if (!keep_stack.back())
        {
            return {false, nullptr};
        }

        // create value
        auto value = BasicJsonType(std::forward<Value>(v));

        // check callback
        const bool keep = skip_callback || callback(static_cast<int>(ref_stack.size()), parse_event_t::value, value);

        // do not handle this value if we just learnt it shall be discarded
        if (!keep)
        {
            return {false, nullptr};
        }

        if (ref_stack.empty())
        {
            root = std::move(value);
            return {true, &root};
        }

        // skip this value if we already decided to skip the parent
        // (https://github.com/nlohmann/json/issues/971#issuecomment-413678360)
        if (!ref_stack.back())
        {
            return {false, nullptr};
        }

        // we now only expect arrays and objects
        JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object());

        // array
        if (ref_stack.back()->is_array())
        {
            ref_stack.back()->m_data.m_value.array->emplace_back(std::move(value));
            return {true, &(ref_stack.back()->m_data.m_value.array->back())};
        }

        // object
        JSON_ASSERT(ref_stack.back()->is_object());
        // check if we should store an element for the current key
        JSON_ASSERT(!key_keep_stack.empty());
        const bool store_element = key_keep_stack.back();
        key_keep_stack.pop_back();

        if (!store_element)
        {
            return {false, nullptr};
        }

        JSON_ASSERT(object_element);
        *object_element = std::move(value);
        return {true, object_element};
    }

    /// the parsed JSON value
    BasicJsonType& root;
    /// stack to model hierarchy of values
    std::vector<BasicJsonType*> ref_stack {};
    /// stack to manage which values to keep
    std::vector<bool> keep_stack {}; // NOLINT(readability-redundant-member-init)
    /// stack to manage which object keys to keep
    std::vector<bool> key_keep_stack {}; // NOLINT(readability-redundant-member-init)
    /// helper to hold the reference for the next object element
    BasicJsonType* object_element = nullptr;
    /// whether a syntax error occurred
    bool errored = false;
    /// callback function
    const parser_callback_t callback = nullptr;
    /// whether to throw exceptions in case of errors
    const bool allow_exceptions = true;
    /// a discarded value for the callback
    BasicJsonType discarded = BasicJsonType::value_t::discarded;
};

template<typename BasicJsonType>
class json_sax_acceptor
{
  public:
    using number_integer_t = typename BasicJsonType::number_integer_t;
    using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
    using number_float_t = typename BasicJsonType::number_float_t;
    using string_t = typename BasicJsonType::string_t;
    using binary_t = typename BasicJsonType::binary_t;

    bool null()
    {
        return true;
    }

    bool boolean(bool /*unused*/)
    {
        return true;
    }

    bool number_integer(number_integer_t /*unused*/)
    {
        return true;
    }

    bool number_unsigned(number_unsigned_t /*unused*/)
    {
        return true;
    }

    bool number_float(number_float_t /*unused*/, const string_t& /*unused*/)
    {
        return true;
    }

    bool string(string_t& /*unused*/)
    {
        return true;
    }

    bool binary(binary_t& /*unused*/)
    {
        return true;
    }

    bool start_object(std::size_t /*unused*/ = static_cast<std::size_t>(-1))
    {
        return true;
    }

    bool key(string_t& /*unused*/)
    {
        return true;
    }

    bool end_object()
    {
        return true;
    }

    bool start_array(std::size_t /*unused*/ = static_cast<std::size_t>(-1))
    {
        return true;
    }

    bool end_array()
    {
        return true;
    }

    bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const detail::exception& /*unused*/)
    {
        return false;
    }
};

} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
File diff suppressed because it is too large
@@ -1,519 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <cmath>  // isfinite
#include <cstdint>  // uint8_t
#include <functional>  // function
#include <string>  // string
#include <utility>  // move
#include <vector>  // vector

#include <nlohmann/detail/exceptions.hpp>
#include <nlohmann/detail/input/input_adapters.hpp>
#include <nlohmann/detail/input/json_sax.hpp>
#include <nlohmann/detail/input/lexer.hpp>
#include <nlohmann/detail/macro_scope.hpp>
#include <nlohmann/detail/meta/is_sax.hpp>
#include <nlohmann/detail/string_concat.hpp>
#include <nlohmann/detail/value_t.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{
////////////
// parser //
////////////

enum class parse_event_t : std::uint8_t
{
    /// the parser read `{` and started to process a JSON object
    object_start,
    /// the parser read `}` and finished processing a JSON object
    object_end,
    /// the parser read `[` and started to process a JSON array
    array_start,
    /// the parser read `]` and finished processing a JSON array
    array_end,
    /// the parser read a key of a value in an object
    key,
    /// the parser finished reading a JSON value
    value
};

template<typename BasicJsonType>
using parser_callback_t =
    std::function<bool(int /*depth*/, parse_event_t /*event*/, BasicJsonType& /*parsed*/)>;
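basic_json exposes this alias as json::parser_callback_t and consults it at every parse_event_t; returning false discards the element that was just parsed. A short sketch of a filtering callback (the drop_metadata name and the sample input are illustrative, not part of this diff):

#include <nlohmann/json.hpp>
#include <iostream>

using json = nlohmann::json;

int main()
{
    // Skip every object member whose key is "metadata" while parsing.
    json::parser_callback_t drop_metadata =
        [](int /*depth*/, json::parse_event_t event, json& parsed)
    {
        return !(event == json::parse_event_t::key && parsed == json("metadata"));
    };

    const json filtered = json::parse(R"({"value": 42, "metadata": "blob"})", drop_metadata);
    std::cout << filtered.dump() << '\n';  // {"value":42}
}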

/*!
@brief syntax analysis

This class implements a recursive descent parser.
*/
template<typename BasicJsonType, typename InputAdapterType>
class parser
{
    using number_integer_t = typename BasicJsonType::number_integer_t;
    using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
    using number_float_t = typename BasicJsonType::number_float_t;
    using string_t = typename BasicJsonType::string_t;
    using lexer_t = lexer<BasicJsonType, InputAdapterType>;
    using token_type = typename lexer_t::token_type;

  public:
    /// a parser reading from an input adapter
    explicit parser(InputAdapterType&& adapter,
                    const parser_callback_t<BasicJsonType> cb = nullptr,
                    const bool allow_exceptions_ = true,
                    const bool skip_comments = false)
        : callback(cb)
        , m_lexer(std::move(adapter), skip_comments)
        , allow_exceptions(allow_exceptions_)
    {
        // read first token
        get_token();
    }

    /*!
    @brief public parser interface

    @param[in] strict      whether to expect the last token to be EOF
    @param[in,out] result  parsed JSON value

    @throw parse_error.101 in case of an unexpected token
    @throw parse_error.102 if to_unicode fails or surrogate error
    @throw parse_error.103 if to_unicode fails
    */
    void parse(const bool strict, BasicJsonType& result)
    {
        if (callback)
        {
            json_sax_dom_callback_parser<BasicJsonType> sdp(result, callback, allow_exceptions);
            sax_parse_internal(&sdp);

            // in strict mode, input must be completely read
            if (strict && (get_token() != token_type::end_of_input))
            {
                sdp.parse_error(m_lexer.get_position(),
                                m_lexer.get_token_string(),
                                parse_error::create(101, m_lexer.get_position(),
                                                    exception_message(token_type::end_of_input, "value"), nullptr));
            }

            // in case of an error, return discarded value
            if (sdp.is_errored())
            {
                result = value_t::discarded;
                return;
            }

            // set top-level value to null if it was discarded by the callback
            // function
            if (result.is_discarded())
            {
                result = nullptr;
            }
        }
        else
        {
            json_sax_dom_parser<BasicJsonType> sdp(result, allow_exceptions);
            sax_parse_internal(&sdp);

            // in strict mode, input must be completely read
            if (strict && (get_token() != token_type::end_of_input))
            {
                sdp.parse_error(m_lexer.get_position(),
                                m_lexer.get_token_string(),
                                parse_error::create(101, m_lexer.get_position(), exception_message(token_type::end_of_input, "value"), nullptr));
            }

            // in case of an error, return discarded value
            if (sdp.is_errored())
            {
                result = value_t::discarded;
                return;
            }
        }

        result.assert_invariant();
    }

    /*!
    @brief public accept interface

    @param[in] strict  whether to expect the last token to be EOF
    @return whether the input is a proper JSON text
    */
    bool accept(const bool strict = true)
    {
        json_sax_acceptor<BasicJsonType> sax_acceptor;
        return sax_parse(&sax_acceptor, strict);
    }

    template<typename SAX>
    JSON_HEDLEY_NON_NULL(2)
    bool sax_parse(SAX* sax, const bool strict = true)
    {
        (void)detail::is_sax_static_asserts<SAX, BasicJsonType> {};
        const bool result = sax_parse_internal(sax);

        // strict mode: next byte must be EOF
        if (result && strict && (get_token() != token_type::end_of_input))
        {
            return sax->parse_error(m_lexer.get_position(),
                                    m_lexer.get_token_string(),
                                    parse_error::create(101, m_lexer.get_position(), exception_message(token_type::end_of_input, "value"), nullptr));
        }

        return result;
    }

  private:
    template<typename SAX>
    JSON_HEDLEY_NON_NULL(2)
    bool sax_parse_internal(SAX* sax)
    {
        // stack to remember the hierarchy of structured values we are parsing
        // true = array; false = object
        std::vector<bool> states;
        // value to avoid a goto (see comment where set to true)
        bool skip_to_state_evaluation = false;

        while (true)
        {
            if (!skip_to_state_evaluation)
            {
                // invariant: get_token() was called before each iteration
                switch (last_token)
                {
                    case token_type::begin_object:
                    {
                        if (JSON_HEDLEY_UNLIKELY(!sax->start_object(static_cast<std::size_t>(-1))))
                        {
                            return false;
                        }

                        // closing } -> we are done
                        if (get_token() == token_type::end_object)
                        {
                            if (JSON_HEDLEY_UNLIKELY(!sax->end_object()))
                            {
                                return false;
                            }
                            break;
                        }

                        // parse key
                        if (JSON_HEDLEY_UNLIKELY(last_token != token_type::value_string))
                        {
                            return sax->parse_error(m_lexer.get_position(),
                                                    m_lexer.get_token_string(),
                                                    parse_error::create(101, m_lexer.get_position(), exception_message(token_type::value_string, "object key"), nullptr));
                        }
                        if (JSON_HEDLEY_UNLIKELY(!sax->key(m_lexer.get_string())))
                        {
                            return false;
                        }

                        // parse separator (:)
                        if (JSON_HEDLEY_UNLIKELY(get_token() != token_type::name_separator))
                        {
                            return sax->parse_error(m_lexer.get_position(),
                                                    m_lexer.get_token_string(),
                                                    parse_error::create(101, m_lexer.get_position(), exception_message(token_type::name_separator, "object separator"), nullptr));
                        }

                        // remember we are now inside an object
                        states.push_back(false);

                        // parse values
                        get_token();
                        continue;
                    }

                    case token_type::begin_array:
                    {
                        if (JSON_HEDLEY_UNLIKELY(!sax->start_array(static_cast<std::size_t>(-1))))
                        {
                            return false;
                        }

                        // closing ] -> we are done
                        if (get_token() == token_type::end_array)
                        {
                            if (JSON_HEDLEY_UNLIKELY(!sax->end_array()))
                            {
                                return false;
                            }
                            break;
                        }

                        // remember we are now inside an array
                        states.push_back(true);

                        // parse values (no need to call get_token)
                        continue;
                    }

                    case token_type::value_float:
                    {
                        const auto res = m_lexer.get_number_float();

                        if (JSON_HEDLEY_UNLIKELY(!std::isfinite(res)))
                        {
                            return sax->parse_error(m_lexer.get_position(),
                                                    m_lexer.get_token_string(),
                                                    out_of_range::create(406, concat("number overflow parsing '", m_lexer.get_token_string(), '\''), nullptr));
                        }

                        if (JSON_HEDLEY_UNLIKELY(!sax->number_float(res, m_lexer.get_string())))
                        {
                            return false;
                        }

                        break;
                    }

                    case token_type::literal_false:
                    {
                        if (JSON_HEDLEY_UNLIKELY(!sax->boolean(false)))
                        {
                            return false;
                        }
                        break;
                    }

                    case token_type::literal_null:
                    {
                        if (JSON_HEDLEY_UNLIKELY(!sax->null()))
                        {
                            return false;
                        }
                        break;
                    }

                    case token_type::literal_true:
                    {
                        if (JSON_HEDLEY_UNLIKELY(!sax->boolean(true)))
                        {
                            return false;
                        }
                        break;
                    }

                    case token_type::value_integer:
                    {
                        if (JSON_HEDLEY_UNLIKELY(!sax->number_integer(m_lexer.get_number_integer())))
                        {
                            return false;
                        }
                        break;
                    }

                    case token_type::value_string:
                    {
                        if (JSON_HEDLEY_UNLIKELY(!sax->string(m_lexer.get_string())))
                        {
                            return false;
                        }
                        break;
                    }

                    case token_type::value_unsigned:
                    {
                        if (JSON_HEDLEY_UNLIKELY(!sax->number_unsigned(m_lexer.get_number_unsigned())))
                        {
                            return false;
                        }
                        break;
                    }

                    case token_type::parse_error:
                    {
                        // using "uninitialized" to avoid "expected" message
                        return sax->parse_error(m_lexer.get_position(),
                                                m_lexer.get_token_string(),
                                                parse_error::create(101, m_lexer.get_position(), exception_message(token_type::uninitialized, "value"), nullptr));
                    }
                    case token_type::end_of_input:
                    {
                        if (JSON_HEDLEY_UNLIKELY(m_lexer.get_position().chars_read_total == 1))
                        {
                            return sax->parse_error(m_lexer.get_position(),
                                                    m_lexer.get_token_string(),
                                                    parse_error::create(101, m_lexer.get_position(),
                                                                        "attempting to parse an empty input; check that your input string or stream contains the expected JSON", nullptr));
                        }

                        return sax->parse_error(m_lexer.get_position(),
                                                m_lexer.get_token_string(),
                                                parse_error::create(101, m_lexer.get_position(), exception_message(token_type::literal_or_value, "value"), nullptr));
                    }
                    case token_type::uninitialized:
                    case token_type::end_array:
                    case token_type::end_object:
                    case token_type::name_separator:
                    case token_type::value_separator:
                    case token_type::literal_or_value:
                    default: // the last token was unexpected
                    {
                        return sax->parse_error(m_lexer.get_position(),
                                                m_lexer.get_token_string(),
                                                parse_error::create(101, m_lexer.get_position(), exception_message(token_type::literal_or_value, "value"), nullptr));
                    }
                }
            }
            else
            {
                skip_to_state_evaluation = false;
            }

            // we reached this line after we successfully parsed a value
            if (states.empty())
            {
                // empty stack: we reached the end of the hierarchy: done
                return true;
            }

            if (states.back())  // array
            {
                // comma -> next value
                if (get_token() == token_type::value_separator)
                {
                    // parse a new value
                    get_token();
                    continue;
                }

                // closing ]
                if (JSON_HEDLEY_LIKELY(last_token == token_type::end_array))
                {
                    if (JSON_HEDLEY_UNLIKELY(!sax->end_array()))
                    {
                        return false;
                    }

                    // We are done with this array. Before we can parse a
                    // new value, we need to evaluate the new state first.
                    // By setting skip_to_state_evaluation to true, we
                    // are effectively jumping to the beginning of this if.
                    JSON_ASSERT(!states.empty());
                    states.pop_back();
                    skip_to_state_evaluation = true;
                    continue;
                }

                return sax->parse_error(m_lexer.get_position(),
                                        m_lexer.get_token_string(),
                                        parse_error::create(101, m_lexer.get_position(), exception_message(token_type::end_array, "array"), nullptr));
            }

            // states.back() is false -> object

            // comma -> next value
            if (get_token() == token_type::value_separator)
            {
                // parse key
                if (JSON_HEDLEY_UNLIKELY(get_token() != token_type::value_string))
                {
                    return sax->parse_error(m_lexer.get_position(),
                                            m_lexer.get_token_string(),
                                            parse_error::create(101, m_lexer.get_position(), exception_message(token_type::value_string, "object key"), nullptr));
                }

                if (JSON_HEDLEY_UNLIKELY(!sax->key(m_lexer.get_string())))
                {
                    return false;
                }

                // parse separator (:)
                if (JSON_HEDLEY_UNLIKELY(get_token() != token_type::name_separator))
                {
                    return sax->parse_error(m_lexer.get_position(),
                                            m_lexer.get_token_string(),
                                            parse_error::create(101, m_lexer.get_position(), exception_message(token_type::name_separator, "object separator"), nullptr));
                }

                // parse values
                get_token();
                continue;
            }

            // closing }
            if (JSON_HEDLEY_LIKELY(last_token == token_type::end_object))
            {
                if (JSON_HEDLEY_UNLIKELY(!sax->end_object()))
                {
                    return false;
                }

                // We are done with this object. Before we can parse a
                // new value, we need to evaluate the new state first.
                // By setting skip_to_state_evaluation to true, we
                // are effectively jumping to the beginning of this if.
                JSON_ASSERT(!states.empty());
                states.pop_back();
                skip_to_state_evaluation = true;
                continue;
            }

            return sax->parse_error(m_lexer.get_position(),
                                    m_lexer.get_token_string(),
                                    parse_error::create(101, m_lexer.get_position(), exception_message(token_type::end_object, "object"), nullptr));
        }
    }

    /// get next token from lexer
    token_type get_token()
    {
        return last_token = m_lexer.scan();
    }

    std::string exception_message(const token_type expected, const std::string& context)
    {
        std::string error_msg = "syntax error ";

        if (!context.empty())
        {
            error_msg += concat("while parsing ", context, ' ');
        }

        error_msg += "- ";

        if (last_token == token_type::parse_error)
        {
            error_msg += concat(m_lexer.get_error_message(), "; last read: '",
                                m_lexer.get_token_string(), '\'');
        }
        else
        {
            error_msg += concat("unexpected ", lexer_t::token_type_name(last_token));
        }

        if (expected != token_type::uninitialized)
        {
            error_msg += concat("; expected ", lexer_t::token_type_name(expected));
        }

        return error_msg;
    }

  private:
    /// callback function
    const parser_callback_t<BasicJsonType> callback = nullptr;
    /// the type of the last read token
    token_type last_token = token_type::uninitialized;
    /// the lexer
    lexer_t m_lexer;
    /// whether to throw exceptions in case of errors
    const bool allow_exceptions = true;
};

} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
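One consequence of the parse() implementation above is worth spelling out: with allow_exceptions disabled, errors do not throw but yield a value of type discarded, and a top-level value discarded by the callback becomes null. A sketch of the non-throwing path (the input string is illustrative):

#include <nlohmann/json.hpp>
#include <iostream>

using json = nlohmann::json;

int main()
{
    // third argument: allow_exceptions = false
    const json bad = json::parse("{invalid json", nullptr, false);
    if (bad.is_discarded())
    {
        std::cout << "parse failed without throwing\n";
    }
}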
@@ -1,37 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <cstddef>  // size_t

#include <nlohmann/detail/abi_macros.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

/// struct to capture the start position of the current token
struct position_t
{
    /// the total number of characters read
    std::size_t chars_read_total = 0;
    /// the number of characters read in the current line
    std::size_t chars_read_current_line = 0;
    /// the number of lines read
    std::size_t lines_read = 0;

    /// conversion to size_t to preserve SAX interface
    constexpr operator size_t() const
    {
        return chars_read_total;
    }
};

} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
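position_t feeds the character counter that parse errors report. From the public API this surfaces as parse_error::byte; a sketch under that assumption (the truncated input is illustrative):

#include <nlohmann/json.hpp>
#include <iostream>

using json = nlohmann::json;

int main()
{
    try
    {
        json::parse(R"({"key": )");  // truncated on purpose
    }
    catch (const json::parse_error& e)
    {
        // e.byte is the chars_read_total tracked via position_t
        std::cout << "error at byte " << e.byte << '\n';
    }
}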
@@ -1,35 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <nlohmann/detail/abi_macros.hpp>
#include <nlohmann/detail/iterators/primitive_iterator.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

/*!
@brief an iterator value

@note This structure could easily be a union, but MSVC currently does not allow
union members with complex constructors, see https://github.com/nlohmann/json/pull/105.
*/
template<typename BasicJsonType> struct internal_iterator
{
    /// iterator for JSON objects
    typename BasicJsonType::object_t::iterator object_iterator {};
    /// iterator for JSON arrays
    typename BasicJsonType::array_t::iterator array_iterator {};
    /// generic iterator for all other types
    primitive_iterator_t primitive_iterator {};
};

} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
@@ -1,751 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <iterator>  // iterator, random_access_iterator_tag, bidirectional_iterator_tag, advance, next
#include <type_traits>  // conditional, is_const, remove_const

#include <nlohmann/detail/exceptions.hpp>
#include <nlohmann/detail/iterators/internal_iterator.hpp>
#include <nlohmann/detail/iterators/primitive_iterator.hpp>
#include <nlohmann/detail/macro_scope.hpp>
#include <nlohmann/detail/meta/cpp_future.hpp>
#include <nlohmann/detail/meta/type_traits.hpp>
#include <nlohmann/detail/value_t.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

// forward declare, to be able to friend it later on
template<typename IteratorType> class iteration_proxy;
template<typename IteratorType> class iteration_proxy_value;

/*!
@brief a template for a bidirectional iterator for the @ref basic_json class
This class implements both iterators (iterator and const_iterator) for the
@ref basic_json class.
@note An iterator is called *initialized* when a pointer to a JSON value has
      been set (e.g., by a constructor or a copy assignment). If the iterator is
      default-constructed, it is *uninitialized* and most methods are undefined.
      **The library uses assertions to detect calls on uninitialized iterators.**
@requirement The class satisfies the following concept requirements:
-
[BidirectionalIterator](https://en.cppreference.com/w/cpp/named_req/BidirectionalIterator):
  The iterator can be moved in both directions (i.e. incremented and
  decremented).
@since version 1.0.0, simplified in version 2.0.9, change to bidirectional
       iterators in version 3.0.0 (see https://github.com/nlohmann/json/issues/593)
*/
template<typename BasicJsonType>
class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-special-member-functions)
{
    /// the iterator with BasicJsonType of different const-ness
    using other_iter_impl = iter_impl<typename std::conditional<std::is_const<BasicJsonType>::value, typename std::remove_const<BasicJsonType>::type, const BasicJsonType>::type>;
    /// allow basic_json to access private members
    friend other_iter_impl;
    friend BasicJsonType;
    friend iteration_proxy<iter_impl>;
    friend iteration_proxy_value<iter_impl>;

    using object_t = typename BasicJsonType::object_t;
    using array_t = typename BasicJsonType::array_t;
    // make sure BasicJsonType is basic_json or const basic_json
    static_assert(is_basic_json<typename std::remove_const<BasicJsonType>::type>::value,
                  "iter_impl only accepts (const) basic_json");
    // superficial check for the LegacyBidirectionalIterator named requirement
    static_assert(std::is_base_of<std::bidirectional_iterator_tag, std::bidirectional_iterator_tag>::value
                  && std::is_base_of<std::bidirectional_iterator_tag, typename std::iterator_traits<typename array_t::iterator>::iterator_category>::value,
                  "basic_json iterator assumes array and object type iterators satisfy the LegacyBidirectionalIterator named requirement.");

  public:
    /// The std::iterator class template (used as a base class to provide typedefs) is deprecated in C++17.
    /// The C++ Standard has never required user-defined iterators to derive from std::iterator.
    /// A user-defined iterator should provide publicly accessible typedefs named
    /// iterator_category, value_type, difference_type, pointer, and reference.
    /// Note that value_type is required to be non-const, even for constant iterators.
    using iterator_category = std::bidirectional_iterator_tag;

    /// the type of the values when the iterator is dereferenced
    using value_type = typename BasicJsonType::value_type;
    /// a type to represent differences between iterators
    using difference_type = typename BasicJsonType::difference_type;
    /// defines a pointer to the type iterated over (value_type)
    using pointer = typename std::conditional<std::is_const<BasicJsonType>::value,
          typename BasicJsonType::const_pointer,
          typename BasicJsonType::pointer>::type;
    /// defines a reference to the type iterated over (value_type)
    using reference =
        typename std::conditional<std::is_const<BasicJsonType>::value,
        typename BasicJsonType::const_reference,
        typename BasicJsonType::reference>::type;

    iter_impl() = default;
    ~iter_impl() = default;
    iter_impl(iter_impl&&) noexcept = default;
    iter_impl& operator=(iter_impl&&) noexcept = default;

    /*!
    @brief constructor for a given JSON instance
    @param[in] object  pointer to a JSON object for this iterator
    @pre object != nullptr
    @post The iterator is initialized; i.e. `m_object != nullptr`.
    */
    explicit iter_impl(pointer object) noexcept : m_object(object)
    {
        JSON_ASSERT(m_object != nullptr);

        switch (m_object->m_data.m_type)
        {
            case value_t::object:
            {
                m_it.object_iterator = typename object_t::iterator();
                break;
            }

            case value_t::array:
            {
                m_it.array_iterator = typename array_t::iterator();
                break;
            }

            case value_t::null:
            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
            {
                m_it.primitive_iterator = primitive_iterator_t();
                break;
            }
        }
    }

    /*!
    @note The conventional copy constructor and copy assignment are implicitly
          defined. Combined with the following converting constructor and
          assignment, they support: (1) copy from iterator to iterator, (2)
          copy from const iterator to const iterator, and (3) conversion from
          iterator to const iterator. However conversion from const iterator
          to iterator is not defined.
    */

    /*!
    @brief const copy constructor
    @param[in] other  const iterator to copy from
    @note This copy constructor had to be defined explicitly to circumvent a bug
          occurring on msvc v19.0 compiler (VS 2015) debug build. For more
          information refer to: https://github.com/nlohmann/json/issues/1608
    */
    iter_impl(const iter_impl<const BasicJsonType>& other) noexcept
        : m_object(other.m_object), m_it(other.m_it)
    {}

    /*!
    @brief converting assignment
    @param[in] other  const iterator to copy from
    @return const/non-const iterator
    @note It is not checked whether @a other is initialized.
    */
    iter_impl& operator=(const iter_impl<const BasicJsonType>& other) noexcept
    {
        if (&other != this)
        {
            m_object = other.m_object;
            m_it = other.m_it;
        }
        return *this;
    }

    /*!
    @brief converting constructor
    @param[in] other  non-const iterator to copy from
    @note It is not checked whether @a other is initialized.
    */
    iter_impl(const iter_impl<typename std::remove_const<BasicJsonType>::type>& other) noexcept
        : m_object(other.m_object), m_it(other.m_it)
    {}

    /*!
    @brief converting assignment
    @param[in] other  non-const iterator to copy from
    @return const/non-const iterator
    @note It is not checked whether @a other is initialized.
    */
    iter_impl& operator=(const iter_impl<typename std::remove_const<BasicJsonType>::type>& other) noexcept // NOLINT(cert-oop54-cpp)
    {
        m_object = other.m_object;
        m_it = other.m_it;
        return *this;
    }

  JSON_PRIVATE_UNLESS_TESTED:
    /*!
    @brief set the iterator to the first value
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    void set_begin() noexcept
    {
        JSON_ASSERT(m_object != nullptr);

        switch (m_object->m_data.m_type)
        {
            case value_t::object:
            {
                m_it.object_iterator = m_object->m_data.m_value.object->begin();
                break;
            }

            case value_t::array:
            {
                m_it.array_iterator = m_object->m_data.m_value.array->begin();
                break;
            }

            case value_t::null:
            {
                // set to end so begin()==end() is true: null is empty
                m_it.primitive_iterator.set_end();
                break;
            }

            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
            {
                m_it.primitive_iterator.set_begin();
                break;
            }
        }
    }

    /*!
    @brief set the iterator past the last value
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    void set_end() noexcept
    {
        JSON_ASSERT(m_object != nullptr);

        switch (m_object->m_data.m_type)
        {
            case value_t::object:
            {
                m_it.object_iterator = m_object->m_data.m_value.object->end();
                break;
            }

            case value_t::array:
            {
                m_it.array_iterator = m_object->m_data.m_value.array->end();
                break;
            }

            case value_t::null:
            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
            {
                m_it.primitive_iterator.set_end();
                break;
            }
        }
    }

  public:
    /*!
    @brief return a reference to the value pointed to by the iterator
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    reference operator*() const
    {
        JSON_ASSERT(m_object != nullptr);

        switch (m_object->m_data.m_type)
        {
            case value_t::object:
            {
                JSON_ASSERT(m_it.object_iterator != m_object->m_data.m_value.object->end());
                return m_it.object_iterator->second;
            }

            case value_t::array:
            {
                JSON_ASSERT(m_it.array_iterator != m_object->m_data.m_value.array->end());
                return *m_it.array_iterator;
            }

            case value_t::null:
                JSON_THROW(invalid_iterator::create(214, "cannot get value", m_object));

            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
            {
                if (JSON_HEDLEY_LIKELY(m_it.primitive_iterator.is_begin()))
                {
                    return *m_object;
                }

                JSON_THROW(invalid_iterator::create(214, "cannot get value", m_object));
            }
        }
    }

    /*!
    @brief dereference the iterator
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    pointer operator->() const
    {
        JSON_ASSERT(m_object != nullptr);

        switch (m_object->m_data.m_type)
        {
            case value_t::object:
            {
                JSON_ASSERT(m_it.object_iterator != m_object->m_data.m_value.object->end());
                return &(m_it.object_iterator->second);
            }

            case value_t::array:
            {
                JSON_ASSERT(m_it.array_iterator != m_object->m_data.m_value.array->end());
                return &*m_it.array_iterator;
            }

            case value_t::null:
            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
            {
                if (JSON_HEDLEY_LIKELY(m_it.primitive_iterator.is_begin()))
                {
                    return m_object;
                }

                JSON_THROW(invalid_iterator::create(214, "cannot get value", m_object));
            }
        }
    }

    /*!
    @brief post-increment (it++)
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    iter_impl operator++(int)& // NOLINT(cert-dcl21-cpp)
    {
        auto result = *this;
        ++(*this);
        return result;
    }

    /*!
    @brief pre-increment (++it)
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    iter_impl& operator++()
    {
        JSON_ASSERT(m_object != nullptr);

        switch (m_object->m_data.m_type)
        {
            case value_t::object:
            {
                std::advance(m_it.object_iterator, 1);
                break;
            }

            case value_t::array:
            {
                std::advance(m_it.array_iterator, 1);
                break;
            }

            case value_t::null:
            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
            {
                ++m_it.primitive_iterator;
                break;
            }
        }

        return *this;
    }

    /*!
    @brief post-decrement (it--)
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    iter_impl operator--(int)& // NOLINT(cert-dcl21-cpp)
    {
        auto result = *this;
        --(*this);
        return result;
    }

    /*!
    @brief pre-decrement (--it)
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    iter_impl& operator--()
    {
        JSON_ASSERT(m_object != nullptr);

        switch (m_object->m_data.m_type)
        {
            case value_t::object:
            {
                std::advance(m_it.object_iterator, -1);
                break;
            }

            case value_t::array:
            {
                std::advance(m_it.array_iterator, -1);
                break;
            }

            case value_t::null:
            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
            {
                --m_it.primitive_iterator;
                break;
            }
        }

        return *this;
    }

    /*!
    @brief comparison: equal
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    template < typename IterImpl, detail::enable_if_t < (std::is_same<IterImpl, iter_impl>::value || std::is_same<IterImpl, other_iter_impl>::value), std::nullptr_t > = nullptr >
    bool operator==(const IterImpl& other) const
    {
        // if objects are not the same, the comparison is undefined
        if (JSON_HEDLEY_UNLIKELY(m_object != other.m_object))
        {
            JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers", m_object));
        }

        JSON_ASSERT(m_object != nullptr);

        switch (m_object->m_data.m_type)
        {
            case value_t::object:
                return (m_it.object_iterator == other.m_it.object_iterator);

            case value_t::array:
                return (m_it.array_iterator == other.m_it.array_iterator);

            case value_t::null:
            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
                return (m_it.primitive_iterator == other.m_it.primitive_iterator);
        }
    }

    /*!
    @brief comparison: not equal
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    template < typename IterImpl, detail::enable_if_t < (std::is_same<IterImpl, iter_impl>::value || std::is_same<IterImpl, other_iter_impl>::value), std::nullptr_t > = nullptr >
    bool operator!=(const IterImpl& other) const
    {
        return !operator==(other);
    }

    /*!
    @brief comparison: smaller
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    bool operator<(const iter_impl& other) const
    {
        // if objects are not the same, the comparison is undefined
        if (JSON_HEDLEY_UNLIKELY(m_object != other.m_object))
        {
            JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers", m_object));
        }

        JSON_ASSERT(m_object != nullptr);

        switch (m_object->m_data.m_type)
        {
            case value_t::object:
                JSON_THROW(invalid_iterator::create(213, "cannot compare order of object iterators", m_object));

            case value_t::array:
                return (m_it.array_iterator < other.m_it.array_iterator);

            case value_t::null:
            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
                return (m_it.primitive_iterator < other.m_it.primitive_iterator);
        }
    }

    /*!
    @brief comparison: less than or equal
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    bool operator<=(const iter_impl& other) const
    {
        return !other.operator < (*this);
    }

    /*!
    @brief comparison: greater than
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    bool operator>(const iter_impl& other) const
    {
        return !operator<=(other);
    }

    /*!
    @brief comparison: greater than or equal
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    bool operator>=(const iter_impl& other) const
    {
        return !operator<(other);
    }

    /*!
    @brief add to iterator
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    iter_impl& operator+=(difference_type i)
    {
        JSON_ASSERT(m_object != nullptr);

        switch (m_object->m_data.m_type)
        {
            case value_t::object:
                JSON_THROW(invalid_iterator::create(209, "cannot use offsets with object iterators", m_object));

            case value_t::array:
            {
                std::advance(m_it.array_iterator, i);
                break;
            }

            case value_t::null:
            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
            {
                m_it.primitive_iterator += i;
                break;
            }
        }

        return *this;
    }

    /*!
    @brief subtract from iterator
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    iter_impl& operator-=(difference_type i)
    {
        return operator+=(-i);
    }

    /*!
    @brief add to iterator
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    iter_impl operator+(difference_type i) const
    {
        auto result = *this;
        result += i;
        return result;
    }

    /*!
    @brief addition of distance and iterator
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    friend iter_impl operator+(difference_type i, const iter_impl& it)
    {
        auto result = it;
        result += i;
        return result;
    }

    /*!
    @brief subtract from iterator
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    iter_impl operator-(difference_type i) const
    {
        auto result = *this;
        result -= i;
        return result;
    }

    /*!
    @brief return difference
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    difference_type operator-(const iter_impl& other) const
    {
        JSON_ASSERT(m_object != nullptr);

        switch (m_object->m_data.m_type)
        {
            case value_t::object:
                JSON_THROW(invalid_iterator::create(209, "cannot use offsets with object iterators", m_object));

            case value_t::array:
                return m_it.array_iterator - other.m_it.array_iterator;

            case value_t::null:
            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
                return m_it.primitive_iterator - other.m_it.primitive_iterator;
        }
    }

    /*!
    @brief access to successor
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    reference operator[](difference_type n) const
    {
        JSON_ASSERT(m_object != nullptr);

        switch (m_object->m_data.m_type)
        {
            case value_t::object:
                JSON_THROW(invalid_iterator::create(208, "cannot use operator[] for object iterators", m_object));

            case value_t::array:
                return *std::next(m_it.array_iterator, n);

            case value_t::null:
                JSON_THROW(invalid_iterator::create(214, "cannot get value", m_object));

            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
            {
                if (JSON_HEDLEY_LIKELY(m_it.primitive_iterator.get_value() == -n))
                {
                    return *m_object;
                }

                JSON_THROW(invalid_iterator::create(214, "cannot get value", m_object));
            }
        }
    }

    /*!
    @brief return the key of an object iterator
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    const typename object_t::key_type& key() const
    {
        JSON_ASSERT(m_object != nullptr);

        if (JSON_HEDLEY_LIKELY(m_object->is_object()))
        {
            return m_it.object_iterator->first;
        }

        JSON_THROW(invalid_iterator::create(207, "cannot use key() for non-object iterators", m_object));
    }

    /*!
    @brief return the value of an iterator
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    reference value() const
    {
        return operator*();
    }

  JSON_PRIVATE_UNLESS_TESTED:
    /// associated JSON instance
    pointer m_object = nullptr;
    /// the actual iterator of the associated instance
    internal_iterator<typename std::remove_const<BasicJsonType>::type> m_it {};
};

} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
|
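The arithmetic operators above are only meaningful for array values: object iterators throw invalid_iterator.208/209, and primitive values are guarded by the primitive iterator's position check. A minimal usage sketch, not part of this diff, assuming only the public nlohmann::json API:

#include <nlohmann/json.hpp>
#include <iostream>

int main() {
    nlohmann::json arr = {10, 20, 30, 40};
    auto it = arr.begin() + 2;                       // operator+(difference_type)
    std::cout << *it << '\n';                        // prints 30
    std::cout << (arr.end() - arr.begin()) << '\n';  // operator-: prints 4
    std::cout << arr.begin()[1] << '\n';             // operator[]: prints 20
    // nlohmann::json obj = {{"a", 1}};
    // obj.begin() + 1;  // would throw invalid_iterator.209 for objects
}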
@@ -1,242 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <cstddef> // size_t
#include <iterator> // input_iterator_tag
#include <string> // string, to_string
#include <tuple> // tuple_size, get, tuple_element
#include <utility> // move

#if JSON_HAS_RANGES
    #include <ranges> // enable_borrowed_range
#endif

#include <nlohmann/detail/abi_macros.hpp>
#include <nlohmann/detail/meta/type_traits.hpp>
#include <nlohmann/detail/value_t.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

template<typename string_type>
void int_to_string(string_type& target, std::size_t value)
{
    // For ADL
    using std::to_string;
    target = to_string(value);
}

template<typename IteratorType> class iteration_proxy_value
{
  public:
    using difference_type = std::ptrdiff_t;
    using value_type = iteration_proxy_value;
    using pointer = value_type *;
    using reference = value_type &;
    using iterator_category = std::input_iterator_tag;
    using string_type = typename std::remove_cv<typename std::remove_reference<decltype(std::declval<IteratorType>().key())>::type>::type;

  private:
    /// the iterator
    IteratorType anchor{};
    /// an index for arrays (used to create key names)
    std::size_t array_index = 0;
    /// last stringified array index
    mutable std::size_t array_index_last = 0;
    /// a string representation of the array index
    mutable string_type array_index_str = "0";
    /// an empty string (to return a reference for primitive values)
    string_type empty_str{};

  public:
    explicit iteration_proxy_value() = default;
    explicit iteration_proxy_value(IteratorType it, std::size_t array_index_ = 0)
        noexcept(std::is_nothrow_move_constructible<IteratorType>::value
                 && std::is_nothrow_default_constructible<string_type>::value)
        : anchor(std::move(it))
        , array_index(array_index_)
    {}

    iteration_proxy_value(iteration_proxy_value const&) = default;
    iteration_proxy_value& operator=(iteration_proxy_value const&) = default;
    // older GCCs are a bit fussy and require explicit noexcept specifiers on defaulted functions
    iteration_proxy_value(iteration_proxy_value&&)
        noexcept(std::is_nothrow_move_constructible<IteratorType>::value
                 && std::is_nothrow_move_constructible<string_type>::value) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor,cppcoreguidelines-noexcept-move-operations)
    iteration_proxy_value& operator=(iteration_proxy_value&&)
        noexcept(std::is_nothrow_move_assignable<IteratorType>::value
                 && std::is_nothrow_move_assignable<string_type>::value) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor,cppcoreguidelines-noexcept-move-operations)
    ~iteration_proxy_value() = default;

    /// dereference operator (needed for range-based for)
    const iteration_proxy_value& operator*() const
    {
        return *this;
    }

    /// increment operator (needed for range-based for)
    iteration_proxy_value& operator++()
    {
        ++anchor;
        ++array_index;

        return *this;
    }

    iteration_proxy_value operator++(int)& // NOLINT(cert-dcl21-cpp)
    {
        auto tmp = iteration_proxy_value(anchor, array_index);
        ++anchor;
        ++array_index;
        return tmp;
    }

    /// equality operator (needed for InputIterator)
    bool operator==(const iteration_proxy_value& o) const
    {
        return anchor == o.anchor;
    }

    /// inequality operator (needed for range-based for)
    bool operator!=(const iteration_proxy_value& o) const
    {
        return anchor != o.anchor;
    }

    /// return key of the iterator
    const string_type& key() const
    {
        JSON_ASSERT(anchor.m_object != nullptr);

        switch (anchor.m_object->type())
        {
            // use integer array index as key
            case value_t::array:
            {
                if (array_index != array_index_last)
                {
                    int_to_string(array_index_str, array_index);
                    array_index_last = array_index;
                }
                return array_index_str;
            }

            // use key from the object
            case value_t::object:
                return anchor.key();

            // use an empty key for all primitive types
            case value_t::null:
            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
                return empty_str;
        }
    }

    /// return value of the iterator
    typename IteratorType::reference value() const
    {
        return anchor.value();
    }
};

/// proxy class for the items() function
template<typename IteratorType> class iteration_proxy
{
  private:
    /// the container to iterate
    typename IteratorType::pointer container = nullptr;

  public:
    explicit iteration_proxy() = default;

    /// construct iteration proxy from a container
    explicit iteration_proxy(typename IteratorType::reference cont) noexcept
        : container(&cont) {}

    iteration_proxy(iteration_proxy const&) = default;
    iteration_proxy& operator=(iteration_proxy const&) = default;
    iteration_proxy(iteration_proxy&&) noexcept = default;
    iteration_proxy& operator=(iteration_proxy&&) noexcept = default;
    ~iteration_proxy() = default;

    /// return iterator begin (needed for range-based for)
    iteration_proxy_value<IteratorType> begin() const noexcept
    {
        return iteration_proxy_value<IteratorType>(container->begin());
    }

    /// return iterator end (needed for range-based for)
    iteration_proxy_value<IteratorType> end() const noexcept
    {
        return iteration_proxy_value<IteratorType>(container->end());
    }
};

// Structured Bindings Support
// For further reference see https://blog.tartanllama.xyz/structured-bindings/
// And see https://github.com/nlohmann/json/pull/1391
template<std::size_t N, typename IteratorType, enable_if_t<N == 0, int> = 0>
auto get(const nlohmann::detail::iteration_proxy_value<IteratorType>& i) -> decltype(i.key())
{
    return i.key();
}

// Structured Bindings Support
// For further reference see https://blog.tartanllama.xyz/structured-bindings/
// And see https://github.com/nlohmann/json/pull/1391
template<std::size_t N, typename IteratorType, enable_if_t<N == 1, int> = 0>
auto get(const nlohmann::detail::iteration_proxy_value<IteratorType>& i) -> decltype(i.value())
{
    return i.value();
}

} // namespace detail
NLOHMANN_JSON_NAMESPACE_END

// The Addition to the STD Namespace is required to add
// Structured Bindings Support to the iteration_proxy_value class
// For further reference see https://blog.tartanllama.xyz/structured-bindings/
// And see https://github.com/nlohmann/json/pull/1391
namespace std
{

#if defined(__clang__)
    // Fix: https://github.com/nlohmann/json/issues/1401
    #pragma clang diagnostic push
    #pragma clang diagnostic ignored "-Wmismatched-tags"
#endif

template<typename IteratorType>
class tuple_size<::nlohmann::detail::iteration_proxy_value<IteratorType>> // NOLINT(cert-dcl58-cpp)
    : public std::integral_constant<std::size_t, 2> {};

template<std::size_t N, typename IteratorType>
class tuple_element<N, ::nlohmann::detail::iteration_proxy_value<IteratorType >> // NOLINT(cert-dcl58-cpp)
{
  public:
    using type = decltype(
                     get<N>(std::declval <
                            ::nlohmann::detail::iteration_proxy_value<IteratorType >> ()));
};

#if defined(__clang__)
    #pragma clang diagnostic pop
#endif

} // namespace std

#if JSON_HAS_RANGES
    template <typename IteratorType>
    inline constexpr bool ::std::ranges::enable_borrowed_range<::nlohmann::detail::iteration_proxy<IteratorType>> = true;
#endif
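iteration_proxy is what basic_json::items() returns, and the std::tuple_size/tuple_element specializations above are what allow structured bindings on its elements. A small usage sketch, not part of this diff, assuming only the public nlohmann::json API:

#include <nlohmann/json.hpp>
#include <iostream>

int main() {
    nlohmann::json j = {{"name", "BayesNet"}, {"version", 3}};
    // Each element is an iteration_proxy_value supporting key()/value()
    // and, via the tuple protocol above, structured bindings.
    for (auto& [key, value] : j.items()) {
        std::cout << key << " = " << value << '\n';
    }

    // For arrays, key() yields the stringified index ("0", "1", ...).
    nlohmann::json a = {true, false};
    for (auto& el : a.items()) {
        std::cout << el.key() << ": " << el.value() << '\n';
    }
}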
@@ -1,61 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <iterator> // random_access_iterator_tag

#include <nlohmann/detail/abi_macros.hpp>
#include <nlohmann/detail/meta/void_t.hpp>
#include <nlohmann/detail/meta/cpp_future.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

template<typename It, typename = void>
struct iterator_types {};

template<typename It>
struct iterator_types <
    It,
    void_t<typename It::difference_type, typename It::value_type, typename It::pointer,
           typename It::reference, typename It::iterator_category >>
{
    using difference_type = typename It::difference_type;
    using value_type = typename It::value_type;
    using pointer = typename It::pointer;
    using reference = typename It::reference;
    using iterator_category = typename It::iterator_category;
};

// This is required as some compilers implement std::iterator_traits in a way that
// doesn't work with SFINAE. See https://github.com/nlohmann/json/issues/1341.
template<typename T, typename = void>
struct iterator_traits
{
};

template<typename T>
struct iterator_traits < T, enable_if_t < !std::is_pointer<T>::value >>
    : iterator_types<T>
{
};

template<typename T>
struct iterator_traits<T*, enable_if_t<std::is_object<T>::value>>
{
    using iterator_category = std::random_access_iterator_tag;
    using value_type = T;
    using difference_type = ptrdiff_t;
    using pointer = T*;
    using reference = T&;
};

} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
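The point of this shim is that probing detail::iterator_traits<T> in SFINAE contexts stays well-formed even when T is not an iterator, which some std::iterator_traits implementations did not guarantee (see the issue linked above). A sketch of the two specializations in action, not part of this diff, assuming the detail namespace is reachable through the library's inline ABI namespace:

#include <nlohmann/json.hpp>
#include <type_traits>
#include <vector>

// Pointer specialization: object pointers get random_access_iterator_tag.
static_assert(std::is_same<
                  nlohmann::detail::iterator_traits<int*>::iterator_category,
                  std::random_access_iterator_tag>::value,
              "pointer path");

// Non-pointer specialization: nested types are pulled in via iterator_types.
static_assert(std::is_same<
                  nlohmann::detail::iterator_traits<std::vector<int>::iterator>::value_type,
                  int>::value,
              "class-iterator path");

int main() {}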
@@ -1,130 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <cstddef> // ptrdiff_t
#include <iterator> // reverse_iterator
#include <utility> // declval

#include <nlohmann/detail/abi_macros.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

//////////////////////
// reverse_iterator //
//////////////////////

/*!
@brief a template for a reverse iterator class

@tparam Base the base iterator type to reverse. Valid types are @ref
iterator (to create @ref reverse_iterator) and @ref const_iterator (to
create @ref const_reverse_iterator).

@requirement The class satisfies the following concept requirements:
-
[BidirectionalIterator](https://en.cppreference.com/w/cpp/named_req/BidirectionalIterator):
  The iterator can be moved in both directions (i.e. incremented and
  decremented).
- [OutputIterator](https://en.cppreference.com/w/cpp/named_req/OutputIterator):
  It is possible to write to the pointed-to element (only if @a Base is
  @ref iterator).

@since version 1.0.0
*/
template<typename Base>
class json_reverse_iterator : public std::reverse_iterator<Base>
{
  public:
    using difference_type = std::ptrdiff_t;
    /// shortcut to the reverse iterator adapter
    using base_iterator = std::reverse_iterator<Base>;
    /// the reference type for the pointed-to element
    using reference = typename Base::reference;

    /// create reverse iterator from iterator
    explicit json_reverse_iterator(const typename base_iterator::iterator_type& it) noexcept
        : base_iterator(it) {}

    /// create reverse iterator from base class
    explicit json_reverse_iterator(const base_iterator& it) noexcept : base_iterator(it) {}

    /// post-increment (it++)
    json_reverse_iterator operator++(int)& // NOLINT(cert-dcl21-cpp)
    {
        return static_cast<json_reverse_iterator>(base_iterator::operator++(1));
    }

    /// pre-increment (++it)
    json_reverse_iterator& operator++()
    {
        return static_cast<json_reverse_iterator&>(base_iterator::operator++());
    }

    /// post-decrement (it--)
    json_reverse_iterator operator--(int)& // NOLINT(cert-dcl21-cpp)
    {
        return static_cast<json_reverse_iterator>(base_iterator::operator--(1));
    }

    /// pre-decrement (--it)
    json_reverse_iterator& operator--()
    {
        return static_cast<json_reverse_iterator&>(base_iterator::operator--());
    }

    /// add to iterator
    json_reverse_iterator& operator+=(difference_type i)
    {
        return static_cast<json_reverse_iterator&>(base_iterator::operator+=(i));
    }

    /// add to iterator
    json_reverse_iterator operator+(difference_type i) const
    {
        return static_cast<json_reverse_iterator>(base_iterator::operator+(i));
    }

    /// subtract from iterator
    json_reverse_iterator operator-(difference_type i) const
    {
        return static_cast<json_reverse_iterator>(base_iterator::operator-(i));
    }

    /// return difference
    difference_type operator-(const json_reverse_iterator& other) const
    {
        return base_iterator(*this) - base_iterator(other);
    }

    /// access to successor
    reference operator[](difference_type n) const
    {
        return *(this->operator+(n));
    }

    /// return the key of an object iterator
    auto key() const -> decltype(std::declval<Base>().key())
    {
        auto it = --this->base();
        return it.key();
    }

    /// return the value of an iterator
    reference value() const
    {
        auto it = --this->base();
        return it.operator * ();
    }
};

} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
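json_reverse_iterator simply adapts std::reverse_iterator while re-exposing key() and value() through the decremented base iterator. A usage sketch, not part of this diff, assuming only the public nlohmann::json API:

#include <nlohmann/json.hpp>
#include <iostream>

int main() {
    nlohmann::json arr = {1, 2, 3};
    for (auto rit = arr.rbegin(); rit != arr.rend(); ++rit) {
        std::cout << *rit << ' ';             // prints: 3 2 1
    }
    std::cout << '\n';

    // key()/value() also work on reverse object iterators:
    nlohmann::json obj = {{"a", 1}, {"b", 2}};
    std::cout << obj.rbegin().key() << '\n';  // prints: b (object keys are sorted)
}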
@@ -1,132 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <cstddef> // ptrdiff_t
#include <limits> // numeric_limits

#include <nlohmann/detail/macro_scope.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

/*
@brief an iterator for primitive JSON types

This class models an iterator for primitive JSON types (boolean, number,
string). Its only purpose is to allow the iterator/const_iterator classes
to "iterate" over primitive values. Internally, the iterator is modeled by
a `difference_type` variable. Value begin_value (`0`) models the begin,
end_value (`1`) models past the end.
*/
class primitive_iterator_t
{
  private:
    using difference_type = std::ptrdiff_t;
    static constexpr difference_type begin_value = 0;
    static constexpr difference_type end_value = begin_value + 1;

  JSON_PRIVATE_UNLESS_TESTED:
    /// iterator as signed integer type
    difference_type m_it = (std::numeric_limits<std::ptrdiff_t>::min)();

  public:
    constexpr difference_type get_value() const noexcept
    {
        return m_it;
    }

    /// set iterator to a defined beginning
    void set_begin() noexcept
    {
        m_it = begin_value;
    }

    /// set iterator to a defined past the end
    void set_end() noexcept
    {
        m_it = end_value;
    }

    /// return whether the iterator can be dereferenced
    constexpr bool is_begin() const noexcept
    {
        return m_it == begin_value;
    }

    /// return whether the iterator is at end
    constexpr bool is_end() const noexcept
    {
        return m_it == end_value;
    }

    friend constexpr bool operator==(primitive_iterator_t lhs, primitive_iterator_t rhs) noexcept
    {
        return lhs.m_it == rhs.m_it;
    }

    friend constexpr bool operator<(primitive_iterator_t lhs, primitive_iterator_t rhs) noexcept
    {
        return lhs.m_it < rhs.m_it;
    }

    primitive_iterator_t operator+(difference_type n) noexcept
    {
        auto result = *this;
        result += n;
        return result;
    }

    friend constexpr difference_type operator-(primitive_iterator_t lhs, primitive_iterator_t rhs) noexcept
    {
        return lhs.m_it - rhs.m_it;
    }

    primitive_iterator_t& operator++() noexcept
    {
        ++m_it;
        return *this;
    }

    primitive_iterator_t operator++(int)& noexcept // NOLINT(cert-dcl21-cpp)
    {
        auto result = *this;
        ++m_it;
        return result;
    }

    primitive_iterator_t& operator--() noexcept
    {
        --m_it;
        return *this;
    }

    primitive_iterator_t operator--(int)& noexcept // NOLINT(cert-dcl21-cpp)
    {
        auto result = *this;
        --m_it;
        return result;
    }

    primitive_iterator_t& operator+=(difference_type n) noexcept
    {
        m_it += n;
        return *this;
    }

    primitive_iterator_t& operator-=(difference_type n) noexcept
    {
        m_it -= n;
        return *this;
    }
};

} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
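Because primitive_iterator_t models exactly one step between begin (0) and past-the-end (1), a primitive JSON value behaves like a one-element range. A sketch, not part of this diff, assuming only the public nlohmann::json API:

#include <nlohmann/json.hpp>
#include <iostream>
#include <iterator>

int main() {
    nlohmann::json num = 42;   // a primitive (number) value
    for (const auto& v : num) {
        std::cout << v << '\n';  // prints 42 exactly once
    }
    std::cout << std::distance(num.begin(), num.end()) << '\n';  // prints 1
}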
Some files were not shown because too many files have changed in this diff.