Compare commits

...

81 Commits

Author SHA1 Message Date
5f95117dd4 Merge pull request 'Replace git submodule dependencies for vcpkg dependencies' (#35) from vcpkg into main
Reviewed-on: #35
2025-04-27 20:55:03 +00:00
2f5bc10b8e Update sample project and README 2025-04-27 21:25:21 +02:00
257f519641 Fix update_coverage.py mistake in url 2025-04-27 18:41:34 +02:00
5c5ecef3cf Update vcpkg private repo baseline 2025-04-27 18:37:46 +02:00
d0ebe596f6 Fix json module version in test 2025-04-27 18:34:15 +02:00
670b93d0a1 Remove git modules and add vcpkg configuration 2025-04-27 18:33:23 +02:00
306d3a4b55 Reformat source 2025-03-22 10:31:54 +01:00
bf08b0de89 Change clang-format braces position 2025-03-17 18:02:21 +01:00
b976db53c6 Add models to README 2025-03-17 13:13:06 +01:00
be39d2dedb Add ulm class diagram & update .clang-format 2025-03-17 13:06:15 +01:00
4ca770d16b Update README.md & .clang-format 2025-03-17 12:14:57 +01:00
6bf3b939bc Add items to .clang-format 2025-03-17 11:39:33 +01:00
7076efc2a1 Merge pull request 'Optimize BoostAODE -> XBAODE' (#33) from WA2DE into main
Reviewed-on: #33
2025-03-16 17:58:10 +00:00
9ee388561f Update version, changelog, and Xsp2de clf name 2025-03-16 18:55:24 +01:00
70c7d3dd3d Add test to 99.1% 2025-03-14 18:55:29 +01:00
400967b4e3 Add tests to 90% coverage 2025-03-14 14:53:22 +01:00
c234308701 Add SPnDE n=2 2025-03-13 10:58:43 +01:00
4ded6f51eb TestXBAODE complete, fix XBAODE error in no convergence & 99% coverage 2025-03-13 01:28:48 +01:00
b1d317d8f4 Add format and launch config 2025-03-12 16:29:29 +01:00
7876d1a370 Add test 2025-03-12 16:27:19 +01:00
3bdb14bd65 Tests XSpode & XBAODE 2025-03-12 13:46:04 +01:00
71b05cc1a7 Begin XBAODE tests 2025-03-11 18:16:50 +01:00
a59689272d Fix tests 2025-03-11 01:09:37 +01:00
3d8be79b37 Fix XSpode 2025-03-10 22:18:50 +01:00
619276a5ea Update sample_xpode 2025-03-10 21:44:12 +01:00
e681099360 Add sample_xspode 2025-03-10 21:37:14 +01:00
5919fbfd34 Fix Xspode 2025-03-10 21:29:47 +01:00
a26522e62f Fix XSPode 2025-03-10 15:55:48 +01:00
86cccb6c7b Fix XSpode 2025-03-10 14:23:47 +01:00
d1b235261e Fix XSpode 2025-03-10 14:21:01 +01:00
7a8e0391dc continue fixing xspode 2025-03-10 12:18:10 +01:00
6cfbc482d8 change launch.json 2025-03-10 11:20:36 +01:00
ca54f799ee Fix XSpode predict 2025-03-10 11:18:04 +01:00
06621ea361 Add XBAODE & XSpode classifiers 2025-03-09 19:15:00 +01:00
a70ac3e883 Add namespace to Smoothing.h 2025-03-09 11:21:31 +01:00
b987dcbcc4 Refactor Smoothing type to its own file
Add log to boost
2025-03-08 14:04:08 +01:00
81fd7df7f0 Update CHANGELOG 2025-02-13 01:18:43 +01:00
dd98cf159d ComputeCPT Optimization 2025-02-13 01:17:37 +01:00
f658149977 Add dump_cpt to Ensemble 2025-02-12 20:55:35 +01:00
fb957ac3fe First implemented approximation 2025-01-31 13:55:46 +01:00
b90e558238 Hyperparameter *maxTolerance* in the BoostAODE class is now in [1, 6] range (it was in [1, 4] range before) 2025-01-23 00:56:18 +01:00
64970cf7f7 Merge pull request 'alphablock' (#32) from alphablock into main
Reviewed-on: #32
Added

- Add a new hyperparameter to the BoostAODE class, alphablock, to control the way α is computed, either with the last model or with the ensemble built so far (see the sketch below this entry). Default value is false.
- Add a new hyperparameter to the SPODE class, parent, to set the root node of the model. If no value is set the root parameter of the constructor is used.
- Add a new hyperparameter to the TAN class, parent, to set the root node of the model. If not set the first feature is used as root.
2025-01-22 11:48:09 +00:00
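For orientation, the α this hyperparameter affects is the usual AdaBoost-style model weight. A sketch of the classic two-class form, as a reading aid only (the exact multiclass expression BoostAODE uses lives in the library source, which this compare view does not show):

```latex
% \varepsilon_t: weighted training error at iteration t, measured either on
% the last model alone or on the ensemble built so far; that choice is
% exactly what alphablock toggles.
\alpha_t = \tfrac{1}{2}\,\ln\frac{1 - \varepsilon_t}{\varepsilon_t}
```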
b571a4da4d Fix typo in CHANGELOG 2025-01-22 12:43:40 +01:00
8a9f329ff9 Remove typo in README 2024-12-18 14:29:12 +01:00
e2781ee525 Add parent hyperparameter to TAN & SPODE 2024-12-17 10:14:14 +01:00
56a2d3ead0 Remove unneeded submodule 2024-12-14 20:27:07 +01:00
dc32a0fc47 Fix tests & update dependencies versions 2024-12-14 14:32:51 +01:00
3d6b4f0614 Implement the functionality of the hyperparameter alpha_block with test 2024-12-14 14:02:45 +01:00
18844c7da7 Add hyperparameter to ChangeLog and Boost class 2024-12-14 14:02:10 +01:00
43ceefd2c9 Fix comment in AODELd 2024-12-10 13:35:23 +01:00
e6501502d1 Update docs and help 2024-11-23 20:28:16 +01:00
d84adf6172 Add model to changelog 2024-11-23 19:13:54 +01:00
268a86cbe0 Update Changelog 2024-11-23 19:11:00 +01:00
fc4c93b299 Fix Mst test 2024-11-23 19:07:35 +01:00
86f2bc44fc libmdlp (#31)
Add mdlp as library in lib/
Fix tests to reach 99.1% of coverage

Reviewed-on: #31
2024-11-23 17:22:41 +00:00
f0f3d9ad6e Fix CUDA and mdlp library issues 2024-11-20 21:02:56 +01:00
9a323cd7a3 Remove mdlp submodule 2024-11-20 20:15:49 +01:00
cb949ac7e5 Update dependencies versions 2024-09-29 13:17:44 +02:00
2c297ea15d Control optional doxygen dependency 2024-09-29 12:48:15 +02:00
4e4b6e67f4 Add env parallel variable to Makefile 2024-09-18 11:05:19 +02:00
82847774ee Update Dockerfile 2024-09-13 09:42:06 +02:00
d0955d9369 Merge pull request 'smoothing' (#30) from smoothing into main
Reviewed-on: #30
2024-09-12 20:28:33 +00:00
2d34eb8c89 Update Makefile to get parallel info from env 2024-08-31 12:43:39 +02:00
0159c397fa Update optimization flag in CMakeLists 2024-07-11 12:29:57 +02:00
0bbc8328a9 Change cpt table type to float 2024-07-08 13:27:55 +02:00
35ca862eca Don't allow adding nodes or edges on fitted networks 2024-07-07 21:06:59 +02:00
26eb58b104 Forbid inserting the same edge twice 2024-07-04 18:52:41 +02:00
6fcc15d39a Upgrade mdlp library 2024-06-24 12:38:44 +02:00
9a14133be5 Add thread control to vectors predict 2024-06-23 13:02:40 +02:00
59c1cf5b3b Fix number of threads spawned 2024-06-21 19:56:35 +02:00
8e9090d283 Fix tests 2024-06-21 13:58:42 +02:00
02bcab01be Refactor CountingSemaphore as singleton 2024-06-21 09:30:24 +02:00
716748e18c Add Counting Semaphore class
Fix threading in Network
2024-06-20 10:36:09 +02:00
0b31780d39 Add Thread max spawning to Network 2024-06-18 23:18:24 +02:00
fa26aa80f7 Rename OLD_LAPLACE to ORIGINAL 2024-06-13 15:04:15 +02:00
3eb61905fb Upgrade ArffFiles Module version 2024-06-13 12:33:54 +02:00
ca0ae4dacf Refactor Cestnik smoothing factor assuming m=1 2024-06-13 09:11:47 +02:00
b34869cc61 Set smoothing as fit parameter 2024-06-11 11:40:45 +02:00
27a3e5a5e0 Implement 3 types of smoothing 2024-06-10 15:49:01 +02:00
684443a788 Implement Cestnik & Laplace smoothing 2024-06-09 17:19:38 +02:00
6d9badc33b Merge pull request 'BoostA2DE' (#29) from BoostA2DE into main
Reviewed-on: #29
2024-06-09 10:02:47 +00:00
146 changed files with 9840 additions and 29932 deletions

.clang-format (new file)

@@ -0,0 +1,10 @@
+# .clang-format
+---
+BasedOnStyle: LLVM
+AccessModifierOffset: -4
+BreakBeforeBraces: Linux
+ColumnLimit: 0
+FixNamespaceComments: false
+IndentWidth: 4
+NamespaceIndentation: All
+TabWidth: 4

@@ -1,4 +1,4 @@
-compilation_database_dir: build_debug
+compilation_database_dir: build_Debug
 output_directory: diagrams
 diagrams:
   BayesNet:

@@ -1,6 +1,6 @@
 FROM mcr.microsoft.com/devcontainers/cpp:ubuntu22.04
-ARG REINSTALL_CMAKE_VERSION_FROM_SOURCE="3.22.2"
+ARG REINSTALL_CMAKE_VERSION_FROM_SOURCE="3.29.3"
 # Optionally install the cmake for vcpkg
 COPY ./reinstall-cmake.sh /tmp/
@@ -23,7 +23,7 @@ RUN add-apt-repository ppa:ubuntu-toolchain-r/test
 RUN apt-get update
 # Install GCC 13.1
-RUN apt-get install -y gcc-13 g++-13
+RUN apt-get install -y gcc-13 g++-13 doxygen
 # Install lcov 2.1
 RUN wget --quiet https://github.com/linux-test-project/lcov/releases/download/v2.1/lcov-2.1.tar.gz && \

.gitignore

@@ -44,4 +44,5 @@ docs/manual
 docs/man3
 docs/man
 docs/Doxyfile
+.cache
+vcpkg_installed

.gitmodules (deleted)

@@ -1,23 +0,0 @@
-[submodule "lib/mdlp"]
-	path = lib/mdlp
-	url = https://github.com/rmontanana/mdlp
-	main = main
-	update = merge
-[submodule "lib/json"]
-	path = lib/json
-	url = https://github.com/nlohmann/json.git
-	master = master
-	update = merge
-[submodule "lib/folding"]
-	path = lib/folding
-	url = https://github.com/rmontanana/folding
-	main = main
-	update = merge
-[submodule "tests/lib/catch2"]
-	path = tests/lib/catch2
-	url = https://github.com/catchorg/Catch2.git
-	main = main
-	update = merge
-[submodule "tests/lib/Files"]
-	path = tests/lib/Files
-	url = https://github.com/rmontanana/ArffFiles

.vscode/launch.json

@@ -5,7 +5,7 @@
         "type": "lldb",
         "request": "launch",
         "name": "sample",
-        "program": "${workspaceFolder}/build_release/sample/bayesnet_sample",
+        "program": "${workspaceFolder}/sample/build/bayesnet_sample",
         "args": [
             "${workspaceFolder}/tests/data/glass.arff"
         ]
@@ -14,11 +14,11 @@
         "type": "lldb",
         "request": "launch",
         "name": "test",
-        "program": "${workspaceFolder}/build_debug/tests/TestBayesNet",
+        "program": "${workspaceFolder}/build_Debug/tests/TestBayesNet",
         "args": [
-            "[Node]"
+            "[XBAODE]"
         ],
-        "cwd": "${workspaceFolder}/build_debug/tests"
+        "cwd": "${workspaceFolder}/build_Debug/tests"
     },
     {
         "name": "(gdb) Launch",

@@ -7,6 +7,44 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ## [Unreleased]
+## [1.1.0] - 2025-04-27
+### Internal
+- Add changes to .clang-format to adjust to vscode format style thanks to <https://clang-format-configurator.site/>
+- Remove all the dependencies as git submodules and add them as vcpkg dependencies.
+- Fix the dependencies versions for this specific BayesNet version.
+## [1.0.7] 2025-03-16
+### Added
+- A new hyperparameter to the BoostAODE class, *alphablock*, to control the way &alpha; is computed, with the last model or with the ensemble built so far. Default value is *false*.
+- A new hyperparameter to the SPODE class, *parent*, to set the root node of the model. If no value is set the root parameter of the constructor is used.
+- A new hyperparameter to the TAN class, *parent*, to set the root node of the model. If not set the first feature is used as root.
+- A new model named XSPODE, an optimized for speed averaged one dependence estimator.
+- A new model named XSP2DE, an optimized for speed averaged two dependence estimator.
+- A new model named XBAODE, an optimized for speed BoostAODE model.
+- A new model named XBA2DE, an optimized for speed BoostA2DE model.
+### Internal
+- Optimize ComputeCPT method in the Node class.
+- Add methods getCount and getMaxCount to the CountingSemaphore class, returning the current count and the maximum count of threads respectively.
+### Changed
+- Hyperparameter *maxTolerance* in the BoostAODE class is now in [1, 6] range (it was in [1, 4] range before).
+## [1.0.6] 2024-11-23
+### Fixed
+- Prevent existing edges from being added to the network in the `add_edge` method.
+- Don't allow to add nodes or edges on already fitted networks.
+- Number of threads spawned
+- Network class tests
 ### Added
 - Library logo generated with <https://openart.ai> to README.md
@@ -14,14 +52,21 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - *convergence_best* hyperparameter to the BoostAODE class, to control the way the prior accuracy is computed if convergence is set. Default value is *false*.
 - SPnDE model.
 - A2DE model.
+- BoostA2DE model.
 - A2DE & SPnDE tests.
 - Add tests to reach 99% of coverage.
 - Add tests to check the correct version of the mdlp, folding and json libraries.
 - Library documentation generated with Doxygen.
 - Link to documentation in the README.md.
+- Three types of smoothing for the Bayesian Network: ORIGINAL, LAPLACE and CESTNIK.
 ### Internal
+- Fixed doxygen optional dependency
+- Add env parallel variable to Makefile
+- Add CountingSemaphore class to manage the number of threads spawned.
+- Ignore CUDA language in CMake CodeCoverage module.
+- Update mdlp library as a git submodule.
 - Create library ShuffleArffFile to limit the number of samples with a parameter and shuffle them.
 - Refactor catch2 library location to test/lib
 - Refactor loadDataset function in tests.
@@ -32,6 +77,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Add a Makefile target (doc) to generate the documentation.
 - Add a Makefile target (doc-install) to install the documentation.
+### Libraries versions
+- mdlp: 2.0.1
+- Folding: 1.1.0
+- json: 3.11
+- ArffFiles: 1.1.0
 ## [1.0.5] 2024-04-20
 ### Added

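For reference, the three smoothing options named above are standard conditional-probability-table estimators. A sketch of the usual definitions, assuming N_{x,c} counts samples with feature value x and class c, N_c counts class c, k is the number of states of the feature, and p(x) is the prior of x (the exact constants live in the Node/Network sources, which this compare view shows only in part; per commit ca0ae4dacf the Cestnik m-estimate is fixed at m = 1, and ORIGINAL is the previous behavior renamed from OLD_LAPLACE in commit fa26aa80f7):

```latex
\text{LAPLACE:}\quad P(x \mid c) = \frac{N_{x,c} + 1}{N_c + k}
\qquad
\text{CESTNIK (m-estimate):}\quad P(x \mid c) = \frac{N_{x,c} + m\,p(x)}{N_c + m},\quad m = 1
```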
@@ -1,7 +1,7 @@
 cmake_minimum_required(VERSION 3.20)
 project(BayesNet
-    VERSION 1.0.5.1
+    VERSION 1.1.0
     DESCRIPTION "Bayesian Network and basic classifiers Library."
     HOMEPAGE_URL "https://github.com/rmontanana/bayesnet"
     LANGUAGES CXX
@@ -26,7 +26,7 @@ set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread")
 set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fprofile-arcs -ftest-coverage -fno-elide-constructors")
-set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O3")
+set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -Ofast")
 if (NOT ${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
     set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fno-default-inline")
 endif()
@@ -41,7 +41,6 @@ option(INSTALL_GTEST "Enable installation of googletest." OFF)
 # CMakes modules
 # --------------
 set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules ${CMAKE_MODULE_PATH})
-include(AddGitSubmodule)
 if (CMAKE_BUILD_TYPE STREQUAL "Debug")
     MESSAGE("Debug mode")
@@ -49,11 +48,12 @@ if (CMAKE_BUILD_TYPE STREQUAL "Debug")
     set(CODE_COVERAGE ON)
 endif (CMAKE_BUILD_TYPE STREQUAL "Debug")
+get_property(LANGUAGES GLOBAL PROPERTY ENABLED_LANGUAGES)
+message(STATUS "Languages=${LANGUAGES}")
 if (CODE_COVERAGE)
     enable_testing()
     include(CodeCoverage)
-    MESSAGE("Code coverage enabled")
+    MESSAGE(STATUS "Code coverage enabled")
     SET(GCC_COVERAGE_LINK_FLAGS " ${GCC_COVERAGE_LINK_FLAGS} -lgcov --coverage")
 endif (CODE_COVERAGE)
@@ -63,9 +63,11 @@ endif (ENABLE_CLANG_TIDY)
 # External libraries - dependencies of BayesNet
 # ---------------------------------------------
-# include(FetchContent)
-add_git_submodule("lib/json")
-add_git_submodule("lib/mdlp")
+find_package(Torch CONFIG REQUIRED)
+find_package(fimdlp CONFIG REQUIRED)
+find_package(nlohmann_json CONFIG REQUIRED)
+find_package(folding CONFIG REQUIRED)
 # Subdirectories
 # --------------
@@ -75,8 +77,8 @@ add_subdirectory(bayesnet)
 # Testing
 # -------
 if (ENABLE_TESTING)
-    MESSAGE("Testing enabled")
-    add_subdirectory(tests/lib/catch2)
+    MESSAGE(STATUS "Testing enabled")
+    find_package(Catch2 CONFIG REQUIRED)
     include(CTest)
     add_subdirectory(tests)
 endif (ENABLE_TESTING)
@@ -93,10 +95,14 @@ install(FILES ${CMAKE_BINARY_DIR}/configured_files/include/bayesnet/config.h DES
 # Documentation
 # -------------
 find_package(Doxygen)
-set(DOC_DIR ${CMAKE_CURRENT_SOURCE_DIR}/docs)
-set(doxyfile_in ${DOC_DIR}/Doxyfile.in)
-set(doxyfile ${DOC_DIR}/Doxyfile)
-configure_file(${doxyfile_in} ${doxyfile} @ONLY)
-doxygen_add_docs(doxygen
-    WORKING_DIRECTORY ${DOC_DIR}
+if (Doxygen_FOUND)
+    set(DOC_DIR ${CMAKE_CURRENT_SOURCE_DIR}/docs)
+    set(doxyfile_in ${DOC_DIR}/Doxyfile.in)
+    set(doxyfile ${DOC_DIR}/Doxyfile)
+    configure_file(${doxyfile_in} ${doxyfile} @ONLY)
+    doxygen_add_docs(doxygen
+        WORKING_DIRECTORY ${DOC_DIR}
     CONFIG_FILE ${doxyfile})
+else (Doxygen_FOUND)
+    MESSAGE("* Doxygen not found")
+endif (Doxygen_FOUND)

@@ -1,6 +1,6 @@
 SHELL := /bin/bash
 .DEFAULT_GOAL := help
-.PHONY: viewcoverage coverage setup help install uninstall diagrams buildr buildd test clean debug release sample updatebadge doc doc-install
+.PHONY: viewcoverage coverage setup help install uninstall diagrams buildr buildd test clean debug release sample updatebadge doc doc-install init clean-test
 f_release = build_Release
 f_debug = build_Debug
@@ -12,7 +12,6 @@ plantuml = plantuml
 lcov = lcov
 genhtml = genhtml
 dot = dot
-n_procs = -j 16
 docsrcdir = docs/manual
 mansrcdir = docs/man3
 mandestdir = /usr/local/share/man
@@ -44,7 +43,7 @@ setup: ## Install dependencies for tests and coverage
 	fi
 	@echo "* You should install plantuml & graphviz for the diagrams"
-diagrams: ## Create an UML class diagram & depnendency of the project (diagrams/BayesNet.png)
+diagrams: ## Create an UML class diagram & dependency of the project (diagrams/BayesNet.png)
 	@which $(plantuml) || (echo ">>> Please install plantuml"; exit 1)
 	@which $(dot) || (echo ">>> Please install graphviz"; exit 1)
 	@which $(clang-uml) || (echo ">>> Please install clang-uml"; exit 1)
@@ -59,12 +58,12 @@ diagrams: ## Create an UML class diagram & dependency of the project (diagrams/
 	@$(dot) -Tsvg $(f_debug)/dependency.dot.BayesNet -o $(f_diagrams)/dependency.svg
 buildd: ## Build the debug targets
-	cmake --build $(f_debug) -t $(app_targets) $(n_procs)
+	cmake --build $(f_debug) -t $(app_targets) --parallel $(CMAKE_BUILD_PARALLEL_LEVEL)
 buildr: ## Build the release targets
-	cmake --build $(f_release) -t $(app_targets) $(n_procs)
+	cmake --build $(f_release) -t $(app_targets) --parallel $(CMAKE_BUILD_PARALLEL_LEVEL)
-clean: ## Clean the tests info
+clean-test: ## Clean the tests info
 	@echo ">>> Cleaning Debug BayesNet tests...";
 	$(call ClearTests)
 	@echo ">>> Done";
@@ -80,33 +79,56 @@ install: ## Install library
 	@cmake --install $(f_release) --prefix $(prefix)
 	@echo ">>> Done";
+init: ## Initialize the project installing dependencies
+	@echo ">>> Installing dependencies"
+	@vcpkg install
+	@echo ">>> Done";
+clean: ## Clean the project
+	@echo ">>> Cleaning the project..."
+	@if test -d build_Debug ; then echo "- Deleting build_Debug folder" ; rm -rf build_Debug; fi
+	@if test -d build_Release ; then echo "- Deleting build_Release folder" ; rm -rf build_Release; fi
+	@if test -f CMakeCache.txt ; then echo "- Deleting CMakeCache.txt"; rm -f CMakeCache.txt; fi
+	@if test -d vcpkg_installed ; then echo "- Deleting vcpkg_installed folder" ; rm -rf vcpkg_installed; fi
+	@$(MAKE) clean-test
+	@echo ">>> Done";
 debug: ## Build a debug version of the project
 	@echo ">>> Building Debug BayesNet...";
 	@if [ -d ./$(f_debug) ]; then rm -rf ./$(f_debug); fi
 	@mkdir $(f_debug);
-	@cmake -S . -B $(f_debug) -D CMAKE_BUILD_TYPE=Debug -D ENABLE_TESTING=ON -D CODE_COVERAGE=ON
+	@cmake -S . -B $(f_debug) -D CMAKE_BUILD_TYPE=Debug -D ENABLE_TESTING=ON -D CODE_COVERAGE=ON -DCMAKE_TOOLCHAIN_FILE=${VCPKG_ROOT}/scripts/buildsystems/vcpkg.cmake
 	@echo ">>> Done";
 release: ## Build a Release version of the project
 	@echo ">>> Building Release BayesNet...";
 	@if [ -d ./$(f_release) ]; then rm -rf ./$(f_release); fi
 	@mkdir $(f_release);
-	@cmake -S . -B $(f_release) -D CMAKE_BUILD_TYPE=Release
+	@cmake -S . -B $(f_release) -D CMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=${VCPKG_ROOT}/scripts/buildsystems/vcpkg.cmake
 	@echo ">>> Done";
 fname = "tests/data/iris.arff"
 sample: ## Build sample
 	@echo ">>> Building Sample...";
 	@if [ -d ./sample/build ]; then rm -rf ./sample/build; fi
-	@cd sample && cmake -B build -S . && cmake --build build -t bayesnet_sample
+	@cd sample && cmake -B build -S . -D CMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=${VCPKG_ROOT}/scripts/buildsystems/vcpkg.cmake && \
+	cmake --build build -t bayesnet_sample
 	sample/build/bayesnet_sample $(fname)
 	@echo ">>> Done";
+fname = "tests/data/iris.arff"
+sample2: ## Build sample2
+	@echo ">>> Building Sample...";
+	@if [ -d ./sample/build ]; then rm -rf ./sample/build; fi
+	@cd sample && cmake -B build -S . -D CMAKE_BUILD_TYPE=Debug && cmake --build build -t bayesnet_sample_xspode
+	sample/build/bayesnet_sample_xspode $(fname)
+	@echo ">>> Done";
 opt = ""
 test: ## Run tests (opt="-s") to verbose output the tests, (opt="-c='Test Maximum Spanning Tree'") to run only that section
 	@echo ">>> Running BayesNet tests...";
-	@$(MAKE) clean
+	@$(MAKE) clean-test
-	@cmake --build $(f_debug) -t $(test_targets) $(n_procs)
+	@cmake --build $(f_debug) -t $(test_targets) --parallel $(CMAKE_BUILD_PARALLEL_LEVEL)
 	@for t in $(test_targets); do \
 		echo ">>> Running $$t...";\
 		if [ -f $(f_debug)/tests/$$t ]; then \
@@ -119,13 +141,14 @@ test: ## Run tests (opt="-s") to verbose output the tests, (opt="-c='Test Maximu
 coverage: ## Run tests and generate coverage report (build/index.html)
 	@echo ">>> Building tests with coverage..."
 	@which $(lcov) || (echo ">>> Please install lcov"; exit 1)
 	@if [ ! -f $(f_debug)/tests/coverage.info ] ; then $(MAKE) test ; fi
 	@echo ">>> Building report..."
 	@cd $(f_debug)/tests; \
 	$(lcov) --directory CMakeFiles --capture --demangle-cpp --ignore-errors source,source --output-file coverage.info >/dev/null 2>&1; \
 	$(lcov) --remove coverage.info '/usr/*' --output-file coverage.info >/dev/null 2>&1; \
 	$(lcov) --remove coverage.info 'lib/*' --output-file coverage.info >/dev/null 2>&1; \
+	$(lcov) --remove coverage.info 'include/*' --output-file coverage.info >/dev/null 2>&1; \
 	$(lcov) --remove coverage.info 'libtorch/*' --output-file coverage.info >/dev/null 2>&1; \
 	$(lcov) --remove coverage.info 'tests/*' --output-file coverage.info >/dev/null 2>&1; \
 	$(lcov) --remove coverage.info 'bayesnet/utils/loguru.*' --ignore-errors unused --output-file coverage.info >/dev/null 2>&1; \
@@ -173,7 +196,7 @@ docdir = ""
 doc-install: ## Install documentation
 	@echo ">>> Installing documentation..."
 	@if [ "$(docdir)" = "" ]; then \
-		echo "docdir parameter has to be set when calling doc-install"; \
+		echo "docdir parameter has to be set when calling doc-install, i.e. docdir=../bayesnet_help"; \
 		exit 1; \
 	fi
 	@if [ ! -d $(docdir) ]; then \

README.md

@@ -2,52 +2,113 @@
 ![C++](https://img.shields.io/badge/c++-%2300599C.svg?style=flat&logo=c%2B%2B&logoColor=white)
 [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](<https://opensource.org/licenses/MIT>)
-![Gitea Release](https://img.shields.io/gitea/v/release/rmontanana/bayesnet?gitea_url=https://gitea.rmontanana.es:3000)
+![Gitea Release](https://img.shields.io/gitea/v/release/rmontanana/bayesnet?gitea_url=https://gitea.rmontanana.es)
 [![Codacy Badge](https://app.codacy.com/project/badge/Grade/cf3e0ac71d764650b1bf4d8d00d303b1)](https://app.codacy.com/gh/Doctorado-ML/BayesNet/dashboard?utm_source=gh&utm_medium=referral&utm_content=&utm_campaign=Badge_grade)
 [![Security Rating](https://sonarcloud.io/api/project_badges/measure?project=rmontanana_BayesNet&metric=security_rating)](https://sonarcloud.io/summary/new_code?id=rmontanana_BayesNet)
 [![Reliability Rating](https://sonarcloud.io/api/project_badges/measure?project=rmontanana_BayesNet&metric=reliability_rating)](https://sonarcloud.io/summary/new_code?id=rmontanana_BayesNet)
-![Gitea Last Commit](https://img.shields.io/gitea/last-commit/rmontanana/bayesnet?gitea_url=https://gitea.rmontanana.es:3000&logo=gitea)
+![Gitea Last Commit](https://img.shields.io/gitea/last-commit/rmontanana/bayesnet?gitea_url=https://gitea.rmontanana.es&logo=gitea)
-[![Coverage Badge](https://img.shields.io/badge/Coverage-97,3%25-green)](html/index.html)
+[![Coverage Badge](https://img.shields.io/badge/Coverage-99,1%25-green)](https://gitea.rmontanana.es/rmontanana/BayesNet)
+[![DOI](https://zenodo.org/badge/667782806.svg)](https://doi.org/10.5281/zenodo.14210344)
-Bayesian Network Classifiers using libtorch from scratch
+Bayesian Network Classifiers library
-## Dependencies
-The only external dependency is [libtorch](https://pytorch.org/cppdocs/installing.html) which can be installed with the following commands:
-```bash
-wget https://download.pytorch.org/libtorch/nightly/cpu/libtorch-shared-with-deps-latest.zip
-unzip libtorch-shared-with-deps-latest.zips
-```
 ## Setup
+### Using the vcpkg library
+You can use the library with the vcpkg package manager. In your project you have to add the following files:
+#### vcpkg.json
+```json
+{
+    "name": "sample-project",
+    "version-string": "0.1.0",
+    "dependencies": [
+        "bayesnet"
+    ]
+}
+```
+#### vcpkg-configuration.json
+```json
+{
+    "registries": [
+        {
+            "kind": "git",
+            "repository": "https://github.com/rmontanana/vcpkg-stash",
+            "baseline": "393efa4e74e053b6f02c4ab03738c8fe796b28e5",
+            "packages": [
+                "folding",
+                "bayesnet",
+                "arff-files",
+                "fimdlp",
+                "libtorch-bin"
+            ]
+        }
+    ],
+    "default-registry": {
+        "kind": "git",
+        "repository": "https://github.com/microsoft/vcpkg",
+        "baseline": "760bfd0c8d7c89ec640aec4df89418b7c2745605"
+    }
+}
+```
+#### CMakeLists.txt
+You have to include the following lines in your `CMakeLists.txt` file:
+```cmake
+find_package(bayesnet CONFIG REQUIRED)
+add_executable(myapp main.cpp)
+target_link_libraries(myapp PRIVATE bayesnet::bayesnet)
+```
+After that, you can use the `vcpkg` command to install the dependencies:
+```bash
+vcpkg install
+```
+**Note: In the `sample` folder you can find a sample application that uses the library. You can use it as a reference to create your own application.**
+## Playing with the library
+The dependencies are managed with [vcpkg](https://vcpkg.io/) and supported by a private vcpkg repository in [https://github.com/rmontanana/vcpkg-stash](https://github.com/rmontanana/vcpkg-stash).
 ### Getting the code
 ```bash
-git clone --recurse-submodules https://github.com/doctorado-ml/bayesnet
+git clone https://github.com/doctorado-ml/bayesnet
 ```
+Once you have the code, you can use the `make` command to build the project. The `Makefile` is used to manage the build process and it will automatically download and install the dependencies.
 ### Release
 ```bash
-make release
-make buildr
-sudo make install
+make init    # Install dependencies
+make release # Build the release version
+make buildr  # Compile and link the release version
 ```
 ### Debug & Tests
 ```bash
-make debug
-make test
+make init  # Install dependencies
+make debug # Build the debug version
+make test  # Run the tests
 ```
 ### Coverage
 ```bash
-make coverage
-make viewcoverage
+make coverage     # Run the tests with coverage
+make viewcoverage # View the coverage report in the browser
 ```
 ### Sample app
@@ -71,10 +132,16 @@ make sample fname=tests/data/glass.arff
 #### - AODE
+#### - A2DE
 #### - [BoostAODE](docs/BoostAODE.md)
+#### - XBAODE
 #### - BoostA2DE
+#### - XBA2DE
 ### With Local Discretization
 #### - TANLd

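To complement the README setup above, here is a minimal, hypothetical consumer program. The fit signature and the Smoothing_t value are taken from the BaseClassifier/Classifier diffs further down this page; the header path is an assumption, and the shipped sample/ application remains the authoritative reference.

```cpp
// Hypothetical main.cpp for the vcpkg-based sample project sketched above.
// Assumes the fit overload with a trailing Smoothing_t (see the
// BaseClassifier diff below) and caller-prepared discretized integer data.
#include <map>
#include <string>
#include <vector>
#include <bayesnet/classifiers/TAN.h> // assumed install path of the header

int main()
{
    // X is n features x m samples, y is m labels (comment from Classifier.cc)
    std::vector<std::vector<int>> X = { { 0, 1, 0, 1 }, { 1, 1, 0, 0 } };
    std::vector<int> y = { 0, 1, 0, 1 };
    std::vector<std::string> features = { "f1", "f2" };
    std::string className = "class";
    std::map<std::string, std::vector<int>> states = {
        { "f1", { 0, 1 } }, { "f2", { 0, 1 } }, { "class", { 0, 1 } }
    };
    auto clf = bayesnet::TAN();
    clf.fit(X, y, features, className, states, bayesnet::Smoothing_t::ORIGINAL);
    auto y_pred = clf.predict(X); // std::vector<int> of predicted class ids
    return 0;
}
```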
@@ -8,17 +8,19 @@
 #include <vector>
 #include <torch/torch.h>
 #include <nlohmann/json.hpp>
+#include "bayesnet/network/Network.h"
 namespace bayesnet {
     enum status_t { NORMAL, WARNING, ERROR };
     class BaseClassifier {
     public:
-        // X is nxm std::vector, y is nx1 std::vector
-        virtual BaseClassifier& fit(std::vector<std::vector<int>>& X, std::vector<int>& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states) = 0;
-        // X is nxm tensor, y is nx1 tensor
-        virtual BaseClassifier& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states) = 0;
-        virtual BaseClassifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states) = 0;
-        virtual BaseClassifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights) = 0;
         virtual ~BaseClassifier() = default;
+        // X is nxm std::vector, y is nx1 std::vector
+        virtual BaseClassifier& fit(std::vector<std::vector<int>>& X, std::vector<int>& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) = 0;
+        // X is nxm tensor, y is nx1 tensor
+        virtual BaseClassifier& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) = 0;
+        virtual BaseClassifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) = 0;
+        virtual BaseClassifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const Smoothing_t smoothing) = 0;
         torch::Tensor virtual predict(torch::Tensor& X) = 0;
         std::vector<int> virtual predict(std::vector<std::vector<int >>& X) = 0;
         torch::Tensor virtual predict_proba(torch::Tensor& X) = 0;
@@ -26,8 +28,8 @@ namespace bayesnet {
         status_t virtual getStatus() const = 0;
         float virtual score(std::vector<std::vector<int>>& X, std::vector<int>& y) = 0;
         float virtual score(torch::Tensor& X, torch::Tensor& y) = 0;
-        int virtual getNumberOfNodes()const = 0;
-        int virtual getNumberOfEdges()const = 0;
+        int virtual getNumberOfNodes() const = 0;
+        int virtual getNumberOfEdges() const = 0;
         int virtual getNumberOfStates() const = 0;
         int virtual getClassNumStates() const = 0;
         std::vector<std::string> virtual show() const = 0;
@@ -35,11 +37,13 @@ namespace bayesnet {
         virtual std::string getVersion() = 0;
         std::vector<std::string> virtual topological_order() = 0;
         std::vector<std::string> virtual getNotes() const = 0;
-        std::string virtual dump_cpt()const = 0;
+        std::string virtual dump_cpt() const = 0;
         virtual void setHyperparameters(const nlohmann::json& hyperparameters) = 0;
         std::vector<std::string>& getValidHyperparameters() { return validHyperparameters; }
     protected:
-        virtual void trainModel(const torch::Tensor& weights) = 0;
+        virtual void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) = 0;
         std::vector<std::string> validHyperparameters;
+        std::vector<std::string> notes; // Used to store messages occurred during the fit process
+        status_t status = NORMAL;
     };
 }

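The practical consequence of this interface change: every fit overload gains a trailing Smoothing_t, and subclasses must update their trainModel override to match. A fragment sketch with a hypothetical MyClassifier, mirroring the Classifier::trainModel change visible in the Classifier.cc diff below:

```cpp
// Hypothetical subclass fragment (class definition omitted): from 1.1.0 on
// the smoothing estimator is threaded through to Network::fit instead of
// being chosen implicitly inside the library.
void MyClassifier::trainModel(const torch::Tensor& weights, const bayesnet::Smoothing_t smoothing)
{
    model.fit(dataset, weights, features, className, states, smoothing);
}
```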
@@ -1,5 +1,6 @@
 include_directories(
-    ${BayesNet_SOURCE_DIR}/lib/mdlp
+    ${BayesNet_SOURCE_DIR}/lib/log
+    ${BayesNet_SOURCE_DIR}/lib/mdlp/src
     ${BayesNet_SOURCE_DIR}/lib/folding
     ${BayesNet_SOURCE_DIR}/lib/json/include
     ${BayesNet_SOURCE_DIR}
@@ -9,4 +10,4 @@ include_directories(
 file(GLOB_RECURSE Sources "*.cc")
 add_library(BayesNet ${Sources})
-target_link_libraries(BayesNet mdlp "${TORCH_LIBRARIES}")
+target_link_libraries(BayesNet fimdlp "${TORCH_LIBRARIES}")

@@ -10,8 +10,7 @@
 namespace bayesnet {
     Classifier::Classifier(Network model) : model(model), m(0), n(0), metrics(Metrics()), fitted(false) {}
-    const std::string CLASSIFIER_NOT_FITTED = "Classifier has not been fitted";
-    Classifier& Classifier::build(const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights)
+    Classifier& Classifier::build(const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const Smoothing_t smoothing)
     {
         this->features = features;
         this->className = className;
@@ -23,7 +22,7 @@ namespace bayesnet {
         metrics = Metrics(dataset, features, className, n_classes);
         model.initialize();
         buildModel(weights);
-        trainModel(weights);
+        trainModel(weights, smoothing);
         fitted = true;
         return *this;
     }
@@ -41,20 +40,20 @@ namespace bayesnet {
             throw std::runtime_error(oss.str());
         }
     }
-    void Classifier::trainModel(const torch::Tensor& weights)
+    void Classifier::trainModel(const torch::Tensor& weights, Smoothing_t smoothing)
     {
-        model.fit(dataset, weights, features, className, states);
+        model.fit(dataset, weights, features, className, states, smoothing);
     }
     // X is nxm where n is the number of features and m the number of samples
-    Classifier& Classifier::fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states)
+    Classifier& Classifier::fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing)
     {
         dataset = X;
         buildDataset(y);
         const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble);
-        return build(features, className, states, weights);
+        return build(features, className, states, weights, smoothing);
     }
     // X is nxm where n is the number of features and m the number of samples
-    Classifier& Classifier::fit(std::vector<std::vector<int>>& X, std::vector<int>& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states)
+    Classifier& Classifier::fit(std::vector<std::vector<int>>& X, std::vector<int>& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing)
     {
         dataset = torch::zeros({ static_cast<int>(X.size()), static_cast<int>(X[0].size()) }, torch::kInt32);
         for (int i = 0; i < X.size(); ++i) {
@@ -63,18 +62,18 @@ namespace bayesnet {
         auto ytmp = torch::tensor(y, torch::kInt32);
         buildDataset(ytmp);
         const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble);
-        return build(features, className, states, weights);
+        return build(features, className, states, weights, smoothing);
     }
-    Classifier& Classifier::fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states)
+    Classifier& Classifier::fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing)
     {
         this->dataset = dataset;
         const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble);
-        return build(features, className, states, weights);
+        return build(features, className, states, weights, smoothing);
     }
-    Classifier& Classifier::fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights)
+    Classifier& Classifier::fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const Smoothing_t smoothing)
     {
         this->dataset = dataset;
-        return build(features, className, states, weights);
+        return build(features, className, states, weights, smoothing);
     }
     void Classifier::checkFitParameters()
     {
@@ -191,4 +190,4 @@ namespace bayesnet {
         throw std::invalid_argument("Invalid hyperparameters" + hyperparameters.dump());
         }
     }
 }

@@ -8,7 +8,6 @@
 #define CLASSIFIER_H
 #include <torch/torch.h>
 #include "bayesnet/utils/BayesMetrics.h"
-#include "bayesnet/network/Network.h"
 #include "bayesnet/BaseClassifier.h"
 namespace bayesnet {
@@ -16,10 +15,10 @@ namespace bayesnet {
     public:
         Classifier(Network model);
         virtual ~Classifier() = default;
-        Classifier& fit(std::vector<std::vector<int>>& X, std::vector<int>& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states) override;
-        Classifier& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states) override;
-        Classifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states) override;
-        Classifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights) override;
+        Classifier& fit(std::vector<std::vector<int>>& X, std::vector<int>& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) override;
+        Classifier& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) override;
+        Classifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) override;
+        Classifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const Smoothing_t smoothing) override;
         void addNodes();
         int getNumberOfNodes() const override;
         int getNumberOfEdges() const override;
@@ -47,14 +46,13 @@ namespace bayesnet {
         std::string className;
         std::map<std::string, std::vector<int>> states;
         torch::Tensor dataset; // (n+1)xm tensor
-        status_t status = NORMAL;
-        std::vector<std::string> notes; // Used to store messages occurred during the fit process
         void checkFitParameters();
         virtual void buildModel(const torch::Tensor& weights) = 0;
-        void trainModel(const torch::Tensor& weights) override;
+        void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) override;
         void buildDataset(torch::Tensor& y);
+        const std::string CLASSIFIER_NOT_FITTED = "Classifier has not been fitted";
     private:
-        Classifier& build(const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights);
+        Classifier& build(const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const Smoothing_t smoothing);
     };
 }
 #endif

@@ -3,7 +3,7 @@
 // SPDX-FileType: SOURCE
 // SPDX-License-Identifier: MIT
 // ***************************************************************
-#include "bayesnet/utils/bayesnetUtils.h"
 #include "KDB.h"
 namespace bayesnet {

@@ -7,15 +7,14 @@
 #ifndef KDB_H
 #define KDB_H
 #include <torch/torch.h>
-#include "bayesnet/utils/bayesnetUtils.h"
 #include "Classifier.h"
 namespace bayesnet {
     class KDB : public Classifier {
     private:
         int k;
         float theta;
+        void add_m_edges(int idx, std::vector<int>& S, torch::Tensor& weights);
     protected:
-        void add_m_edges(int idx, std::vector<int>& S, torch::Tensor& weights);
         void buildModel(const torch::Tensor& weights) override;
     public:
         explicit KDB(int k, float theta = 0.03);
@@ -24,4 +23,4 @@ namespace bayesnet {
         std::vector<std::string> graph(const std::string& name = "KDB") const override;
     };
 }
 #endif

@@ -8,7 +8,7 @@
 namespace bayesnet {
     KDBLd::KDBLd(int k) : KDB(k), Proposal(dataset, features, className) {}
-    KDBLd& KDBLd::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_)
+    KDBLd& KDBLd::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_, const Smoothing_t smoothing)
     {
         checkInput(X_, y_);
         features = features_;
@@ -19,7 +19,7 @@ namespace bayesnet {
         states = fit_local_discretization(y);
         // We have discretized the input data
         // 1st we need to fit the model to build the normal KDB structure, KDB::fit initializes the base Bayesian network
-        KDB::fit(dataset, features, className, states);
+        KDB::fit(dataset, features, className, states, smoothing);
         states = localDiscretizationProposal(states, model);
         return *this;
     }

@@ -15,7 +15,7 @@ namespace bayesnet {
     public:
         explicit KDBLd(int k);
         virtual ~KDBLd() = default;
-        KDBLd& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states) override;
+        KDBLd& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) override;
         std::vector<std::string> graph(const std::string& name = "KDB") const override;
         torch::Tensor predict(torch::Tensor& X) override;
         static inline std::string version() { return "0.0.1"; };

@@ -11,7 +11,7 @@ namespace bayesnet {
     Proposal::~Proposal()
     {
         for (auto& [key, value] : discretizers) {
             delete value;
         }
     }
     void Proposal::checkInput(const torch::Tensor& X, const torch::Tensor& y)
@@ -70,7 +70,7 @@ namespace bayesnet {
             states[pFeatures[index]] = xStates;
         }
         const torch::Tensor weights = torch::full({ pDataset.size(1) }, 1.0 / pDataset.size(1), torch::kDouble);
-        model.fit(pDataset, weights, pFeatures, pClassName, states);
+        model.fit(pDataset, weights, pFeatures, pClassName, states, Smoothing_t::ORIGINAL);
     }
     return states;
 }
@@ -126,4 +126,4 @@ namespace bayesnet {
         }
         return yy;
     }
 }

@@ -9,7 +9,7 @@
 #include <string>
 #include <map>
 #include <torch/torch.h>
-#include <CPPFImdlp.h>
+#include <fimdlp/CPPFImdlp.h>
 #include "bayesnet/network/Network.h"
 #include "Classifier.h"

@@ -8,14 +8,29 @@
 namespace bayesnet {
-    SPODE::SPODE(int root) : Classifier(Network()), root(root) {}
+    SPODE::SPODE(int root) : Classifier(Network()), root(root)
+    {
+        validHyperparameters = { "parent" };
+    }
+    void SPODE::setHyperparameters(const nlohmann::json& hyperparameters_)
+    {
+        auto hyperparameters = hyperparameters_;
+        if (hyperparameters.contains("parent")) {
+            root = hyperparameters["parent"];
+            hyperparameters.erase("parent");
+        }
+        Classifier::setHyperparameters(hyperparameters);
+    }
     void SPODE::buildModel(const torch::Tensor& weights)
     {
         // 0. Add all nodes to the model
         addNodes();
         // 1. Add edges from the class node to all other nodes
         // 2. Add edges from the root node to all other nodes
+        if (root >= static_cast<int>(features.size())) {
+            throw std::invalid_argument("The parent node is not in the dataset");
+        }
         for (int i = 0; i < static_cast<int>(features.size()); ++i) {
             model.addEdge(className, features[i]);
             if (i != root) {

@@ -10,14 +10,15 @@
 namespace bayesnet {
     class SPODE : public Classifier {
-    private:
-        int root;
-    protected:
-        void buildModel(const torch::Tensor& weights) override;
     public:
         explicit SPODE(int root);
         virtual ~SPODE() = default;
+        void setHyperparameters(const nlohmann::json& hyperparameters_) override;
         std::vector<std::string> graph(const std::string& name = "SPODE") const override;
+    protected:
+        void buildModel(const torch::Tensor& weights) override;
+    private:
+        int root;
     };
 }
 #endif

@@ -8,25 +8,25 @@
 namespace bayesnet {
     SPODELd::SPODELd(int root) : SPODE(root), Proposal(dataset, features, className) {}
-    SPODELd& SPODELd::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_)
+    SPODELd& SPODELd::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_, const Smoothing_t smoothing)
     {
         checkInput(X_, y_);
         Xf = X_;
         y = y_;
-        return commonFit(features_, className_, states_);
+        return commonFit(features_, className_, states_, smoothing);
     }
-    SPODELd& SPODELd::fit(torch::Tensor& dataset, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_)
+    SPODELd& SPODELd::fit(torch::Tensor& dataset, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_, const Smoothing_t smoothing)
     {
         if (!torch::is_floating_point(dataset)) {
             throw std::runtime_error("Dataset must be a floating point tensor");
         }
         Xf = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), "..." }).clone();
         y = dataset.index({ -1, "..." }).clone().to(torch::kInt32);
-        return commonFit(features_, className_, states_);
+        return commonFit(features_, className_, states_, smoothing);
     }
-    SPODELd& SPODELd::commonFit(const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_)
+    SPODELd& SPODELd::commonFit(const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_, const Smoothing_t smoothing)
     {
         features = features_;
         className = className_;
@@ -34,7 +34,7 @@ namespace bayesnet {
         states = fit_local_discretization(y);
         // We have discretized the input data
         // 1st we need to fit the model to build the normal SPODE structure, SPODE::fit initializes the base Bayesian network
-        SPODE::fit(dataset, features, className, states);
+        SPODE::fit(dataset, features, className, states, smoothing);
         states = localDiscretizationProposal(states, model);
         return *this;
     }

@@ -14,10 +14,10 @@ namespace bayesnet {
     public:
         explicit SPODELd(int root);
         virtual ~SPODELd() = default;
-        SPODELd& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states) override;
-        SPODELd& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states) override;
-        SPODELd& commonFit(const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states);
-        std::vector<std::string> graph(const std::string& name = "SPODE") const override;
+        SPODELd& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) override;
+        SPODELd& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) override;
+        SPODELd& commonFit(const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states, const Smoothing_t smoothing);
+        std::vector<std::string> graph(const std::string& name = "SPODELd") const override;
         torch::Tensor predict(torch::Tensor& X) override;
         static inline std::string version() { return "0.0.1"; };
     };


@@ -35,4 +35,4 @@ namespace bayesnet {
        return model.graph(name);
    }
}


@@ -7,8 +7,20 @@
#include "TAN.h" #include "TAN.h"
namespace bayesnet { namespace bayesnet {
TAN::TAN() : Classifier(Network()) {} TAN::TAN() : Classifier(Network())
{
validHyperparameters = { "parent" };
}
void TAN::setHyperparameters(const nlohmann::json& hyperparameters_)
{
auto hyperparameters = hyperparameters_;
if (hyperparameters.contains("parent")) {
parent = hyperparameters["parent"];
hyperparameters.erase("parent");
}
Classifier::setHyperparameters(hyperparameters);
}
void TAN::buildModel(const torch::Tensor& weights) void TAN::buildModel(const torch::Tensor& weights)
{ {
// 0. Add all nodes to the model // 0. Add all nodes to the model
@@ -23,7 +35,10 @@ namespace bayesnet {
mi.push_back({ i, mi_value }); mi.push_back({ i, mi_value });
} }
sort(mi.begin(), mi.end(), [](const auto& left, const auto& right) {return left.second < right.second;}); sort(mi.begin(), mi.end(), [](const auto& left, const auto& right) {return left.second < right.second;});
auto root = mi[mi.size() - 1].first; auto root = parent == -1 ? mi[mi.size() - 1].first : parent;
if (root >= static_cast<int>(features.size())) {
throw std::invalid_argument("The parent node is not in the dataset");
}
// 2. Compute mutual information between each feature and the class // 2. Compute mutual information between each feature and the class
auto weights_matrix = metrics.conditionalEdge(weights); auto weights_matrix = metrics.conditionalEdge(weights);
// 3. Compute the maximum spanning tree // 3. Compute the maximum spanning tree


@@ -9,13 +9,15 @@
#include "Classifier.h" #include "Classifier.h"
namespace bayesnet { namespace bayesnet {
class TAN : public Classifier { class TAN : public Classifier {
private:
protected:
void buildModel(const torch::Tensor& weights) override;
public: public:
TAN(); TAN();
virtual ~TAN() = default; virtual ~TAN() = default;
void setHyperparameters(const nlohmann::json& hyperparameters_) override;
std::vector<std::string> graph(const std::string& name = "TAN") const override; std::vector<std::string> graph(const std::string& name = "TAN") const override;
protected:
void buildModel(const torch::Tensor& weights) override;
private:
int parent = -1;
}; };
} }
#endif #endif
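The new parent hyperparameter lets the caller pin the root of the maximum spanning tree instead of taking the feature with the highest mutual information (parent = -1 keeps the old behaviour). A hedged sketch of setting it through the JSON interface (the index and names are illustrative):

    bayesnet::TAN clf;
    clf.setHyperparameters(nlohmann::json{ { "parent", 2 } });  // feature 2 becomes the root
    // With an index >= features.size(), buildModel throws
    // std::invalid_argument("The parent node is not in the dataset") while fitting.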


@@ -8,7 +8,7 @@
namespace bayesnet {
    TANLd::TANLd() : TAN(), Proposal(dataset, features, className) {}
-   TANLd& TANLd::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_)
+   TANLd& TANLd::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_, const Smoothing_t smoothing)
    {
        checkInput(X_, y_);
        features = features_;
@@ -19,7 +19,7 @@ namespace bayesnet {
        states = fit_local_discretization(y);
        // We have discretized the input data
        // 1st we need to fit the model to build the normal TAN structure, TAN::fit initializes the base Bayesian network
-       TAN::fit(dataset, features, className, states);
+       TAN::fit(dataset, features, className, states, smoothing);
        states = localDiscretizationProposal(states, model);
        return *this;


@@ -15,10 +15,9 @@ namespace bayesnet {
public:
    TANLd();
    virtual ~TANLd() = default;
-   TANLd& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states) override;
+   TANLd& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) override;
-   std::vector<std::string> graph(const std::string& name = "TAN") const override;
+   std::vector<std::string> graph(const std::string& name = "TANLd") const override;
    torch::Tensor predict(torch::Tensor& X) override;
-   static inline std::string version() { return "0.0.1"; };
};
}
#endif // !TANLD_H


@@ -0,0 +1,575 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#include "XSP2DE.h"
#include <pthread.h> // for pthread_setname_np on linux
#include <cassert>
#include <cmath>
#include <limits>
#include <stdexcept>
#include <iostream>
#include "bayesnet/utils/TensorUtils.h"
namespace bayesnet {
// --------------------------------------
// Constructor
// --------------------------------------
XSp2de::XSp2de(int spIndex1, int spIndex2)
: superParent1_{ spIndex1 }
, superParent2_{ spIndex2 }
, nFeatures_{0}
, statesClass_{0}
, alpha_{1.0}
, initializer_{1.0}
, semaphore_{ CountingSemaphore::getInstance() }
, Classifier(Network())
{
validHyperparameters = { "parent1", "parent2" };
}
// --------------------------------------
// setHyperparameters
// --------------------------------------
void XSp2de::setHyperparameters(const nlohmann::json &hyperparameters_)
{
auto hyperparameters = hyperparameters_;
if (hyperparameters.contains("parent1")) {
superParent1_ = hyperparameters["parent1"];
hyperparameters.erase("parent1");
}
if (hyperparameters.contains("parent2")) {
superParent2_ = hyperparameters["parent2"];
hyperparameters.erase("parent2");
}
// Hand off anything else to base Classifier
Classifier::setHyperparameters(hyperparameters);
}
// --------------------------------------
// fitx
// --------------------------------------
void XSp2de::fitx(torch::Tensor & X, torch::Tensor & y,
torch::Tensor & weights_, const Smoothing_t smoothing)
{
m = X.size(1); // number of samples
n = X.size(0); // number of features
dataset = X;
// Build the dataset in your environment if needed:
buildDataset(y);
// Construct the data structures needed for counting
buildModel(weights_);
// Accumulate counts & convert to probabilities
trainModel(weights_, smoothing);
fitted = true;
}
// --------------------------------------
// buildModel
// --------------------------------------
void XSp2de::buildModel(const torch::Tensor &weights)
{
nFeatures_ = n;
// Derive the number of states for each feature from the dataset
// states_[f] = max value in dataset[f] + 1.
states_.resize(nFeatures_);
for (int f = 0; f < nFeatures_; f++) {
// This is naive: we take max in feature f. You might adapt for real data.
states_[f] = dataset[f].max().item<int>() + 1;
}
// Class states:
statesClass_ = dataset[-1].max().item<int>() + 1;
// Initialize the class counts
classCounts_.resize(statesClass_, 0.0);
// For sp1 -> p(sp1Val| c)
sp1FeatureCounts_.resize(states_[superParent1_] * statesClass_, 0.0);
// For sp2 -> p(sp2Val| c)
sp2FeatureCounts_.resize(states_[superParent2_] * statesClass_, 0.0);
// For child features, we store p(childVal | c, sp1Val, sp2Val).
// childCounts_ will hold raw counts. We'll gather them in one big vector.
// We need an offset for each feature.
childOffsets_.resize(nFeatures_, -1);
int totalSize = 0;
for (int f = 0; f < nFeatures_; f++) {
if (f == superParent1_ || f == superParent2_) {
// skip the superparents
childOffsets_[f] = -1;
continue;
}
childOffsets_[f] = totalSize;
// block size for a single child f: states_[f] * statesClass_
// * states_[superParent1_]
// * states_[superParent2_].
totalSize += (states_[f] * statesClass_
* states_[superParent1_]
* states_[superParent2_]);
}
childCounts_.resize(totalSize, 0.0);
}
// --------------------------------------
// trainModel
// --------------------------------------
void XSp2de::trainModel(const torch::Tensor &weights,
const bayesnet::Smoothing_t smoothing)
{
// Accumulate raw counts
for (int i = 0; i < m; i++) {
std::vector<int> instance(nFeatures_ + 1);
for (int f = 0; f < nFeatures_; f++) {
instance[f] = dataset[f][i].item<int>();
}
instance[nFeatures_] = dataset[-1][i].item<int>(); // class
double w = weights[i].item<double>();
addSample(instance, w);
}
// Choose alpha based on smoothing:
switch (smoothing) {
case bayesnet::Smoothing_t::ORIGINAL:
alpha_ = 1.0 / m;
break;
case bayesnet::Smoothing_t::LAPLACE:
alpha_ = 1.0;
break;
default:
alpha_ = 0.0; // no smoothing
}
// Large initializer factor for numerical stability
initializer_ = std::numeric_limits<double>::max() / (nFeatures_ * nFeatures_);
// Convert raw counts to probabilities
computeProbabilities();
}
// --------------------------------------
// addSample
// --------------------------------------
void XSp2de::addSample(const std::vector<int> &instance, double weight)
{
if (weight <= 0.0)
return;
int c = instance.back();
// increment classCounts
classCounts_[c] += weight;
int sp1Val = instance[superParent1_];
int sp2Val = instance[superParent2_];
// p(sp1|c)
sp1FeatureCounts_[sp1Val * statesClass_ + c] += weight;
// p(sp2|c)
sp2FeatureCounts_[sp2Val * statesClass_ + c] += weight;
// p(childVal| c, sp1Val, sp2Val)
for (int f = 0; f < nFeatures_; f++) {
if (f == superParent1_ || f == superParent2_)
continue;
int childVal = instance[f];
int offset = childOffsets_[f];
// block layout:
// offset + (sp1Val*(states_[superParent2_] * states_[f] * statesClass_))
// + (sp2Val*(states_[f]* statesClass_))
// + childVal*(statesClass_)
// + c
int blockSizeSp2 = states_[superParent2_]
* states_[f]
* statesClass_;
int blockSizeChild = states_[f] * statesClass_;
int idx = offset
+ sp1Val*blockSizeSp2
+ sp2Val*blockSizeChild
+ childVal*statesClass_
+ c;
childCounts_[idx] += weight;
}
}
// --------------------------------------
// computeProbabilities
// --------------------------------------
void XSp2de::computeProbabilities()
{
double totalCount = std::accumulate(classCounts_.begin(),
classCounts_.end(), 0.0);
// classPriors_
classPriors_.resize(statesClass_, 0.0);
if (totalCount <= 0.0) {
// fallback => uniform
double unif = 1.0 / static_cast<double>(statesClass_);
for (int c = 0; c < statesClass_; c++) {
classPriors_[c] = unif;
}
} else {
for (int c = 0; c < statesClass_; c++) {
classPriors_[c] =
(classCounts_[c] + alpha_)
/ (totalCount + alpha_ * statesClass_);
}
}
// p(sp1Val| c)
sp1FeatureProbs_.resize(sp1FeatureCounts_.size());
int sp1Card = states_[superParent1_];
for (int spVal = 0; spVal < sp1Card; spVal++) {
for (int c = 0; c < statesClass_; c++) {
double denom = classCounts_[c] + alpha_ * sp1Card;
double num = sp1FeatureCounts_[spVal * statesClass_ + c] + alpha_;
sp1FeatureProbs_[spVal * statesClass_ + c] =
(denom <= 0.0 ? 0.0 : num / denom);
}
}
// p(sp2Val| c)
sp2FeatureProbs_.resize(sp2FeatureCounts_.size());
int sp2Card = states_[superParent2_];
for (int spVal = 0; spVal < sp2Card; spVal++) {
for (int c = 0; c < statesClass_; c++) {
double denom = classCounts_[c] + alpha_ * sp2Card;
double num = sp2FeatureCounts_[spVal * statesClass_ + c] + alpha_;
sp2FeatureProbs_[spVal * statesClass_ + c] =
(denom <= 0.0 ? 0.0 : num / denom);
}
}
// p(childVal| c, sp1Val, sp2Val)
childProbs_.resize(childCounts_.size());
int offset = 0;
for (int f = 0; f < nFeatures_; f++) {
if (f == superParent1_ || f == superParent2_)
continue;
int fCard = states_[f];
int sp1Card_ = states_[superParent1_];
int sp2Card_ = states_[superParent2_];
int childBlockSizeSp2 = sp2Card_ * fCard * statesClass_;
int childBlockSizeF = fCard * statesClass_;
int blockSize = fCard * sp1Card_ * sp2Card_ * statesClass_;
for (int sp1Val = 0; sp1Val < sp1Card_; sp1Val++) {
for (int sp2Val = 0; sp2Val < sp2Card_; sp2Val++) {
for (int childVal = 0; childVal < fCard; childVal++) {
for (int c = 0; c < statesClass_; c++) {
// index in childCounts_
int idx = offset
+ sp1Val*childBlockSizeSp2
+ sp2Val*childBlockSizeF
+ childVal*statesClass_
+ c;
double num = childCounts_[idx] + alpha_;
// denominator is the count of (sp1Val,sp2Val,c) plus alpha * fCard
// We can find that by summing childVal dimension, but we already
// have it in childCounts_[...] or we can re-check the superparent
// counts if your approach is purely hierarchical.
// Here we'll do it like the XSpode approach: sp1&sp2 are
// conditionally independent given c, so denominators come from
// summing the relevant block or we treat sp1,sp2 as "parents."
// A simpler approach:
double sumSp1Sp2C = 0.0;
// sum over all childVal:
for (int cv = 0; cv < fCard; cv++) {
int idx2 = offset
+ sp1Val*childBlockSizeSp2
+ sp2Val*childBlockSizeF
+ cv*statesClass_ + c;
sumSp1Sp2C += childCounts_[idx2];
}
double denom = sumSp1Sp2C + alpha_ * fCard;
childProbs_[idx] = (denom <= 0.0 ? 0.0 : num / denom);
}
}
}
}
offset += blockSize;
}
}
// --------------------------------------
// predict_proba (single instance)
// --------------------------------------
std::vector<double> XSp2de::predict_proba(const std::vector<int> &instance) const
{
if (!fitted) {
throw std::logic_error(CLASSIFIER_NOT_FITTED);
}
std::vector<double> probs(statesClass_, 0.0);
int sp1Val = instance[superParent1_];
int sp2Val = instance[superParent2_];
// Start with p(c) * p(sp1Val| c) * p(sp2Val| c)
for (int c = 0; c < statesClass_; c++) {
double pC = classPriors_[c];
double pSp1C = sp1FeatureProbs_[sp1Val * statesClass_ + c];
double pSp2C = sp2FeatureProbs_[sp2Val * statesClass_ + c];
probs[c] = pC * pSp1C * pSp2C * initializer_;
}
// Multiply by each child feature f
int offset = 0;
for (int f = 0; f < nFeatures_; f++) {
if (f == superParent1_ || f == superParent2_)
continue;
int valF = instance[f];
int fCard = states_[f];
int sp1Card = states_[superParent1_];
int sp2Card = states_[superParent2_];
int blockSizeSp2 = sp2Card * fCard * statesClass_;
int blockSizeF = fCard * statesClass_;
// base index for childProbs_ for this child and sp1Val, sp2Val
int base = offset
+ sp1Val*blockSizeSp2
+ sp2Val*blockSizeF
+ valF*statesClass_;
for (int c = 0; c < statesClass_; c++) {
probs[c] *= childProbs_[base + c];
}
offset += (fCard * sp1Card * sp2Card * statesClass_);
}
// Normalize
normalize(probs);
return probs;
}
// --------------------------------------
// predict_proba (batch)
// --------------------------------------
std::vector<std::vector<double>> XSp2de::predict_proba(std::vector<std::vector<int>> &test_data)
{
int test_size = test_data[0].size(); // each feature is test_data[f], size = #samples
int sample_size = test_data.size(); // = nFeatures_
std::vector<std::vector<double>> probabilities(
test_size, std::vector<double>(statesClass_, 0.0));
// same concurrency approach
int chunk_size = std::min(150, int(test_size / semaphore_.getMaxCount()) + 1);
std::vector<std::thread> threads;
auto worker = [&](const std::vector<std::vector<int>> &samples,
int begin,
int chunk,
int sample_size,
std::vector<std::vector<double>> &predictions) {
std::string threadName =
"XSp2de-" + std::to_string(begin) + "-" + std::to_string(chunk);
#if defined(__linux__)
pthread_setname_np(pthread_self(), threadName.c_str());
#else
pthread_setname_np(threadName.c_str());
#endif
std::vector<int> instance(sample_size);
for (int sample = begin; sample < begin + chunk; ++sample) {
for (int feature = 0; feature < sample_size; ++feature) {
instance[feature] = samples[feature][sample];
}
predictions[sample] = predict_proba(instance);
}
semaphore_.release();
};
for (int begin = 0; begin < test_size; begin += chunk_size) {
int chunk = std::min(chunk_size, test_size - begin);
semaphore_.acquire();
threads.emplace_back(worker, test_data, begin, chunk, sample_size,
std::ref(probabilities));
}
for (auto &th : threads) {
th.join();
}
return probabilities;
}
// --------------------------------------
// predict (single instance)
// --------------------------------------
int XSp2de::predict(const std::vector<int> &instance) const
{
auto p = predict_proba(instance);
return static_cast<int>(
std::distance(p.begin(), std::max_element(p.begin(), p.end()))
);
}
// --------------------------------------
// predict (batch of data)
// --------------------------------------
std::vector<int> XSp2de::predict(std::vector<std::vector<int>> &test_data)
{
auto probabilities = predict_proba(test_data);
std::vector<int> predictions(probabilities.size(), 0);
for (size_t i = 0; i < probabilities.size(); i++) {
predictions[i] = static_cast<int>(
std::distance(probabilities[i].begin(),
std::max_element(probabilities[i].begin(),
probabilities[i].end()))
);
}
return predictions;
}
// --------------------------------------
// predict (torch::Tensor version)
// --------------------------------------
torch::Tensor XSp2de::predict(torch::Tensor &X)
{
auto X_ = TensorUtils::to_matrix(X);
auto result_v = predict(X_);
return torch::tensor(result_v, torch::kInt32);
}
// --------------------------------------
// predict_proba (torch::Tensor version)
// --------------------------------------
torch::Tensor XSp2de::predict_proba(torch::Tensor &X)
{
auto X_ = TensorUtils::to_matrix(X);
auto result_v = predict_proba(X_);
int n_samples = X.size(1);
torch::Tensor result =
torch::zeros({ n_samples, statesClass_ }, torch::kDouble);
for (int i = 0; i < (int)result_v.size(); ++i) {
result.index_put_({ i, "..." }, torch::tensor(result_v[i]));
}
return result;
}
// --------------------------------------
// score (torch::Tensor version)
// --------------------------------------
float XSp2de::score(torch::Tensor &X, torch::Tensor &y)
{
torch::Tensor y_pred = predict(X);
return (y_pred == y).sum().item<float>() / y.size(0);
}
// --------------------------------------
// score (vector version)
// --------------------------------------
float XSp2de::score(std::vector<std::vector<int>> &X, std::vector<int> &y)
{
auto y_pred = predict(X);
int correct = 0;
for (size_t i = 0; i < y_pred.size(); ++i) {
if (y_pred[i] == y[i]) {
correct++;
}
}
return static_cast<float>(correct) / static_cast<float>(y_pred.size());
}
// --------------------------------------
// Utility: normalize
// --------------------------------------
void XSp2de::normalize(std::vector<double> &v) const
{
double sum = 0.0;
for (auto &val : v) {
sum += val;
}
if (sum > 0.0) {
for (auto &val : v) {
val /= sum;
}
}
}
// --------------------------------------
// to_string
// --------------------------------------
std::string XSp2de::to_string() const
{
std::ostringstream oss;
oss << "----- XSp2de Model -----\n"
<< "nFeatures_ = " << nFeatures_ << "\n"
<< "superParent1_ = " << superParent1_ << "\n"
<< "superParent2_ = " << superParent2_ << "\n"
<< "statesClass_ = " << statesClass_ << "\n\n";
oss << "States: [";
for (auto s : states_) oss << s << " ";
oss << "]\n";
oss << "classCounts_:\n";
for (auto v : classCounts_) oss << v << " ";
oss << "\nclassPriors_:\n";
for (auto v : classPriors_) oss << v << " ";
oss << "\nsp1FeatureCounts_ (size=" << sp1FeatureCounts_.size() << ")\n";
for (auto v : sp1FeatureCounts_) oss << v << " ";
oss << "\nsp2FeatureCounts_ (size=" << sp2FeatureCounts_.size() << ")\n";
for (auto v : sp2FeatureCounts_) oss << v << " ";
oss << "\nchildCounts_ (size=" << childCounts_.size() << ")\n";
for (auto v : childCounts_) oss << v << " ";
oss << "\nchildOffsets_:\n";
for (auto c : childOffsets_) oss << c << " ";
oss << "\n----------------------------------------\n";
return oss.str();
}
// --------------------------------------
// Some introspection about the graph
// --------------------------------------
int XSp2de::getNumberOfNodes() const
{
// nFeatures + 1 class node
return nFeatures_ + 1;
}
int XSp2de::getClassNumStates() const
{
return statesClass_;
}
int XSp2de::getNFeatures() const
{
return nFeatures_;
}
int XSp2de::getNumberOfStates() const
{
// purely an example. Possibly you want to sum up actual
// cardinalities or something else.
return std::accumulate(states_.begin(), states_.end(), 0) * nFeatures_;
}
int XSp2de::getNumberOfEdges() const
{
// In an SPNDE with n=2, for each feature we have edges from class, sp1, sp2.
// So that's 3*(nFeatures_) edges, minus the ones for the superparents themselves,
// plus the edges from class->superparent1, class->superparent2.
// For a quick approximation:
// - class->sp1, class->sp2 => 2 edges
// - class->child => (nFeatures -2) edges
// - sp1->child, sp2->child => 2*(nFeatures -2) edges
// total = 2 + (nFeatures-2) + 2*(nFeatures-2) = 2 + 3*(nFeatures-2)
// = 3nFeatures - 4 (just an example).
// You can adapt to your liking:
return 3 * nFeatures_ - 4;
}
} // namespace bayesnet
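Because childCounts_ is a single flat vector, the four coordinates (sp1Val, sp2Val, childVal, c) are collapsed exactly as in addSample above. A worked check of that layout with assumed cardinalities:

    // Assumed sizes: states_[sp1] = 3, states_[sp2] = 3, states_[f] = 4, statesClass_ = 2.
    int statesClass = 2, fCard = 4, sp2Card = 3, sp1Card = 3;
    int blockSizeSp2 = sp2Card * fCard * statesClass;    // 24 entries per sp1 value
    int blockSizeChild = fCard * statesClass;            // 8 entries per sp2 value
    // Coordinates (sp1Val = 2, sp2Val = 1, childVal = 3, c = 1) at offset 0:
    int idx = 2 * blockSizeSp2 + 1 * blockSizeChild + 3 * statesClass + 1;  // 48 + 8 + 6 + 1 = 63
    // The whole block for this child holds fCard * sp1Card * sp2Card * statesClass = 72
    // entries, so every index lands in [0, 71] and blocks of different children never overlap.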


@@ -0,0 +1,75 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef XSP2DE_H
#define XSP2DE_H
#include "Classifier.h"
#include "bayesnet/utils/CountingSemaphore.h"
#include <torch/torch.h>
#include <vector>
namespace bayesnet {
class XSp2de : public Classifier {
public:
XSp2de(int spIndex1, int spIndex2);
void setHyperparameters(const nlohmann::json &hyperparameters_) override;
void fitx(torch::Tensor &X, torch::Tensor &y, torch::Tensor &weights_, const Smoothing_t smoothing);
std::vector<double> predict_proba(const std::vector<int> &instance) const;
std::vector<std::vector<double>> predict_proba(std::vector<std::vector<int>> &test_data) override;
int predict(const std::vector<int> &instance) const;
std::vector<int> predict(std::vector<std::vector<int>> &test_data) override;
torch::Tensor predict(torch::Tensor &X) override;
torch::Tensor predict_proba(torch::Tensor &X) override;
float score(torch::Tensor &X, torch::Tensor &y) override;
float score(std::vector<std::vector<int>> &X, std::vector<int> &y) override;
std::string to_string() const;
std::vector<std::string> graph(const std::string &title) const override {
return std::vector<std::string>({title});
}
int getNumberOfNodes() const override;
int getNumberOfEdges() const override;
int getNFeatures() const;
int getClassNumStates() const override;
int getNumberOfStates() const override;
protected:
void buildModel(const torch::Tensor &weights) override;
void trainModel(const torch::Tensor &weights, const bayesnet::Smoothing_t smoothing) override;
private:
void addSample(const std::vector<int> &instance, double weight);
void normalize(std::vector<double> &v) const;
void computeProbabilities();
int superParent1_;
int superParent2_;
int nFeatures_;
int statesClass_;
double alpha_;
double initializer_;
std::vector<int> states_;
std::vector<double> classCounts_;
std::vector<double> classPriors_;
std::vector<double> sp1FeatureCounts_, sp1FeatureProbs_;
std::vector<double> sp2FeatureCounts_, sp2FeatureProbs_;
// childOffsets_[f] will be the offset into childCounts_ for feature f.
// If f is either superParent1 or superParent2, childOffsets_[f] = -1
std::vector<int> childOffsets_;
// For each child f, we store p(x_f | c, sp1Val, sp2Val). We'll store the raw
// counts in childCounts_, and the probabilities in childProbs_, with a
// dimension block of size: states_[f]* statesClass_* states_[sp1]* states_[sp2].
std::vector<double> childCounts_;
std::vector<double> childProbs_;
CountingSemaphore &semaphore_;
};
} // namespace bayesnet
#endif // XSP2DE_H
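A hedged end-to-end sketch against this interface (shapes and values are illustrative; the dataset layout is features x samples, and fitx appends the class row through buildDataset):

    // Toy problem: 3 discrete features, 4 samples, binary class (all values assumed).
    auto X = torch::tensor({ { 0, 1, 0, 1 }, { 1, 1, 0, 0 }, { 0, 0, 1, 1 } }, torch::kInt32);
    auto y = torch::tensor({ 0, 1, 0, 1 }, torch::kInt32);
    auto w = torch::full({ 4 }, 1.0, torch::kDouble);            // uniform sample weights
    bayesnet::XSp2de clf(0, 1);                                  // superparents: features 0 and 1
    clf.setHyperparameters(nlohmann::json{ { "parent1", 0 }, { "parent2", 2 } });
    clf.fitx(X, y, w, bayesnet::Smoothing_t::ORIGINAL);          // ORIGINAL => alpha_ = 1/m
    auto y_pred = clf.predict(X);                                // tensor of predicted classes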


@@ -0,0 +1,450 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#include <algorithm>
#include <cmath>
#include <limits>
#include <numeric>
#include <sstream>
#include <stdexcept>
#include "XSPODE.h"
#include "bayesnet/utils/TensorUtils.h"
namespace bayesnet {
// --------------------------------------
// Constructor
// --------------------------------------
XSpode::XSpode(int spIndex)
: superParent_{ spIndex }, nFeatures_{ 0 }, statesClass_{ 0 }, alpha_{ 1.0 },
initializer_{ 1.0 }, semaphore_{ CountingSemaphore::getInstance() },
Classifier(Network())
{
validHyperparameters = { "parent" };
}
void XSpode::setHyperparameters(const nlohmann::json& hyperparameters_)
{
auto hyperparameters = hyperparameters_;
if (hyperparameters.contains("parent")) {
superParent_ = hyperparameters["parent"];
hyperparameters.erase("parent");
}
Classifier::setHyperparameters(hyperparameters);
}
void XSpode::fitx(torch::Tensor & X, torch::Tensor& y, torch::Tensor& weights_, const Smoothing_t smoothing)
{
m = X.size(1);
n = X.size(0);
dataset = X;
buildDataset(y);
buildModel(weights_);
trainModel(weights_, smoothing);
fitted = true;
}
// --------------------------------------
// buildModel
// --------------------------------------
// Initialize storage needed for the super-parent and child features counts and
// probs.
// --------------------------------------
void XSpode::buildModel(const torch::Tensor& weights)
{
int numInstances = m;
nFeatures_ = n;
// Derive the number of states for each feature and for the class.
// (This is just one approach; adapt to match your environment.)
// Here, we assume the user also gave us the total #states per feature in e.g.
// statesMap. We'll simply reconstruct the integer states_ array. The last
// entry is statesClass_.
states_.resize(nFeatures_);
for (int f = 0; f < nFeatures_; f++) {
// Suppose you look up in “statesMap” by the feature name, or read directly
// from X. We'll assume states_[f] = max value in X[f] + 1.
states_[f] = dataset[f].max().item<int>() + 1;
}
// For the class: states_.back() = max(y)+1
statesClass_ = dataset[-1].max().item<int>() + 1;
// Initialize counts
classCounts_.resize(statesClass_, 0.0);
// p(x_sp = spVal | c)
// We'll store these counts in spFeatureCounts_[spVal * statesClass_ + c].
spFeatureCounts_.resize(states_[superParent_] * statesClass_, 0.0);
// For each child ≠ sp, we store p(childVal| c, spVal) in a separate block of
// childCounts_. childCounts_ will be sized as sum_{child≠sp} (states_[child]
// * statesClass_ * states_[sp]). We also need an offset for each child to
// index into childCounts_.
childOffsets_.resize(nFeatures_, -1);
int totalSize = 0;
for (int f = 0; f < nFeatures_; f++) {
if (f == superParent_)
continue; // skip sp
childOffsets_[f] = totalSize;
// block size for this child's counts: states_[f] * statesClass_ *
// states_[superParent_]
totalSize += (states_[f] * statesClass_ * states_[superParent_]);
}
childCounts_.resize(totalSize, 0.0);
}
// --------------------------------------
// trainModel
// --------------------------------------
//
// We only store conditional probabilities for:
// p(x_sp| c) (the super-parent feature)
// p(x_child| c, x_sp) for all child ≠ sp
//
// --------------------------------------
void XSpode::trainModel(const torch::Tensor& weights,
const bayesnet::Smoothing_t smoothing)
{
// Accumulate raw counts
for (int i = 0; i < m; i++) {
std::vector<int> instance(nFeatures_ + 1);
for (int f = 0; f < nFeatures_; f++) {
instance[f] = dataset[f][i].item<int>();
}
instance[nFeatures_] = dataset[-1][i].item<int>();
addSample(instance, weights[i].item<double>());
}
switch (smoothing) {
case bayesnet::Smoothing_t::ORIGINAL:
alpha_ = 1.0 / m;
break;
case bayesnet::Smoothing_t::LAPLACE:
alpha_ = 1.0;
break;
default:
alpha_ = 0.0; // No smoothing
}
initializer_ = std::numeric_limits<double>::max() /
(nFeatures_ * nFeatures_); // for numerical stability
// Convert raw counts to probabilities
computeProbabilities();
}
// --------------------------------------
// addSample
// --------------------------------------
//
// instance has size nFeatures_ + 1, with the class at the end.
// We add 1 to the appropriate counters for each (c, superParentVal, childVal).
//
void XSpode::addSample(const std::vector<int>& instance, double weight)
{
if (weight <= 0.0)
return;
int c = instance.back();
// (A) increment classCounts
classCounts_[c] += weight;
// (B) increment super-parent counts => p(x_sp | c)
int spVal = instance[superParent_];
spFeatureCounts_[spVal * statesClass_ + c] += weight;
// (C) increment child counts => p(childVal | c, x_sp)
for (int f = 0; f < nFeatures_; f++) {
if (f == superParent_)
continue;
int childVal = instance[f];
int offset = childOffsets_[f];
// Compute index in childCounts_.
// Layout: [ offset + (spVal * states_[f] + childVal) * statesClass_ + c ]
int blockSize = states_[f] * statesClass_;
int idx = offset + spVal * blockSize + childVal * statesClass_ + c;
childCounts_[idx] += weight;
}
}
// --------------------------------------
// computeProbabilities
// --------------------------------------
//
// Once all samples are added in COUNTS mode, call this to:
// p(c)
// p(x_sp = spVal | c)
// p(x_child = v | c, x_sp = s_sp)
//
// --------------------------------------
void XSpode::computeProbabilities()
{
double totalCount =
std::accumulate(classCounts_.begin(), classCounts_.end(), 0.0);
// p(c) => classPriors_
classPriors_.resize(statesClass_, 0.0);
if (totalCount <= 0.0) {
// fallback => uniform
double unif = 1.0 / static_cast<double>(statesClass_);
for (int c = 0; c < statesClass_; c++) {
classPriors_[c] = unif;
}
} else {
for (int c = 0; c < statesClass_; c++) {
classPriors_[c] =
(classCounts_[c] + alpha_) / (totalCount + alpha_ * statesClass_);
}
}
// p(x_sp | c)
spFeatureProbs_.resize(spFeatureCounts_.size());
// denominator for spVal * statesClass_ + c is just classCounts_[c] + alpha_ *
// (#states of sp)
int spCard = states_[superParent_];
for (int spVal = 0; spVal < spCard; spVal++) {
for (int c = 0; c < statesClass_; c++) {
double denom = classCounts_[c] + alpha_ * spCard;
double num = spFeatureCounts_[spVal * statesClass_ + c] + alpha_;
spFeatureProbs_[spVal * statesClass_ + c] = (denom <= 0.0 ? 0.0 : num / denom);
}
}
// p(x_child | c, x_sp)
childProbs_.resize(childCounts_.size());
for (int f = 0; f < nFeatures_; f++) {
if (f == superParent_)
continue;
int offset = childOffsets_[f];
int childCard = states_[f];
// For each spVal, c, childVal in childCounts_:
for (int spVal = 0; spVal < spCard; spVal++) {
for (int childVal = 0; childVal < childCard; childVal++) {
for (int c = 0; c < statesClass_; c++) {
int idx = offset + spVal * (childCard * statesClass_) +
childVal * statesClass_ + c;
double num = childCounts_[idx] + alpha_;
// denominator = spFeatureCounts_[spVal * statesClass_ + c] + alpha_ *
// (#states of child)
double denom =
spFeatureCounts_[spVal * statesClass_ + c] + alpha_ * childCard;
childProbs_[idx] = (denom <= 0.0 ? 0.0 : num / denom);
}
}
}
}
}
// --------------------------------------
// predict_proba
// --------------------------------------
//
// For a single instance x of dimension nFeatures_:
// P(c | x) ∝ p(c) × p(x_sp | c) × ∏(child ≠ sp) p(x_child | c, x_sp).
//
// --------------------------------------
std::vector<double> XSpode::predict_proba(const std::vector<int>& instance) const
{
if (!fitted) {
throw std::logic_error(CLASSIFIER_NOT_FITTED);
}
std::vector<double> probs(statesClass_, 0.0);
// Multiply p(c) × p(x_sp | c)
int spVal = instance[superParent_];
for (int c = 0; c < statesClass_; c++) {
double pc = classPriors_[c];
double pSpC = spFeatureProbs_[spVal * statesClass_ + c];
probs[c] = pc * pSpC * initializer_;
}
// Multiply by each child's probability p(x_child | c, x_sp)
for (int feature = 0; feature < nFeatures_; feature++) {
if (feature == superParent_)
continue; // skip sp
int sf = instance[feature];
int offset = childOffsets_[feature];
int childCard = states_[feature];
// Index into childProbs_ = offset + spVal*(childCard*statesClass_) +
// childVal*statesClass_ + c
int base = offset + spVal * (childCard * statesClass_) + sf * statesClass_;
for (int c = 0; c < statesClass_; c++) {
probs[c] *= childProbs_[base + c];
}
}
// Normalize
normalize(probs);
return probs;
}
std::vector<std::vector<double>> XSpode::predict_proba(std::vector<std::vector<int>>& test_data)
{
int test_size = test_data[0].size();
int sample_size = test_data.size();
auto probabilities = std::vector<std::vector<double>>(
test_size, std::vector<double>(statesClass_));
int chunk_size = std::min(150, int(test_size / semaphore_.getMaxCount()) + 1);
std::vector<std::thread> threads;
auto worker = [&](const std::vector<std::vector<int>>& samples, int begin,
int chunk, int sample_size,
std::vector<std::vector<double>>& predictions) {
std::string threadName =
"(V)PWorker-" + std::to_string(begin) + "-" + std::to_string(chunk);
#if defined(__linux__)
pthread_setname_np(pthread_self(), threadName.c_str());
#else
pthread_setname_np(threadName.c_str());
#endif
std::vector<int> instance(sample_size);
for (int sample = begin; sample < begin + chunk; ++sample) {
for (int feature = 0; feature < sample_size; ++feature) {
instance[feature] = samples[feature][sample];
}
predictions[sample] = predict_proba(instance);
}
semaphore_.release();
};
for (int begin = 0; begin < test_size; begin += chunk_size) {
int chunk = std::min(chunk_size, test_size - begin);
semaphore_.acquire();
threads.emplace_back(worker, test_data, begin, chunk, sample_size, std::ref(probabilities));
}
for (auto& thread : threads) {
thread.join();
}
return probabilities;
}
// --------------------------------------
// Utility: normalize
// --------------------------------------
void XSpode::normalize(std::vector<double>& v) const
{
double sum = 0.0;
for (auto val : v) {
sum += val;
}
if (sum <= 0.0) {
return;
}
for (auto& val : v) {
val /= sum;
}
}
// --------------------------------------
// representation of the model
// --------------------------------------
std::string XSpode::to_string() const
{
std::ostringstream oss;
oss << "----- XSpode Model -----" << std::endl
<< "nFeatures_ = " << nFeatures_ << std::endl
<< "superParent_ = " << superParent_ << std::endl
<< "statesClass_ = " << statesClass_ << std::endl
<< std::endl;
oss << "States: [";
for (int s : states_)
oss << s << " ";
oss << "]" << std::endl;
oss << "classCounts_: [";
for (double c : classCounts_)
oss << c << " ";
oss << "]" << std::endl;
oss << "classPriors_: [";
for (double c : classPriors_)
oss << c << " ";
oss << "]" << std::endl;
oss << "spFeatureCounts_: size = " << spFeatureCounts_.size() << std::endl
<< "[";
for (double c : spFeatureCounts_)
oss << c << " ";
oss << "]" << std::endl;
oss << "spFeatureProbs_: size = " << spFeatureProbs_.size() << std::endl
<< "[";
for (double c : spFeatureProbs_)
oss << c << " ";
oss << "]" << std::endl;
oss << "childCounts_: size = " << childCounts_.size() << std::endl << "[";
for (double cc : childCounts_)
oss << cc << " ";
oss << "]" << std::endl;
for (double cp : childProbs_)
oss << cp << " ";
oss << "]" << std::endl;
oss << "childOffsets_: [";
for (int co : childOffsets_)
oss << co << " ";
oss << "]" << std::endl;
oss << std::string(40,'-') << std::endl;
return oss.str();
}
int XSpode::getNumberOfNodes() const { return nFeatures_ + 1; }
int XSpode::getClassNumStates() const { return statesClass_; }
int XSpode::getNFeatures() const { return nFeatures_; }
int XSpode::getNumberOfStates() const
{
return std::accumulate(states_.begin(), states_.end(), 0) * nFeatures_;
}
int XSpode::getNumberOfEdges() const
{
return 2 * nFeatures_ + 1;
}
// ------------------------------------------------------
// Predict overrides (classifier interface)
// ------------------------------------------------------
int XSpode::predict(const std::vector<int>& instance) const
{
auto p = predict_proba(instance);
return static_cast<int>(std::distance(p.begin(), std::max_element(p.begin(), p.end())));
}
std::vector<int> XSpode::predict(std::vector<std::vector<int>>& test_data)
{
auto probabilities = predict_proba(test_data);
std::vector<int> predictions(probabilities.size(), 0);
for (size_t i = 0; i < probabilities.size(); i++) {
predictions[i] = std::distance(
probabilities[i].begin(),
std::max_element(probabilities[i].begin(), probabilities[i].end()));
}
return predictions;
}
torch::Tensor XSpode::predict(torch::Tensor& X)
{
auto X_ = TensorUtils::to_matrix(X);
auto result_v = predict(X_);
return torch::tensor(result_v, torch::kInt32);
}
torch::Tensor XSpode::predict_proba(torch::Tensor& X)
{
auto X_ = TensorUtils::to_matrix(X);
auto result_v = predict_proba(X_);
int n_samples = X.size(1);
torch::Tensor result =
torch::zeros({ n_samples, statesClass_ }, torch::kDouble);
for (int i = 0; i < static_cast<int>(result_v.size()); ++i) {
result.index_put_({ i, "..." }, torch::tensor(result_v[i]));
}
return result;
}
float XSpode::score(torch::Tensor& X, torch::Tensor& y)
{
torch::Tensor y_pred = predict(X);
return (y_pred == y).sum().item<float>() / y.size(0);
}
float XSpode::score(std::vector<std::vector<int>>& X, std::vector<int>& y)
{
auto y_pred = this->predict(X);
int correct = 0;
for (size_t i = 0; i < y_pred.size(); ++i) {
if (y_pred[i] == y[i]) {
correct++;
}
}
return static_cast<float>(correct) / static_cast<float>(y_pred.size());
}
} // namespace bayesnet
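The smoothing switch in trainModel only fixes the pseudo-count alpha_; computeProbabilities then turns counts into estimates of the form (count + alpha) / (classCount + alpha * cardinality). A worked check on assumed counts:

    // Assumed: N_c = 10 samples of class c, spCard = 3 superparent states,
    // and 4 of those samples with x_sp = s.
    double alpha = 1.0;                                    // Smoothing_t::LAPLACE
    double p_laplace = (4 + alpha) / (10 + alpha * 3);     // 5/13 ≈ 0.385
    alpha = 1.0 / 100;                                     // Smoothing_t::ORIGINAL, m = 100
    double p_original = (4 + alpha) / (10 + alpha * 3);    // ≈ 0.400
    // alpha = 0 (no smoothing) gives exactly 4/10, but unseen (s, c) pairs drop to 0.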


@@ -0,0 +1,76 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef XSPODE_H
#define XSPODE_H
#include <vector>
#include <torch/torch.h>
#include "Classifier.h"
#include "bayesnet/utils/CountingSemaphore.h"
namespace bayesnet {
class XSpode : public Classifier {
public:
explicit XSpode(int spIndex);
std::vector<double> predict_proba(const std::vector<int>& instance) const;
std::vector<std::vector<double>> predict_proba(std::vector<std::vector<int>>& X) override;
int predict(const std::vector<int>& instance) const;
void normalize(std::vector<double>& v) const;
std::string to_string() const;
int getNFeatures() const;
int getNumberOfNodes() const override;
int getNumberOfEdges() const override;
int getNumberOfStates() const override;
int getClassNumStates() const override;
std::vector<int>& getStates();
std::vector<std::string> graph(const std::string& title) const override { return std::vector<std::string>({ title }); }
void fitx(torch::Tensor& X, torch::Tensor& y, torch::Tensor& weights_, const Smoothing_t smoothing);
void setHyperparameters(const nlohmann::json& hyperparameters_) override;
//
// Classifier interface
//
torch::Tensor predict(torch::Tensor& X) override;
std::vector<int> predict(std::vector<std::vector<int>>& X) override;
torch::Tensor predict_proba(torch::Tensor& X) override;
float score(torch::Tensor& X, torch::Tensor& y) override;
float score(std::vector<std::vector<int>>& X, std::vector<int>& y) override;
protected:
void buildModel(const torch::Tensor& weights) override;
void trainModel(const torch::Tensor& weights, const bayesnet::Smoothing_t smoothing) override;
private:
void addSample(const std::vector<int>& instance, double weight);
void computeProbabilities();
int superParent_;
int nFeatures_;
int statesClass_;
std::vector<int> states_; // [states_feat0, ..., states_feat(N-1)] (class not included in this array)
// Class counts
std::vector<double> classCounts_; // [c], accumulative
std::vector<double> classPriors_; // [c], after normalization
// For p(x_sp = spVal | c)
std::vector<double> spFeatureCounts_; // [spVal * statesClass_ + c]
std::vector<double> spFeatureProbs_; // same shape, after normalization
// For p(x_child = childVal | x_sp = spVal, c)
// childCounts_ is big enough to hold all child features except sp:
// For each child f, we store childOffsets_[f] as the start index, then
// childVal, spVal, c => the data.
std::vector<double> childCounts_;
std::vector<double> childProbs_;
std::vector<int> childOffsets_;
double alpha_ = 1.0;
double initializer_; // for numerical stability
CountingSemaphore& semaphore_;
};
}
#endif // XSPODE_H


@@ -10,7 +10,7 @@ namespace bayesnet {
    AODELd::AODELd(bool predict_voting) : Ensemble(predict_voting), Proposal(dataset, features, className)
    {
    }
-   AODELd& AODELd::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_)
+   AODELd& AODELd::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_, const Smoothing_t smoothing)
    {
        checkInput(X_, y_);
        features = features_;
@@ -20,8 +20,9 @@ namespace bayesnet {
        // Fills std::vectors Xv & yv with the data from tensors X_ (discretized) & y
        states = fit_local_discretization(y);
        // We have discretized the input data
-       // 1st we need to fit the model to build the normal TAN structure, TAN::fit initializes the base Bayesian network
-       Ensemble::fit(dataset, features, className, states);
+       // 1st we need to fit the model to build the normal AODE structure, Ensemble::fit
+       // calls buildModel to initialize the base models
+       Ensemble::fit(dataset, features, className, states, smoothing);
        return *this;
    }
@@ -34,10 +35,10 @@ namespace bayesnet {
        n_models = models.size();
        significanceModels = std::vector<double>(n_models, 1.0);
    }
-   void AODELd::trainModel(const torch::Tensor& weights)
+   void AODELd::trainModel(const torch::Tensor& weights, const Smoothing_t smoothing)
    {
        for (const auto& model : models) {
-           model->fit(Xf, y, features, className, states);
+           model->fit(Xf, y, features, className, states, smoothing);
        }
    }
    std::vector<std::string> AODELd::graph(const std::string& name) const


@@ -15,10 +15,10 @@ namespace bayesnet {
public:
    AODELd(bool predict_voting = true);
    virtual ~AODELd() = default;
-   AODELd& fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_) override;
+   AODELd& fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_, const Smoothing_t smoothing) override;
    std::vector<std::string> graph(const std::string& name = "AODELd") const override;
protected:
-   void trainModel(const torch::Tensor& weights) override;
+   void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) override;
    void buildModel(const torch::Tensor& weights) override;
};
}


@@ -3,244 +3,266 @@
// SPDX-FileType: SOURCE // SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT // SPDX-License-Identifier: MIT
// *************************************************************** // ***************************************************************
#include <folding.hpp> #include "Boost.h"
#include "bayesnet/feature_selection/CFS.h" #include "bayesnet/feature_selection/CFS.h"
#include "bayesnet/feature_selection/FCBF.h" #include "bayesnet/feature_selection/FCBF.h"
#include "bayesnet/feature_selection/IWSS.h" #include "bayesnet/feature_selection/IWSS.h"
#include "Boost.h" #include <folding.hpp>
namespace bayesnet { namespace bayesnet {
Boost::Boost(bool predict_voting) : Ensemble(predict_voting) Boost::Boost(bool predict_voting) : Ensemble(predict_voting) {
{ validHyperparameters = {"alpha_block", "order", "convergence", "convergence_best", "bisection",
validHyperparameters = { "order", "convergence", "convergence_best", "bisection", "threshold", "maxTolerance", "threshold", "maxTolerance", "predict_voting", "select_features", "block_update"};
"predict_voting", "select_features", "block_update" }; }
void Boost::setHyperparameters(const nlohmann::json &hyperparameters_) {
auto hyperparameters = hyperparameters_;
if (hyperparameters.contains("order")) {
std::vector<std::string> algos = {Orders.ASC, Orders.DESC, Orders.RAND};
order_algorithm = hyperparameters["order"];
if (std::find(algos.begin(), algos.end(), order_algorithm) == algos.end()) {
throw std::invalid_argument("Invalid order algorithm, valid values [" + Orders.ASC + ", " + Orders.DESC +
", " + Orders.RAND + "]");
}
hyperparameters.erase("order");
} }
void Boost::setHyperparameters(const nlohmann::json& hyperparameters_) if (hyperparameters.contains("alpha_block")) {
{ alpha_block = hyperparameters["alpha_block"];
auto hyperparameters = hyperparameters_; hyperparameters.erase("alpha_block");
if (hyperparameters.contains("order")) {
std::vector<std::string> algos = { Orders.ASC, Orders.DESC, Orders.RAND };
order_algorithm = hyperparameters["order"];
if (std::find(algos.begin(), algos.end(), order_algorithm) == algos.end()) {
throw std::invalid_argument("Invalid order algorithm, valid values [" + Orders.ASC + ", " + Orders.DESC + ", " + Orders.RAND + "]");
}
hyperparameters.erase("order");
}
if (hyperparameters.contains("convergence")) {
convergence = hyperparameters["convergence"];
hyperparameters.erase("convergence");
}
if (hyperparameters.contains("convergence_best")) {
convergence_best = hyperparameters["convergence_best"];
hyperparameters.erase("convergence_best");
}
if (hyperparameters.contains("bisection")) {
bisection = hyperparameters["bisection"];
hyperparameters.erase("bisection");
}
if (hyperparameters.contains("threshold")) {
threshold = hyperparameters["threshold"];
hyperparameters.erase("threshold");
}
if (hyperparameters.contains("maxTolerance")) {
maxTolerance = hyperparameters["maxTolerance"];
if (maxTolerance < 1 || maxTolerance > 4)
throw std::invalid_argument("Invalid maxTolerance value, must be greater in [1, 4]");
hyperparameters.erase("maxTolerance");
}
if (hyperparameters.contains("predict_voting")) {
predict_voting = hyperparameters["predict_voting"];
hyperparameters.erase("predict_voting");
}
if (hyperparameters.contains("select_features")) {
auto selectedAlgorithm = hyperparameters["select_features"];
std::vector<std::string> algos = { SelectFeatures.IWSS, SelectFeatures.CFS, SelectFeatures.FCBF };
selectFeatures = true;
select_features_algorithm = selectedAlgorithm;
if (std::find(algos.begin(), algos.end(), selectedAlgorithm) == algos.end()) {
throw std::invalid_argument("Invalid selectFeatures value, valid values [" + SelectFeatures.IWSS + ", " + SelectFeatures.CFS + ", " + SelectFeatures.FCBF + "]");
}
hyperparameters.erase("select_features");
}
if (hyperparameters.contains("block_update")) {
block_update = hyperparameters["block_update"];
hyperparameters.erase("block_update");
}
Classifier::setHyperparameters(hyperparameters);
} }
void Boost::buildModel(const torch::Tensor& weights) if (hyperparameters.contains("convergence")) {
{ convergence = hyperparameters["convergence"];
// Models shall be built in trainModel hyperparameters.erase("convergence");
models.clear();
significanceModels.clear();
n_models = 0;
// Prepare the validation dataset
auto y_ = dataset.index({ -1, "..." });
if (convergence) {
// Prepare train & validation sets from train data
auto fold = folding::StratifiedKFold(5, y_, 271);
auto [train, test] = fold.getFold(0);
auto train_t = torch::tensor(train);
auto test_t = torch::tensor(test);
// Get train and validation sets
X_train = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), train_t });
y_train = dataset.index({ -1, train_t });
X_test = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), test_t });
y_test = dataset.index({ -1, test_t });
dataset = X_train;
m = X_train.size(1);
auto n_classes = states.at(className).size();
// Build dataset with train data
buildDataset(y_train);
metrics = Metrics(dataset, features, className, n_classes);
} else {
// Use all data to train
X_train = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), "..." });
y_train = y_;
}
} }
std::vector<int> Boost::featureSelection(torch::Tensor& weights_) if (hyperparameters.contains("convergence_best")) {
{ convergence_best = hyperparameters["convergence_best"];
int maxFeatures = 0; hyperparameters.erase("convergence_best");
if (select_features_algorithm == SelectFeatures.CFS) {
featureSelector = new CFS(dataset, features, className, maxFeatures, states.at(className).size(), weights_);
} else if (select_features_algorithm == SelectFeatures.IWSS) {
if (threshold < 0 || threshold >0.5) {
throw std::invalid_argument("Invalid threshold value for " + SelectFeatures.IWSS + " [0, 0.5]");
}
featureSelector = new IWSS(dataset, features, className, maxFeatures, states.at(className).size(), weights_, threshold);
} else if (select_features_algorithm == SelectFeatures.FCBF) {
if (threshold < 1e-7 || threshold > 1) {
throw std::invalid_argument("Invalid threshold value for " + SelectFeatures.FCBF + " [1e-7, 1]");
}
featureSelector = new FCBF(dataset, features, className, maxFeatures, states.at(className).size(), weights_, threshold);
}
featureSelector->fit();
auto featuresUsed = featureSelector->getFeatures();
delete featureSelector;
return featuresUsed;
} }
std::tuple<torch::Tensor&, double, bool> Boost::update_weights(torch::Tensor& ytrain, torch::Tensor& ypred, torch::Tensor& weights) if (hyperparameters.contains("bisection")) {
{ bisection = hyperparameters["bisection"];
bool terminate = false; hyperparameters.erase("bisection");
double alpha_t = 0;
auto mask_wrong = ypred != ytrain;
auto mask_right = ypred == ytrain;
auto masked_weights = weights * mask_wrong.to(weights.dtype());
double epsilon_t = masked_weights.sum().item<double>();
if (epsilon_t > 0.5) {
// Inverse the weights policy (plot ln(wt))
// "In each round of AdaBoost, there is a sanity check to ensure that the current base
// learner is better than random guess" (Zhi-Hua Zhou, 2012)
terminate = true;
} else {
double wt = (1 - epsilon_t) / epsilon_t;
alpha_t = epsilon_t == 0 ? 1 : 0.5 * log(wt);
// Step 3.2: Update weights for next classifier
// Step 3.2.1: Update weights of wrong samples
weights += mask_wrong.to(weights.dtype()) * exp(alpha_t) * weights;
// Step 3.2.2: Update weights of right samples
weights += mask_right.to(weights.dtype()) * exp(-alpha_t) * weights;
// Step 3.3: Normalise the weights
double totalWeights = torch::sum(weights).item<double>();
weights = weights / totalWeights;
}
return { weights, alpha_t, terminate };
} }
std::tuple<torch::Tensor&, double, bool> Boost::update_weights_block(int k, torch::Tensor& ytrain, torch::Tensor& weights) if (hyperparameters.contains("threshold")) {
{ threshold = hyperparameters["threshold"];
/* Update Block algorithm hyperparameters.erase("threshold");
k = # of models in block }
n_models = # of models in ensemble to make predictions if (hyperparameters.contains("maxTolerance")) {
n_models_bak = # models saved maxTolerance = hyperparameters["maxTolerance"];
models = vector of models to make predictions if (maxTolerance < 1 || maxTolerance > 6)
models_bak = models not used to make predictions throw std::invalid_argument("Invalid maxTolerance value, must be greater in [1, 6]");
significances_bak = backup of significances vector hyperparameters.erase("maxTolerance");
}
if (hyperparameters.contains("predict_voting")) {
predict_voting = hyperparameters["predict_voting"];
hyperparameters.erase("predict_voting");
}
if (hyperparameters.contains("select_features")) {
auto selectedAlgorithm = hyperparameters["select_features"];
std::vector<std::string> algos = {SelectFeatures.IWSS, SelectFeatures.CFS, SelectFeatures.FCBF};
selectFeatures = true;
select_features_algorithm = selectedAlgorithm;
if (std::find(algos.begin(), algos.end(), selectedAlgorithm) == algos.end()) {
throw std::invalid_argument("Invalid selectFeatures value, valid values [" + SelectFeatures.IWSS + ", " +
SelectFeatures.CFS + ", " + SelectFeatures.FCBF + "]");
}
hyperparameters.erase("select_features");
}
if (hyperparameters.contains("block_update")) {
block_update = hyperparameters["block_update"];
hyperparameters.erase("block_update");
}
if (block_update && alpha_block) {
throw std::invalid_argument("alpha_block and block_update cannot be true at the same time");
}
if (block_update && !bisection) {
throw std::invalid_argument("block_update needs bisection to be true");
}
Classifier::setHyperparameters(hyperparameters);
}
void Boost::add_model(std::unique_ptr<Classifier> model, double significance) {
models.push_back(std::move(model));
n_models++;
significanceModels.push_back(significance);
}
void Boost::remove_last_model() {
models.pop_back();
significanceModels.pop_back();
n_models--;
}
void Boost::buildModel(const torch::Tensor &weights) {
// Models shall be built in trainModel
models.clear();
significanceModels.clear();
n_models = 0;
// Prepare the validation dataset
auto y_ = dataset.index({-1, "..."});
if (convergence) {
// Prepare train & validation sets from train data
auto fold = folding::StratifiedKFold(5, y_, 271);
auto [train, test] = fold.getFold(0);
auto train_t = torch::tensor(train);
auto test_t = torch::tensor(test);
// Get train and validation sets
X_train = dataset.index({torch::indexing::Slice(0, dataset.size(0) - 1), train_t});
y_train = dataset.index({-1, train_t});
X_test = dataset.index({torch::indexing::Slice(0, dataset.size(0) - 1), test_t});
y_test = dataset.index({-1, test_t});
dataset = X_train;
m = X_train.size(1);
auto n_classes = states.at(className).size();
// Build dataset with train data
buildDataset(y_train);
metrics = Metrics(dataset, features, className, n_classes);
} else {
// Use all data to train
X_train = dataset.index({torch::indexing::Slice(0, dataset.size(0) - 1), "..."});
y_train = y_;
}
}
std::vector<int> Boost::featureSelection(torch::Tensor &weights_) {
int maxFeatures = 0;
if (select_features_algorithm == SelectFeatures.CFS) {
featureSelector = new CFS(dataset, features, className, maxFeatures, states.at(className).size(), weights_);
} else if (select_features_algorithm == SelectFeatures.IWSS) {
if (threshold < 0 || threshold > 0.5) {
throw std::invalid_argument("Invalid threshold value for " + SelectFeatures.IWSS + " [0, 0.5]");
}
featureSelector =
new IWSS(dataset, features, className, maxFeatures, states.at(className).size(), weights_, threshold);
} else if (select_features_algorithm == SelectFeatures.FCBF) {
if (threshold < 1e-7 || threshold > 1) {
throw std::invalid_argument("Invalid threshold value for " + SelectFeatures.FCBF + " [1e-7, 1]");
}
featureSelector =
new FCBF(dataset, features, className, maxFeatures, states.at(className).size(), weights_, threshold);
}
featureSelector->fit();
auto featuresUsed = featureSelector->getFeatures();
delete featureSelector;
return featuresUsed;
}
std::tuple<torch::Tensor &, double, bool> Boost::update_weights(torch::Tensor &ytrain, torch::Tensor &ypred,
torch::Tensor &weights) {
bool terminate = false;
double alpha_t = 0;
auto mask_wrong = ypred != ytrain;
auto mask_right = ypred == ytrain;
auto masked_weights = weights * mask_wrong.to(weights.dtype());
double epsilon_t = masked_weights.sum().item<double>();
// std::cout << "epsilon_t: " << epsilon_t << " count wrong: " << mask_wrong.sum().item<int>() << " count right: "
// << mask_right.sum().item<int>() << std::endl;
if (epsilon_t > 0.5) {
// Inverse the weights policy (plot ln(wt))
// "In each round of AdaBoost, there is a sanity check to ensure that the current base
// learner is better than random guess" (Zhi-Hua Zhou, 2012)
terminate = true;
} else {
double wt = (1 - epsilon_t) / epsilon_t;
alpha_t = epsilon_t == 0 ? 1 : 0.5 * log(wt);
// Step 3.2: Update weights for next classifier
// Step 3.2.1: Update weights of wrong samples
weights += mask_wrong.to(weights.dtype()) * exp(alpha_t) * weights;
// Step 3.2.2: Update weights of right samples
weights += mask_right.to(weights.dtype()) * exp(-alpha_t) * weights;
// Step 3.3: Normalise the weights
double totalWeights = torch::sum(weights).item<double>();
weights = weights / totalWeights;
}
return {weights, alpha_t, terminate};
}
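Note: the amount of say follows the usual AdaBoost rule alpha_t = 0.5 * ln((1 - epsilon_t) / epsilon_t); this variant then scales each weight by (1 + e^{±alpha_t}) rather than the textbook e^{±alpha_t} before renormalising. A standalone numeric check of the alpha computation:

#include <cassert>
#include <cmath>

int main() {
    double epsilon_t = 0.2; // weighted error of the new model
    double alpha_t = 0.5 * std::log((1 - epsilon_t) / epsilon_t);
    assert(std::abs(alpha_t - 0.6931) < 1e-3); // 0.5 * ln(4) = ln(2)
    return 0;
}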
std::tuple<torch::Tensor &, double, bool> Boost::update_weights_block(int k, torch::Tensor &ytrain,
torch::Tensor &weights) {
/* Update Block algorithm
k = # of models in block
n_models = # of models in ensemble to make predictions
n_models_bak = # models saved
models = vector of models to make predictions
models_bak = models not used to make predictions
significances_bak = backup of significances vector
Case list
A) k = 1, n_models = 1 => n = 0, n_models = n + k
B) k = 1, n_models = n + 1 => n_models = n + k
C) k > 1, n_models = k + 1 => n = 1, n_models = n + k
D) k > 1, n_models = k => n = 0, n_models = n + k
E) k > 1, n_models = k + n => n_models = n + k
A, D) n = 0, k > 0, n_models == k
1. n_models_bak <- n_models
2. significances_bak <- significances
3. significances = vector(k, 1)
4. Don't move any classifiers out of models
5. n_models <- k
6. Make prediction, compute alpha, update weights
7. Don't restore any classifiers to models
8. significances <- significances_bak
9. Update last k significances
10. n_models <- n_models_bak
B, C, E) n > 0, k > 0, n_models == n + k
1. n_models_bak <- n_models
2. significances_bak <- significances
3. significances = vector(k, 1)
4. Move first n classifiers to models_bak
5. n_models <- k
6. Make prediction, compute alpha, update weights
7. Insert classifiers in models_bak to be the first n models
8. significances <- significances_bak
9. Update last k significances
10. n_models <- n_models_bak
*/
//
// Make predict with only the last k models
//
std::unique_ptr<Classifier> model;
std::vector<std::unique_ptr<Classifier>> models_bak;
// 1. n_models_bak <- n_models 2. significances_bak <- significances
auto significance_bak = significanceModels;
auto n_models_bak = n_models;
// 3. significances = vector(k, 1)
significanceModels = std::vector<double>(k, 1.0);
// 4. Move first n classifiers to models_bak
// backup the first n_models - k models (if n_models == k, don't backup any)
for (int i = 0; i < n_models - k; ++i) {
model = std::move(models[0]);
models.erase(models.begin());
models_bak.push_back(std::move(model));
}
assert(models.size() == k);
// 5. n_models <- k
n_models = k;
// 6. Make prediction, compute alpha, update weights
auto ypred = predict(X_train);
//
// Update weights
//
double alpha_t;
bool terminate;
std::tie(weights, alpha_t, terminate) = update_weights(y_train, ypred, weights);
//
// Restore the models if needed
//
// 7. Insert classifiers in models_bak to be the first n models
// if n_models_bak == k, don't restore any, because none of them were moved
if (k != n_models_bak) {
// Insert in the same order as they were extracted
int bak_size = models_bak.size();
for (int i = 0; i < bak_size; ++i) {
model = std::move(models_bak[bak_size - 1 - i]);
models_bak.erase(models_bak.end() - 1);
models.insert(models.begin(), std::move(model));
}
}
// 8. significances <- significances_bak
significanceModels = significance_bak;
//
// Update the significance of the last k models
//
// 9. Update last k significances
for (int i = 0; i < k; ++i) {
significanceModels[n_models_bak - k + i] = alpha_t;
}
// 10. n_models <- n_models_bak
n_models = n_models_bak;
return {weights, alpha_t, terminate};
}
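A concrete trace of case E (hypothetical numbers, mirroring the numbered steps above):

// n_models = 5, k = 2  =>  n = 3 backed-up models (case E)
// before:     models = {M1, M2, M3, M4, M5}, significances = {a1, a2, a3, a4, a5}
// step 4:     models_bak = {M1, M2, M3}, models = {M4, M5}, significances = {1, 1}
// step 6:     predict with {M4, M5} only; update_weights yields one shared alpha_t
// steps 7-10: models = {M1, M2, M3, M4, M5} restored, significances = {a1, a2, a3, alpha_t, alpha_t}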
} // namespace bayesnet

View File

@@ -27,26 +27,31 @@ namespace bayesnet {
class Boost : public Ensemble {
public:
explicit Boost(bool predict_voting = false);
- virtual ~Boost() = default;
+ virtual ~Boost() override = default;
void setHyperparameters(const nlohmann::json& hyperparameters_) override;
protected:
std::vector<int> featureSelection(torch::Tensor& weights_);
void buildModel(const torch::Tensor& weights) override;
std::tuple<torch::Tensor&, double, bool> update_weights(torch::Tensor& ytrain, torch::Tensor& ypred, torch::Tensor& weights);
std::tuple<torch::Tensor&, double, bool> update_weights_block(int k, torch::Tensor& ytrain, torch::Tensor& weights);
+ void add_model(std::unique_ptr<Classifier> model, double significance);
+ void remove_last_model();
+ //
+ // Attributes
+ //
torch::Tensor X_train, y_train, X_test, y_test;
// Hyperparameters
bool bisection = true; // if true, use bisection strategy to add k models at once to the ensemble
int maxTolerance = 3;
- std::string order_algorithm; // order to process the KBest features asc, desc, rand
+ std::string order_algorithm = Orders.DESC; // order to process the KBest features asc, desc, rand
bool convergence = true; // if true, stop when the model does not improve
bool convergence_best = false; // whether to keep the best accuracy to the moment or the last accuracy as prior accuracy
bool selectFeatures = false; // if true, use feature selection
- std::string select_features_algorithm = Orders.DESC; // Selected feature selection algorithm
+ std::string select_features_algorithm; // Selected feature selection algorithm
FeatureSelect* featureSelector = nullptr;
double threshold = -1;
- bool block_update = false;
+ bool block_update = false; // if true, use block update algorithm, only meaningful if bisection is true
+ bool alpha_block = false; // if true, the alpha is computed with the ensemble built so far and the new model
};
}
#endif

View File

@@ -4,14 +4,9 @@
// SPDX-License-Identifier: MIT
// ***************************************************************
- #include <set>
- #include <functional>
#include <limits.h>
#include <tuple>
#include <folding.hpp>
- #include "bayesnet/feature_selection/CFS.h"
- #include "bayesnet/feature_selection/FCBF.h"
- #include "bayesnet/feature_selection/IWSS.h"
#include "BoostA2DE.h"
namespace bayesnet {
@@ -19,7 +14,7 @@ namespace bayesnet {
BoostA2DE::BoostA2DE(bool predict_voting) : Boost(predict_voting)
{
}
- std::vector<int> BoostA2DE::initializeModels()
+ std::vector<int> BoostA2DE::initializeModels(const Smoothing_t smoothing)
{
torch::Tensor weights_ = torch::full({ m }, 1.0 / m, torch::kFloat64);
std::vector<int> featuresSelected = featureSelection(weights_);
@@ -32,7 +27,7 @@ namespace bayesnet {
for (int j = i + 1; j < featuresSelected.size(); j++) {
auto parents = { featuresSelected[i], featuresSelected[j] };
std::unique_ptr<Classifier> model = std::make_unique<SPnDE>(parents);
- model->fit(dataset, features, className, states, weights_);
+ model->fit(dataset, features, className, states, weights_, smoothing);
models.push_back(std::move(model));
significanceModels.push_back(1.0); // They will be updated later in trainModel
n_models++;
@@ -41,7 +36,7 @@ namespace bayesnet {
notes.push_back("Used features in initialization: " + std::to_string(featuresSelected.size()) + " of " + std::to_string(features.size()) + " with " + select_features_algorithm);
return featuresSelected;
}
- void BoostA2DE::trainModel(const torch::Tensor& weights)
+ void BoostA2DE::trainModel(const torch::Tensor& weights, const Smoothing_t smoothing)
{
//
// Logging setup
@@ -58,7 +53,10 @@ namespace bayesnet {
bool finished = false;
std::vector<int> featuresUsed;
if (selectFeatures) {
- featuresUsed = initializeModels();
+ featuresUsed = initializeModels(smoothing);
+ if (featuresUsed.size() == 0) {
+ return;
+ }
auto ypred = predict(X_train);
std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
// Update significance of the models
@@ -96,7 +94,7 @@ namespace bayesnet {
pairSelection.erase(pairSelection.begin());
std::unique_ptr<Classifier> model;
model = std::make_unique<SPnDE>(std::vector<int>({ feature_pair.first, feature_pair.second }));
- model->fit(dataset, features, className, states, weights_);
+ model->fit(dataset, features, className, states, weights_, smoothing);
alpha_t = 0.0;
if (!block_update) {
auto ypred = model->predict(X_train);
@@ -164,4 +162,4 @@ namespace bayesnet {
{
return Ensemble::graph(title);
}
}

View File

@@ -17,9 +17,9 @@ namespace bayesnet {
virtual ~BoostA2DE() = default;
std::vector<std::string> graph(const std::string& title = "BoostA2DE") const override;
protected:
- void trainModel(const torch::Tensor& weights) override;
+ void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) override;
private:
- std::vector<int> initializeModels();
+ std::vector<int> initializeModels(const Smoothing_t smoothing);
};
}
#endif

View File

@@ -4,25 +4,27 @@
// SPDX-License-Identifier: MIT
// ***************************************************************
- #include <random>
- #include <set>
- #include <functional>
- #include <limits.h>
- #include <tuple>
#include "BoostAODE.h"
+ #include "bayesnet/classifiers/SPODE.h"
+ #include <limits.h>
+ // #include <loguru.cpp>
+ // #include <loguru.hpp>
+ #include <random>
+ #include <set>
+ #include <tuple>
namespace bayesnet {
BoostAODE::BoostAODE(bool predict_voting) : Boost(predict_voting)
{
}
- std::vector<int> BoostAODE::initializeModels()
+ std::vector<int> BoostAODE::initializeModels(const Smoothing_t smoothing)
{
torch::Tensor weights_ = torch::full({ m }, 1.0 / m, torch::kFloat64);
std::vector<int> featuresSelected = featureSelection(weights_);
for (const int& feature : featuresSelected) {
std::unique_ptr<Classifier> model = std::make_unique<SPODE>(feature);
- model->fit(dataset, features, className, states, weights_);
+ model->fit(dataset, features, className, states, weights_, smoothing);
models.push_back(std::move(model));
significanceModels.push_back(1.0); // They will be updated later in trainModel
n_models++;
@@ -30,7 +32,7 @@ namespace bayesnet {
notes.push_back("Used features in initialization: " + std::to_string(featuresSelected.size()) + " of " + std::to_string(features.size()) + " with " + select_features_algorithm);
return featuresSelected;
}
- void BoostAODE::trainModel(const torch::Tensor& weights)
+ void BoostAODE::trainModel(const torch::Tensor& weights, const Smoothing_t smoothing)
{
//
// Logging setup
@@ -46,14 +48,16 @@ namespace bayesnet {
torch::Tensor weights_ = torch::full({ m }, 1.0 / m, torch::kFloat64);
bool finished = false;
std::vector<int> featuresUsed;
+ n_models = 0;
if (selectFeatures) {
- featuresUsed = initializeModels();
+ featuresUsed = initializeModels(smoothing);
auto ypred = predict(X_train);
std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
// Update significance of the models
for (int i = 0; i < n_models; ++i) {
- significanceModels[i] = alpha_t;
+ significanceModels.push_back(alpha_t);
}
+ // VLOG_SCOPE_F(1, "SelectFeatures. alpha_t: %f n_models: %d", alpha_t, n_models);
if (finished) {
return;
}
@@ -77,10 +81,8 @@ namespace bayesnet {
std::shuffle(featureSelection.begin(), featureSelection.end(), g);
}
// Remove used features
- featureSelection.erase(remove_if(begin(featureSelection), end(featureSelection), [&](auto x)
- { return std::find(begin(featuresUsed), end(featuresUsed), x) != end(featuresUsed);}),
- end(featureSelection)
- );
+ featureSelection.erase(remove_if(begin(featureSelection), end(featureSelection), [&](auto x) { return std::find(begin(featuresUsed), end(featuresUsed), x) != end(featuresUsed); }),
+ end(featureSelection));
int k = bisection ? pow(2, tolerance) : 1;
int counter = 0; // The model counter of the current pack
// VLOG_SCOPE_F(1, "counter=%d k=%d featureSelection.size: %zu", counter, k, featureSelection.size());
@@ -89,10 +91,28 @@ namespace bayesnet {
featureSelection.erase(featureSelection.begin());
std::unique_ptr<Classifier> model;
model = std::make_unique<SPODE>(feature);
- model->fit(dataset, features, className, states, weights_);
+ model->fit(dataset, features, className, states, weights_, smoothing);
alpha_t = 0.0;
if (!block_update) {
- auto ypred = model->predict(X_train);
+ torch::Tensor ypred;
+ if (alpha_block) {
+ //
+ // Compute the prediction with the current ensemble + model
+ //
+ // Add the model to the ensemble
+ n_models++;
+ models.push_back(std::move(model));
+ significanceModels.push_back(1);
+ // Compute the prediction
+ ypred = predict(X_train);
+ // Remove the model from the ensemble
+ model = std::move(models.back());
+ models.pop_back();
+ significanceModels.pop_back();
+ n_models--;
+ } else {
+ ypred = model->predict(X_train);
+ }
// Step 3.1: Compute the classifier amount of say
std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
}
@@ -102,7 +122,7 @@ namespace bayesnet {
models.push_back(std::move(model));
significanceModels.push_back(alpha_t);
n_models++;
- // VLOG_SCOPE_F(2, "numItemsPack: %d n_models: %d featuresUsed: %zu", numItemsPack, n_models, featuresUsed.size());
+ // VLOG_SCOPE_F(2, "finished: %d numItemsPack: %d n_models: %d featuresUsed: %zu", finished, numItemsPack, n_models, featuresUsed.size());
}
if (block_update) {
std::tie(weights_, alpha_t, finished) = update_weights_block(k, y_train, weights_);
@@ -145,7 +165,7 @@ namespace bayesnet {
}
} else {
notes.push_back("Convergence threshold reached & 0 models eliminated");
// VLOG_SCOPE_F(4, "Convergence threshold reached & 0 models eliminated n_models=%d numItemsPack=%d", n_models, numItemsPack);
}
}
if (featuresUsed.size() != features.size()) {
@@ -158,4 +178,4 @@ namespace bayesnet {
{
return Ensemble::graph(title);
}
}

View File

@@ -8,7 +8,6 @@
#define BOOSTAODE_H
#include <string>
#include <vector>
- #include "bayesnet/classifiers/SPODE.h"
#include "Boost.h"
namespace bayesnet {
@@ -18,9 +17,9 @@ namespace bayesnet {
virtual ~BoostAODE() = default;
std::vector<std::string> graph(const std::string& title = "BoostAODE") const override;
protected:
- void trainModel(const torch::Tensor& weights) override;
+ void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) override;
private:
- std::vector<int> initializeModels();
+ std::vector<int> initializeModels(const Smoothing_t smoothing);
};
}
#endif

View File

@@ -3,22 +3,20 @@
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#include "Ensemble.h"
namespace bayesnet {
Ensemble::Ensemble(bool predict_voting) : Classifier(Network()), n_models(0), predict_voting(predict_voting)
{
};
const std::string ENSEMBLE_NOT_FITTED = "Ensemble has not been fitted";
- void Ensemble::trainModel(const torch::Tensor& weights)
+ void Ensemble::trainModel(const torch::Tensor& weights, const Smoothing_t smoothing)
{
n_models = models.size();
for (auto i = 0; i < n_models; ++i) {
// fit with std::vectors
- models[i]->fit(dataset, features, className, states);
+ models[i]->fit(dataset, features, className, states, smoothing);
}
}
std::vector<int> Ensemble::compute_arg_max(std::vector<std::vector<double>>& X)
@@ -85,17 +83,10 @@ namespace bayesnet {
{
auto n_states = models[0]->getClassNumStates();
torch::Tensor y_pred = torch::zeros({ X.size(1), n_states }, torch::kFloat32);
- auto threads{ std::vector<std::thread>() };
- std::mutex mtx;
for (auto i = 0; i < n_models; ++i) {
- threads.push_back(std::thread([&, i]() {
- auto ypredict = models[i]->predict_proba(X);
- std::lock_guard<std::mutex> lock(mtx);
- y_pred += ypredict * significanceModels[i];
- }));
- }
- for (auto& thread : threads) {
- thread.join();
+ auto ypredict = models[i]->predict_proba(X);
+ /*std::cout << "model " << i << " prediction: " << ypredict << " significance " << significanceModels[i] << std::endl;*/
+ y_pred += ypredict * significanceModels[i];
}
auto sum = std::reduce(significanceModels.begin(), significanceModels.end());
y_pred /= sum;
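Note: the loop builds a significance-weighted sum that the final division turns into y_pred = sum_i(alpha_i * p_i) / sum_i(alpha_i). A standalone numeric sketch (made-up values):

#include <cassert>
#include <vector>

int main() {
    // Two models, two classes: significance-weighted average of probabilities
    std::vector<double> p1 = { 0.9, 0.1 }, p2 = { 0.5, 0.5 };
    double a1 = 2.0, a2 = 1.0; // model significances (alphas)
    std::vector<double> y_pred(2);
    for (int c = 0; c < 2; ++c)
        y_pred[c] = (a1 * p1[c] + a2 * p2[c]) / (a1 + a2);
    assert(y_pred[0] > 0.76 && y_pred[0] < 0.77); // (1.8 + 0.5) / 3 = 0.766...
    return 0;
}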
@@ -105,23 +96,15 @@ namespace bayesnet {
{
auto n_states = models[0]->getClassNumStates();
std::vector<std::vector<double>> y_pred(X[0].size(), std::vector<double>(n_states, 0.0));
- auto threads{ std::vector<std::thread>() };
- std::mutex mtx;
for (auto i = 0; i < n_models; ++i) {
- threads.push_back(std::thread([&, i]() {
- auto ypredict = models[i]->predict_proba(X);
- assert(ypredict.size() == y_pred.size());
- assert(ypredict[0].size() == y_pred[0].size());
- std::lock_guard<std::mutex> lock(mtx);
- // Multiply each prediction by the significance of the model and then add it to the final prediction
- for (auto j = 0; j < ypredict.size(); ++j) {
- std::transform(y_pred[j].begin(), y_pred[j].end(), ypredict[j].begin(), y_pred[j].begin(),
- [significanceModels = significanceModels[i]](double x, double y) { return x + y * significanceModels; });
- }
- }));
- }
- for (auto& thread : threads) {
- thread.join();
+ auto ypredict = models[i]->predict_proba(X);
+ assert(ypredict.size() == y_pred.size());
+ assert(ypredict[0].size() == y_pred[0].size());
+ // Multiply each prediction by the significance of the model and then add it to the final prediction
+ for (auto j = 0; j < ypredict.size(); ++j) {
+ std::transform(y_pred[j].begin(), y_pred[j].end(), ypredict[j].begin(), y_pred[j].begin(),
+ [significanceModels = significanceModels[i]](double x, double y) { return x + y * significanceModels; });
+ }
}
auto sum = std::reduce(significanceModels.begin(), significanceModels.end());
// Divide each element of the prediction by the sum of the significances
@@ -141,17 +124,9 @@ namespace bayesnet {
{
// Build a m x n_models tensor with the predictions of each model
torch::Tensor y_pred = torch::zeros({ X.size(1), n_models }, torch::kInt32);
- auto threads{ std::vector<std::thread>() };
- std::mutex mtx;
for (auto i = 0; i < n_models; ++i) {
- threads.push_back(std::thread([&, i]() {
- auto ypredict = models[i]->predict(X);
- std::lock_guard<std::mutex> lock(mtx);
- y_pred.index_put_({ "...", i }, ypredict);
- }));
- }
- for (auto& thread : threads) {
- thread.join();
+ auto ypredict = models[i]->predict(X);
+ y_pred.index_put_({ "...", i }, ypredict);
}
return voting(y_pred);
}
@@ -219,4 +194,4 @@ namespace bayesnet {
}
return nstates;
}
}

View File

@@ -33,9 +33,15 @@ namespace bayesnet {
}
std::string dump_cpt() const override
{
- return "";
+ std::string output;
+ for (auto& model : models) {
+ output += model->dump_cpt();
+ output += std::string(80, '-') + "\n";
+ }
+ return output;
}
protected:
+ void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) override;
torch::Tensor predict_average_voting(torch::Tensor& X);
std::vector<std::vector<double>> predict_average_voting(std::vector<std::vector<int>>& X);
torch::Tensor predict_average_proba(torch::Tensor& X);
@@ -43,10 +49,10 @@ namespace bayesnet {
torch::Tensor compute_arg_max(torch::Tensor& X);
std::vector<int> compute_arg_max(std::vector<std::vector<double>>& X);
torch::Tensor voting(torch::Tensor& votes);
+ // Attributes
unsigned n_models;
std::vector<std::unique_ptr<Classifier>> models;
std::vector<double> significanceModels;
- void trainModel(const torch::Tensor& weights) override;
bool predict_voting;
};
}

View File

@@ -0,0 +1,168 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2025 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#include <folding.hpp>
#include <limits.h>
#include "XBA2DE.h"
#include "bayesnet/classifiers/XSP2DE.h"
#include "bayesnet/utils/TensorUtils.h"
namespace bayesnet {
XBA2DE::XBA2DE(bool predict_voting) : Boost(predict_voting) {}
std::vector<int> XBA2DE::initializeModels(const Smoothing_t smoothing) {
torch::Tensor weights_ = torch::full({m}, 1.0 / m, torch::kFloat64);
std::vector<int> featuresSelected = featureSelection(weights_);
if (featuresSelected.size() < 2) {
notes.push_back("No features selected in initialization");
status = ERROR;
return std::vector<int>();
}
for (int i = 0; i < featuresSelected.size() - 1; i++) {
for (int j = i + 1; j < featuresSelected.size(); j++) {
std::unique_ptr<Classifier> model = std::make_unique<XSp2de>(featuresSelected[i], featuresSelected[j]);
model->fit(dataset, features, className, states, weights_, smoothing);
add_model(std::move(model), 1.0);
}
}
notes.push_back("Used features in initialization: " + std::to_string(featuresSelected.size()) + " of " +
std::to_string(features.size()) + " with " + select_features_algorithm);
return featuresSelected;
}
void XBA2DE::trainModel(const torch::Tensor &weights, const Smoothing_t smoothing) {
//
// Logging setup
//
// loguru::set_thread_name("XBA2DE");
// loguru::g_stderr_verbosity = loguru::Verbosity_OFF;
// loguru::add_file("boostA2DE.log", loguru::Truncate, loguru::Verbosity_MAX);
// Algorithm based on the adaboost algorithm for classification
// as explained in Ensemble methods (Zhi-Hua Zhou, 2012)
X_train_ = TensorUtils::to_matrix(X_train);
y_train_ = TensorUtils::to_vector<int>(y_train);
if (convergence) {
X_test_ = TensorUtils::to_matrix(X_test);
y_test_ = TensorUtils::to_vector<int>(y_test);
}
fitted = true;
double alpha_t = 0;
torch::Tensor weights_ = torch::full({m}, 1.0 / m, torch::kFloat64);
bool finished = false;
std::vector<int> featuresUsed;
if (selectFeatures) {
featuresUsed = initializeModels(smoothing);
if (featuresUsed.size() == 0) {
return;
}
auto ypred = predict(X_train);
std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
// Update significance of the models
for (int i = 0; i < n_models; ++i) {
significanceModels[i] = alpha_t;
}
if (finished) {
return;
}
}
int numItemsPack = 0; // The counter of the models inserted in the current pack
// Variables to control the accuracy finish condition
double priorAccuracy = 0.0;
double improvement = 1.0;
double convergence_threshold = 1e-4;
int tolerance = 0; // number of times the accuracy is lower than the convergence_threshold
// Step 0: Set the finish condition
// epsilon sub t > 0.5 => invert the weights policy
// validation error is not decreasing
// run out of features
bool ascending = order_algorithm == Orders.ASC;
std::mt19937 g{173};
std::vector<std::pair<int, int>> pairSelection;
while (!finished) {
// Step 1: Build ranking with mutual information
pairSelection = metrics.SelectKPairs(weights_, featuresUsed, ascending, 0); // Get all the pairs sorted
if (order_algorithm == Orders.RAND) {
std::shuffle(pairSelection.begin(), pairSelection.end(), g);
}
int k = bisection ? pow(2, tolerance) : 1;
int counter = 0; // The model counter of the current pack
// VLOG_SCOPE_F(1, "counter=%d k=%d featureSelection.size: %zu", counter, k, featureSelection.size());
while (counter++ < k && pairSelection.size() > 0) {
auto feature_pair = pairSelection[0];
pairSelection.erase(pairSelection.begin());
std::unique_ptr<Classifier> model;
model = std::make_unique<XSp2de>(feature_pair.first, feature_pair.second);
model->fit(dataset, features, className, states, weights_, smoothing);
alpha_t = 0.0;
if (!block_update) {
auto ypred = model->predict(X_train);
// Step 3.1: Compute the classifier amount of say
std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
}
// Step 3.4: Store classifier and its accuracy to weigh its future vote
numItemsPack++;
models.push_back(std::move(model));
significanceModels.push_back(alpha_t);
n_models++;
// VLOG_SCOPE_F(2, "numItemsPack: %d n_models: %d featuresUsed: %zu", numItemsPack, n_models,
// featuresUsed.size());
}
if (block_update) {
std::tie(weights_, alpha_t, finished) = update_weights_block(k, y_train, weights_);
}
if (convergence && !finished) {
auto y_val_predict = predict(X_test);
double accuracy = (y_val_predict == y_test).sum().item<double>() / (double)y_test.size(0);
if (priorAccuracy == 0) {
priorAccuracy = accuracy;
} else {
improvement = accuracy - priorAccuracy;
}
if (improvement < convergence_threshold) {
// VLOG_SCOPE_F(3, " (improvement<threshold) tolerance: %d numItemsPack: %d improvement: %f prior: %f
// current: %f", tolerance, numItemsPack, improvement, priorAccuracy, accuracy);
tolerance++;
} else {
// VLOG_SCOPE_F(3, "* (improvement>=threshold) Reset. tolerance: %d numItemsPack: %d improvement: %f
// prior: %f current: %f", tolerance, numItemsPack, improvement, priorAccuracy, accuracy);
tolerance = 0; // Reset the counter if the model performs better
numItemsPack = 0;
}
if (convergence_best) {
// Keep the best accuracy until now as the prior accuracy
priorAccuracy = std::max(accuracy, priorAccuracy);
} else {
// Keep the last accuracy obtained as the prior accuracy
priorAccuracy = accuracy;
}
}
// VLOG_SCOPE_F(1, "tolerance: %d featuresUsed.size: %zu features.size: %zu", tolerance, featuresUsed.size(),
// features.size());
finished = finished || tolerance > maxTolerance || pairSelection.size() == 0;
}
if (tolerance > maxTolerance) {
if (numItemsPack < n_models) {
notes.push_back("Convergence threshold reached & " + std::to_string(numItemsPack) + " models eliminated");
// VLOG_SCOPE_F(4, "Convergence threshold reached & %d models eliminated of %d", numItemsPack, n_models);
for (int i = 0; i < numItemsPack; ++i) {
significanceModels.pop_back();
models.pop_back();
n_models--;
}
} else {
notes.push_back("Convergence threshold reached & 0 models eliminated");
// VLOG_SCOPE_F(4, "Convergence threshold reached & 0 models eliminated n_models=%d numItemsPack=%d",
// n_models, numItemsPack);
}
}
if (pairSelection.size() > 0) {
notes.push_back("Pairs not used in train: " + std::to_string(pairSelection.size()));
status = WARNING;
}
notes.push_back("Number of models: " + std::to_string(n_models));
}
std::vector<std::string> XBA2DE::graph(const std::string &title) const { return Ensemble::graph(title); }
} // namespace bayesnet
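Note: as in BoostAODE, the pack size k doubles with every tolerated non-improvement while bisection is on (k = 2^tolerance); an illustrative progression only:

// tolerance: 0 1 2 3 ...
// k:         1 2 4 8 ...   (with bisection == false every pack holds a single model)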

View File

@@ -0,0 +1,28 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2025 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef XBA2DE_H
#define XBA2DE_H
#include <string>
#include <vector>
#include "Boost.h"
namespace bayesnet {
class XBA2DE : public Boost {
public:
explicit XBA2DE(bool predict_voting = false);
virtual ~XBA2DE() = default;
std::vector<std::string> graph(const std::string& title = "XBA2DE") const override;
std::string getVersion() override { return version; };
protected:
void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) override;
private:
std::vector<int> initializeModels(const Smoothing_t smoothing);
std::vector<std::vector<int>> X_train_, X_test_;
std::vector<int> y_train_, y_test_;
std::string version = "0.9.7";
};
}
#endif

View File

@@ -0,0 +1,184 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2025 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#include "XBAODE.h"
#include "bayesnet/classifiers/XSPODE.h"
#include "bayesnet/utils/TensorUtils.h"
#include <limits.h>
#include <random>
#include <tuple>
namespace bayesnet
{
XBAODE::XBAODE() : Boost(false) {}
std::vector<int> XBAODE::initializeModels(const Smoothing_t smoothing)
{
torch::Tensor weights_ = torch::full({m}, 1.0 / m, torch::kFloat64);
std::vector<int> featuresSelected = featureSelection(weights_);
for (const int &feature : featuresSelected) {
std::unique_ptr<Classifier> model = std::make_unique<XSpode>(feature);
model->fit(dataset, features, className, states, weights_, smoothing);
add_model(std::move(model), 1.0);
}
notes.push_back("Used features in initialization: " + std::to_string(featuresSelected.size()) + " of " +
std::to_string(features.size()) + " with " + select_features_algorithm);
return featuresSelected;
}
void XBAODE::trainModel(const torch::Tensor &weights, const bayesnet::Smoothing_t smoothing)
{
X_train_ = TensorUtils::to_matrix(X_train);
y_train_ = TensorUtils::to_vector<int>(y_train);
if (convergence) {
X_test_ = TensorUtils::to_matrix(X_test);
y_test_ = TensorUtils::to_vector<int>(y_test);
}
fitted = true;
double alpha_t;
torch::Tensor weights_ = torch::full({m}, 1.0 / m, torch::kFloat64);
bool finished = false;
std::vector<int> featuresUsed;
n_models = 0;
if (selectFeatures) {
featuresUsed = initializeModels(smoothing);
auto ypred = predict(X_train_);
auto ypred_t = torch::tensor(ypred);
std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred_t, weights_);
// Update significance of the models: swap the provisional 1.0 values set in initializeModels for alpha_t
for (const int &feature : featuresUsed) {
significanceModels.pop_back();
}
for (const int &feature : featuresUsed) {
significanceModels.push_back(alpha_t);
}
// VLOG_SCOPE_F(1, "SelectFeatures. alpha_t: %f n_models: %d", alpha_t,
// n_models);
if (finished) {
return;
}
}
int numItemsPack = 0; // The counter of the models inserted in the current pack
// Variables to control the accuracy finish condition
double priorAccuracy = 0.0;
double improvement = 1.0;
double convergence_threshold = 1e-4;
int tolerance = 0; // number of times the accuracy is lower than the convergence_threshold
// Step 0: Set the finish condition
// epsilon sub t > 0.5 => invert the weights_ policy
// validation error is not decreasing
// run out of features
bool ascending = order_algorithm == bayesnet::Orders.ASC;
std::mt19937 g{173};
while (!finished) {
// Step 1: Build ranking with mutual information
auto featureSelection = metrics.SelectKBestWeighted(weights_, ascending, n); // Get all the features sorted
if (order_algorithm == bayesnet::Orders.RAND) {
std::shuffle(featureSelection.begin(), featureSelection.end(), g);
}
// Remove used features
featureSelection.erase(remove_if(featureSelection.begin(), featureSelection.end(),
[&](auto x) {
return std::find(featuresUsed.begin(), featuresUsed.end(), x) !=
featuresUsed.end();
}),
featureSelection.end());
int k = bisection ? pow(2, tolerance) : 1;
int counter = 0; // The model counter of the current pack
// VLOG_SCOPE_F(1, "counter=%d k=%d featureSelection.size: %zu", counter, k,
// featureSelection.size());
while (counter++ < k && featureSelection.size() > 0) {
auto feature = featureSelection[0];
featureSelection.erase(featureSelection.begin());
std::unique_ptr<Classifier> model;
model = std::make_unique<XSpode>(feature);
model->fit(dataset, features, className, states, weights_, smoothing);
/* DEBUG: dynamic_cast<XSpode*>(model.get())->fitx(X_train, y_train, weights_, smoothing); // using exclusive XSpode fit method */
/* DEBUG: std::cout << dynamic_cast<XSpode*>(model.get())->to_string() << std::endl; */
std::vector<int> ypred;
if (alpha_block) {
//
// Compute the prediction with the current ensemble + model
//
// Add the model to the ensemble
add_model(std::move(model), 1.0);
// Compute the prediction
ypred = predict(X_train_);
model = std::move(models.back());
// Remove the model from the ensemble
remove_last_model();
} else {
ypred = model->predict(X_train_);
}
// Step 3.1: Compute the classifier amount of say
auto ypred_t = torch::tensor(ypred);
std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred_t, weights_);
// Step 3.4: Store classifier and its accuracy to weigh its future vote
numItemsPack++;
featuresUsed.push_back(feature);
add_model(std::move(model), alpha_t);
// VLOG_SCOPE_F(2, "finished: %d numItemsPack: %d n_models: %d
// featuresUsed: %zu", finished, numItemsPack, n_models,
// featuresUsed.size());
} // End of the pack
if (convergence && !finished) {
auto y_val_predict = predict(X_test);
double accuracy = (y_val_predict == y_test).sum().item<double>() / (double)y_test.size(0);
if (priorAccuracy == 0) {
priorAccuracy = accuracy;
} else {
improvement = accuracy - priorAccuracy;
}
if (improvement < convergence_threshold) {
// VLOG_SCOPE_F(3, " (improvement<threshold) tolerance: %d
// numItemsPack: %d improvement: %f prior: %f current: %f", tolerance,
// numItemsPack, improvement, priorAccuracy, accuracy);
tolerance++;
} else {
// VLOG_SCOPE_F(3, "* (improvement>=threshold) Reset. tolerance: %d
// numItemsPack: %d improvement: %f prior: %f current: %f", tolerance,
// numItemsPack, improvement, priorAccuracy, accuracy);
tolerance = 0; // Reset the counter if the model performs better
numItemsPack = 0;
}
if (convergence_best) {
// Keep the best accuracy until now as the prior accuracy
priorAccuracy = std::max(accuracy, priorAccuracy);
} else {
// Keep the last accuracy obtained as the prior accuracy
priorAccuracy = accuracy;
}
}
// VLOG_SCOPE_F(1, "tolerance: %d featuresUsed.size: %zu features.size:
// %zu", tolerance, featuresUsed.size(), features.size());
finished = finished || tolerance > maxTolerance || featuresUsed.size() == features.size();
}
if (tolerance > maxTolerance) {
if (numItemsPack < n_models) {
notes.push_back("Convergence threshold reached & " + std::to_string(numItemsPack) + " models eliminated");
// VLOG_SCOPE_F(4, "Convergence threshold reached & %d models eliminated
// of %d", numItemsPack, n_models);
for (int i = featuresUsed.size() - 1; i >= featuresUsed.size() - numItemsPack; --i) {
remove_last_model();
}
// VLOG_SCOPE_F(4, "*Convergence threshold %d models left & %d features
// used.", n_models, featuresUsed.size());
} else {
notes.push_back("Convergence threshold reached & 0 models eliminated");
// VLOG_SCOPE_F(4, "Convergence threshold reached & 0 models eliminated
// n_models=%d numItemsPack=%d", n_models, numItemsPack);
}
}
if (featuresUsed.size() != features.size()) {
notes.push_back("Used features in train: " + std::to_string(featuresUsed.size()) + " of " +
std::to_string(features.size()));
status = bayesnet::WARNING;
}
notes.push_back("Number of models: " + std::to_string(n_models));
return;
}
} // namespace bayesnet
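Note: the alpha_block branch above scores each candidate by the error of the whole committee including it, rather than the candidate alone, before handing the prediction to update_weights. Schematically (a restatement of the branch, not new behaviour):

// alpha_block == true:  epsilon_t is measured on predict(X_train_) of {current ensemble + candidate}
// alpha_block == false: epsilon_t is measured on candidate->predict(X_train_) alone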

View File

@@ -0,0 +1,27 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2025 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef XBAODE_H
#define XBAODE_H
#include <vector>
#include <cmath>
#include "Boost.h"
namespace bayesnet {
class XBAODE : public Boost {
public:
XBAODE();
std::string getVersion() override { return version; };
protected:
void trainModel(const torch::Tensor& weights, const bayesnet::Smoothing_t smoothing) override;
private:
std::vector<int> initializeModels(const Smoothing_t smoothing);
std::vector<std::vector<int>> X_train_, X_test_;
std::vector<int> y_train_, y_test_;
std::string version = "0.9.7";
};
}
#endif // XBAODE_H

View File

@@ -5,20 +5,20 @@
// ***************************************************************
#include <thread>
- #include <mutex>
#include <sstream>
- #include <numeric>
- #include <algorithm>
#include "Network.h"
#include "bayesnet/utils/bayesnetUtils.h"
+ #include "bayesnet/utils/CountingSemaphore.h"
+ #include <pthread.h>
+ #include <fstream>
namespace bayesnet {
- Network::Network() : fitted{ false }, maxThreads{ 0.95 }, classNumStates{ 0 }, laplaceSmoothing{ 0 }
- {
- }
- Network::Network(float maxT) : fitted{ false }, maxThreads{ maxT }, classNumStates{ 0 }, laplaceSmoothing{ 0 }
- {
- }
- Network::Network(const Network& other) : laplaceSmoothing(other.laplaceSmoothing), features(other.features), className(other.className), classNumStates(other.getClassNumStates()),
- maxThreads(other.getMaxThreads()), fitted(other.fitted), samples(other.samples)
+ Network::Network() : fitted{ false }, classNumStates{ 0 }
+ {
+ }
+ Network::Network(const Network& other) : features(other.features), className(other.className), classNumStates(other.getClassNumStates()),
+ fitted(other.fitted), samples(other.samples)
{
if (samples.defined())
samples = samples.clone();
@@ -35,16 +35,15 @@ namespace bayesnet {
nodes.clear();
samples = torch::Tensor();
}
- float Network::getMaxThreads() const
- {
- return maxThreads;
- }
torch::Tensor& Network::getSamples()
{
return samples;
}
void Network::addNode(const std::string& name)
{
+ if (fitted) {
+ throw std::invalid_argument("Cannot add node to a fitted network. Initialize first.");
+ }
if (name == "") {
throw std::invalid_argument("Node name cannot be empty");
}
@@ -94,12 +93,21 @@ namespace bayesnet {
}
void Network::addEdge(const std::string& parent, const std::string& child)
{
+ if (fitted) {
+ throw std::invalid_argument("Cannot add edge to a fitted network. Initialize first.");
+ }
if (nodes.find(parent) == nodes.end()) {
throw std::invalid_argument("Parent node " + parent + " does not exist");
}
if (nodes.find(child) == nodes.end()) {
throw std::invalid_argument("Child node " + child + " does not exist");
}
+ // Check if the edge is already in the graph
+ for (auto& node : nodes[parent]->getChildren()) {
+ if (node->getName() == child) {
+ throw std::invalid_argument("Edge " + parent + " -> " + child + " already exists");
+ }
+ }
// Temporarily add edge to check for cycles
nodes[parent]->addChild(nodes[child].get());
nodes[child]->addParent(nodes[parent].get());
@@ -155,7 +163,7 @@ namespace bayesnet {
classNumStates = nodes.at(className)->getNumStates();
}
// X comes in nxm, where n is the number of features and m the number of samples
- void Network::fit(const torch::Tensor& X, const torch::Tensor& y, const torch::Tensor& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states)
+ void Network::fit(const torch::Tensor& X, const torch::Tensor& y, const torch::Tensor& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing)
{
checkFitData(X.size(1), X.size(0), y.size(0), featureNames, className, states, weights);
this->className = className;
@@ -164,17 +172,17 @@ namespace bayesnet {
for (int i = 0; i < featureNames.size(); ++i) {
auto row_feature = X.index({ i, "..." });
}
- completeFit(states, weights);
+ completeFit(states, weights, smoothing);
}
- void Network::fit(const torch::Tensor& samples, const torch::Tensor& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states)
+ void Network::fit(const torch::Tensor& samples, const torch::Tensor& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing)
{
checkFitData(samples.size(1), samples.size(0) - 1, samples.size(1), featureNames, className, states, weights);
this->className = className;
this->samples = samples;
- completeFit(states, weights);
+ completeFit(states, weights, smoothing);
}
// input_data comes in nxm, where n is the number of features and m the number of samples
- void Network::fit(const std::vector<std::vector<int>>& input_data, const std::vector<int>& labels, const std::vector<double>& weights_, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states)
+ void Network::fit(const std::vector<std::vector<int>>& input_data, const std::vector<int>& labels, const std::vector<double>& weights_, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing)
{
const torch::Tensor weights = torch::tensor(weights_, torch::kFloat64);
checkFitData(input_data[0].size(), input_data.size(), labels.size(), featureNames, className, states, weights);
@@ -185,17 +193,43 @@ namespace bayesnet {
samples.index_put_({ i, "..." }, torch::tensor(input_data[i], torch::kInt32));
}
samples.index_put_({ -1, "..." }, torch::tensor(labels, torch::kInt32));
- completeFit(states, weights);
+ completeFit(states, weights, smoothing);
}
- void Network::completeFit(const std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights)
+ void Network::completeFit(const std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const Smoothing_t smoothing)
{
setStates(states);
- laplaceSmoothing = 1.0 / samples.size(1); // To use in CPT computation
std::vector<std::thread> threads;
+ auto& semaphore = CountingSemaphore::getInstance();
+ const double n_samples = static_cast<double>(samples.size(1));
+ auto worker = [&](std::pair<const std::string, std::unique_ptr<Node>>& node, int i) {
+ std::string threadName = "FitWorker-" + std::to_string(i);
+ #if defined(__linux__)
+ pthread_setname_np(pthread_self(), threadName.c_str());
+ #else
+ pthread_setname_np(threadName.c_str());
+ #endif
+ double numStates = static_cast<double>(node.second->getNumStates());
+ double smoothing_factor;
+ switch (smoothing) {
+ case Smoothing_t::ORIGINAL:
+ smoothing_factor = 1.0 / n_samples;
+ break;
+ case Smoothing_t::LAPLACE:
+ smoothing_factor = 1.0;
+ break;
+ case Smoothing_t::CESTNIK:
+ smoothing_factor = 1 / numStates;
+ break;
+ default:
+ smoothing_factor = 0.0; // No smoothing
+ }
+ node.second->computeCPT(samples, features, smoothing_factor, weights);
+ semaphore.release();
+ };
+ int i = 0;
for (auto& node : nodes) {
- threads.emplace_back([this, &node, &weights]() {
- node.second->computeCPT(samples, features, laplaceSmoothing, weights);
- });
+ semaphore.acquire();
+ threads.emplace_back(worker, std::ref(node), i++);
}
for (auto& thread : threads) {
thread.join();
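Note: the three smoothing modes only differ in the pseudo-count handed to computeCPT: ORIGINAL uses 1/N, LAPLACE uses 1, and CESTNIK uses 1/numStates. The actual CPT update lives in Node::computeCPT, which is not part of this diff; a minimal sketch of additive smoothing with these factors, stated as an assumption:

#include <cstddef>
#include <vector>

// Hypothetical additive smoothing of one conditional distribution:
// p(x = s | parents) = (count[s] + alpha) / (total + alpha * numStates)
std::vector<double> smooth(const std::vector<double>& counts, double alpha) {
    double total = 0.0;
    for (double c : counts) total += c;
    std::vector<double> p(counts.size());
    for (std::size_t s = 0; s < counts.size(); ++s)
        p[s] = (counts[s] + alpha) / (total + alpha * counts.size());
    return p;
}
// alpha = 1.0 / n_samples (ORIGINAL), 1.0 (LAPLACE), 1.0 / numStates (CESTNIK)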
@@ -207,14 +241,38 @@ namespace bayesnet {
if (!fitted) {
throw std::logic_error("You must call fit() before calling predict()");
}
+ // Ensure the sample size is equal to the number of features
+ if (samples.size(0) != features.size() - 1) {
+ throw std::invalid_argument("(T) Sample size (" + std::to_string(samples.size(0)) +
+ ") does not match the number of features (" + std::to_string(features.size() - 1) + ")");
+ }
torch::Tensor result;
+ std::vector<std::thread> threads;
+ std::mutex mtx;
+ auto& semaphore = CountingSemaphore::getInstance();
result = torch::zeros({ samples.size(1), classNumStates }, torch::kFloat64);
- for (int i = 0; i < samples.size(1); ++i) {
- const torch::Tensor sample = samples.index({ "...", i });
+ auto worker = [&](const torch::Tensor& sample, int i) {
+ std::string threadName = "PredictWorker-" + std::to_string(i);
+ #if defined(__linux__)
+ pthread_setname_np(pthread_self(), threadName.c_str());
+ #else
+ pthread_setname_np(threadName.c_str());
+ #endif
auto psample = predict_sample(sample);
auto temp = torch::tensor(psample, torch::kFloat64);
- // result.index_put_({ i, "..." }, torch::tensor(predict_sample(sample), torch::kFloat64));
- result.index_put_({ i, "..." }, temp);
+ {
+ std::lock_guard<std::mutex> lock(mtx);
+ result.index_put_({ i, "..." }, temp);
+ }
+ semaphore.release();
+ };
+ for (int i = 0; i < samples.size(1); ++i) {
+ semaphore.acquire();
+ const torch::Tensor sample = samples.index({ "...", i });
+ threads.emplace_back(worker, sample, i);
+ }
+ for (auto& thread : threads) {
+ thread.join();
}
if (proba)
return result;
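Note: CountingSemaphore.h is not included in this diff; the workers only rely on a getInstance()/acquire()/release() interface that bounds the number of concurrent threads. A minimal sketch of such a class (an assumption, capped at hardware concurrency):

#include <algorithm>
#include <condition_variable>
#include <mutex>
#include <thread>

class CountingSemaphore {
public:
    static CountingSemaphore& getInstance() {
        static CountingSemaphore instance(std::max(1u, std::thread::hardware_concurrency()));
        return instance;
    }
    void acquire() {
        std::unique_lock<std::mutex> lock(mtx_);
        cv_.wait(lock, [this] { return count_ > 0; });
        --count_;
    }
    void release() {
        std::lock_guard<std::mutex> lock(mtx_);
        ++count_;
        cv_.notify_one();
    }
private:
    explicit CountingSemaphore(unsigned count) : count_(count) {}
    std::mutex mtx_;
    std::condition_variable cv_;
    unsigned count_;
};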
@@ -239,18 +297,38 @@ namespace bayesnet {
if (!fitted) {
throw std::logic_error("You must call fit() before calling predict()");
}
- std::vector<int> predictions;
+ // Ensure the sample size is equal to the number of features
+ if (tsamples.size() != features.size() - 1) {
+ throw std::invalid_argument("(V) Sample size (" + std::to_string(tsamples.size()) +
+ ") does not match the number of features (" + std::to_string(features.size() - 1) + ")");
+ }
+ std::vector<int> predictions(tsamples[0].size(), 0);
std::vector<int> sample;
+ std::vector<std::thread> threads;
+ auto& semaphore = CountingSemaphore::getInstance();
+ auto worker = [&](const std::vector<int>& sample, const int row, int& prediction) {
+ std::string threadName = "(V)PWorker-" + std::to_string(row);
+ #if defined(__linux__)
+ pthread_setname_np(pthread_self(), threadName.c_str());
+ #else
+ pthread_setname_np(threadName.c_str());
+ #endif
+ auto classProbabilities = predict_sample(sample);
+ auto maxElem = max_element(classProbabilities.begin(), classProbabilities.end());
+ int predictedClass = distance(classProbabilities.begin(), maxElem);
+ prediction = predictedClass;
+ semaphore.release();
+ };
for (int row = 0; row < tsamples[0].size(); ++row) {
sample.clear();
for (int col = 0; col < tsamples.size(); ++col) {
sample.push_back(tsamples[col][row]);
}
- std::vector<double> classProbabilities = predict_sample(sample);
- // Find the class with the maximum posterior probability
- auto maxElem = max_element(classProbabilities.begin(), classProbabilities.end());
- int predictedClass = distance(classProbabilities.begin(), maxElem);
- predictions.push_back(predictedClass);
+ semaphore.acquire();
+ threads.emplace_back(worker, sample, row, std::ref(predictions[row]));
+ }
+ for (auto& thread : threads) {
+ thread.join();
}
return predictions;
}
@@ -261,14 +339,36 @@ namespace bayesnet {
if (!fitted) {
throw std::logic_error("You must call fit() before calling predict_proba()");
}
- std::vector<std::vector<double>> predictions;
+ // Ensure the sample size is equal to the number of features
+ if (tsamples.size() != features.size() - 1) {
+ throw std::invalid_argument("(V) Sample size (" + std::to_string(tsamples.size()) +
+ ") does not match the number of features (" + std::to_string(features.size() - 1) + ")");
+ }
+ std::vector<std::vector<double>> predictions(tsamples[0].size(), std::vector<double>(classNumStates, 0.0));
std::vector<int> sample;
+ std::vector<std::thread> threads;
+ auto& semaphore = CountingSemaphore::getInstance();
+ auto worker = [&](const std::vector<int>& sample, int row, std::vector<double>& predictions) {
+ std::string threadName = "(V)PWorker-" + std::to_string(row);
+ #if defined(__linux__)
+ pthread_setname_np(pthread_self(), threadName.c_str());
+ #else
+ pthread_setname_np(threadName.c_str());
+ #endif
+ std::vector<double> classProbabilities = predict_sample(sample);
+ predictions = classProbabilities;
+ semaphore.release();
+ };
for (int row = 0; row < tsamples[0].size(); ++row) {
sample.clear();
for (int col = 0; col < tsamples.size(); ++col) {
sample.push_back(tsamples[col][row]);
}
- predictions.push_back(predict_sample(sample));
+ semaphore.acquire();
+ threads.emplace_back(worker, sample, row, std::ref(predictions[row]));
+ }
+ for (auto& thread : threads) {
+ thread.join();
}
return predictions;
}
@@ -286,11 +386,6 @@ namespace bayesnet {
// Return 1xn std::vector of probabilities
std::vector<double> Network::predict_sample(const std::vector<int>& sample)
{
-    // Ensure the sample size is equal to the number of features
-    if (sample.size() != features.size() - 1) {
-        throw std::invalid_argument("Sample size (" + std::to_string(sample.size()) +
-            ") does not match the number of features (" + std::to_string(features.size() - 1) + ")");
-    }
    std::map<std::string, int> evidence;
    for (int i = 0; i < sample.size(); ++i) {
        evidence[features[i]] = sample[i];
@@ -300,44 +395,26 @@ namespace bayesnet {
// Return 1xn std::vector of probabilities
std::vector<double> Network::predict_sample(const torch::Tensor& sample)
{
-    // Ensure the sample size is equal to the number of features
-    if (sample.size(0) != features.size() - 1) {
-        throw std::invalid_argument("Sample size (" + std::to_string(sample.size(0)) +
-            ") does not match the number of features (" + std::to_string(features.size() - 1) + ")");
-    }
    std::map<std::string, int> evidence;
    for (int i = 0; i < sample.size(0); ++i) {
        evidence[features[i]] = sample[i].item<int>();
    }
    return exactInference(evidence);
}
-double Network::computeFactor(std::map<std::string, int>& completeEvidence)
-{
-    double result = 1.0;
-    for (auto& node : getNodes()) {
-        result *= node.second->getFactorValue(completeEvidence);
-    }
-    return result;
-}
std::vector<double> Network::exactInference(std::map<std::string, int>& evidence)
{
    std::vector<double> result(classNumStates, 0.0);
-    std::vector<std::thread> threads;
-    std::mutex mtx;
+    auto completeEvidence = std::map<std::string, int>(evidence);
    for (int i = 0; i < classNumStates; ++i) {
-        threads.emplace_back([this, &result, &evidence, i, &mtx]() {
-            auto completeEvidence = std::map<std::string, int>(evidence);
-            completeEvidence[getClassName()] = i;
-            double factor = computeFactor(completeEvidence);
-            std::lock_guard<std::mutex> lock(mtx);
-            result[i] = factor;
-        });
-    }
-    for (auto& thread : threads) {
-        thread.join();
+        completeEvidence[getClassName()] = i;
+        double partial = 1.0;
+        for (auto& node : getNodes()) {
+            partial *= node.second->getFactorValue(completeEvidence);
+        }
+        result[i] = partial;
    }
    // Normalize result
-    double sum = accumulate(result.begin(), result.end(), 0.0);
+    double sum = std::accumulate(result.begin(), result.end(), 0.0);
    transform(result.begin(), result.end(), result.begin(), [sum](const double& value) { return value / sum; });
    return result;
}

View File

@@ -10,16 +10,16 @@
#include <vector>
#include "bayesnet/config.h"
#include "Node.h"
+#include "Smoothing.h"
namespace bayesnet {
    class Network {
    public:
        Network();
-        explicit Network(float);
        explicit Network(const Network&);
        ~Network() = default;
        torch::Tensor& getSamples();
-        float getMaxThreads() const;
        void addNode(const std::string&);
        void addEdge(const std::string&, const std::string&);
        std::map<std::string, std::unique_ptr<Node>>& getNodes();
@@ -32,9 +32,9 @@ namespace bayesnet {
        /*
        Notice: Nodes have to be inserted in the same order as they are in the dataset, i.e., first node is first column and so on.
        */
-        void fit(const std::vector<std::vector<int>>& input_data, const std::vector<int>& labels, const std::vector<double>& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states);
+        void fit(const std::vector<std::vector<int>>& input_data, const std::vector<int>& labels, const std::vector<double>& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing);
-        void fit(const torch::Tensor& X, const torch::Tensor& y, const torch::Tensor& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states);
+        void fit(const torch::Tensor& X, const torch::Tensor& y, const torch::Tensor& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing);
-        void fit(const torch::Tensor& samples, const torch::Tensor& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states);
+        void fit(const torch::Tensor& samples, const torch::Tensor& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing);
        std::vector<int> predict(const std::vector<std::vector<int>>&); // Return mx1 std::vector of predictions
        torch::Tensor predict(const torch::Tensor&); // Return mx1 tensor of predictions
        torch::Tensor predict_tensor(const torch::Tensor& samples, const bool proba);
@@ -50,19 +50,16 @@ namespace bayesnet {
    private:
        std::map<std::string, std::unique_ptr<Node>> nodes;
        bool fitted;
-        float maxThreads = 0.95;
        int classNumStates;
        std::vector<std::string> features; // Including classname
        std::string className;
-        double laplaceSmoothing;
        torch::Tensor samples; // n+1xm tensor used to fit the model
        bool isCyclic(const std::string&, std::unordered_set<std::string>&, std::unordered_set<std::string>&);
        std::vector<double> predict_sample(const std::vector<int>&);
        std::vector<double> predict_sample(const torch::Tensor&);
        std::vector<double> exactInference(std::map<std::string, int>&);
-        double computeFactor(std::map<std::string, int>&);
-        void completeFit(const std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights);
+        void completeFit(const std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const Smoothing_t smoothing);
-        void checkFitData(int n_features, int n_samples, int n_samples_y, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights);
+        void checkFitData(int n_samples, int n_features, int n_samples_y, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights);
        void setStates(const std::map<std::string, std::vector<int>>&);
    };
}
} }

View File

@@ -90,51 +90,60 @@ namespace bayesnet {
    }
    return result;
}
-void Node::computeCPT(const torch::Tensor& dataset, const std::vector<std::string>& features, const double laplaceSmoothing, const torch::Tensor& weights)
+void Node::computeCPT(const torch::Tensor& dataset, const std::vector<std::string>& features, const double smoothing, const torch::Tensor& weights)
{
    dimensions.clear();
+    dimensions.reserve(parents.size() + 1);
    // Get dimensions of the CPT
    dimensions.push_back(numStates);
-    transform(parents.begin(), parents.end(), back_inserter(dimensions), [](const auto& parent) { return parent->getNumStates(); });
-    // Create a tensor of zeros with the dimensions of the CPT
-    cpTable = torch::zeros(dimensions, torch::kFloat) + laplaceSmoothing;
-    // Fill table with counts
-    auto pos = find(features.begin(), features.end(), name);
-    if (pos == features.end()) {
-        throw std::logic_error("Feature " + name + " not found in dataset");
-    }
-    int name_index = pos - features.begin();
+    for (const auto& parent : parents) {
+        dimensions.push_back(parent->getNumStates());
+    }
+    //transform(parents.begin(), parents.end(), back_inserter(dimensions), [](const auto& parent) { return parent->getNumStates(); });
+    // Create a tensor initialized with smoothing
+    cpTable = torch::full(dimensions, smoothing, torch::kDouble);
+    // Create a map for quick feature index lookup
+    std::unordered_map<std::string, int> featureIndexMap;
+    for (size_t i = 0; i < features.size(); ++i) {
+        featureIndexMap[features[i]] = i;
+    }
+    // Fill table with counts
+    // Get the index of this node's feature
+    int name_index = featureIndexMap[name];
+    // Get parent indices in dataset
+    std::vector<int> parent_indices;
+    parent_indices.reserve(parents.size());
+    for (const auto& parent : parents) {
+        parent_indices.push_back(featureIndexMap[parent->getName()]);
+    }
+    c10::List<c10::optional<at::Tensor>> coordinates;
    for (int n_sample = 0; n_sample < dataset.size(1); ++n_sample) {
-        c10::List<c10::optional<at::Tensor>> coordinates;
-        coordinates.push_back(dataset.index({ name_index, n_sample }));
-        for (auto parent : parents) {
-            pos = find(features.begin(), features.end(), parent->getName());
-            if (pos == features.end()) {
-                throw std::logic_error("Feature parent " + parent->getName() + " not found in dataset");
-            }
-            int parent_index = pos - features.begin();
-            coordinates.push_back(dataset.index({ parent_index, n_sample }));
+        coordinates.clear();
+        auto sample = dataset.index({ "...", n_sample });
+        coordinates.push_back(sample[name_index]);
+        for (size_t i = 0; i < parent_indices.size(); ++i) {
+            coordinates.push_back(sample[parent_indices[i]]);
        }
        // Increment the count of the corresponding coordinate
-        cpTable.index_put_({ coordinates }, cpTable.index({ coordinates }) + weights.index({ n_sample }).item<double>());
+        cpTable.index_put_({ coordinates }, weights.index({ n_sample }), true);
    }
-    // Normalize the counts
-    cpTable = cpTable / cpTable.sum(0);
+    // Normalize the counts (dividing each row by the sum of the row)
+    cpTable /= cpTable.sum(0, true);
}
-float Node::getFactorValue(std::map<std::string, int>& evidence)
+double Node::getFactorValue(std::map<std::string, int>& evidence)
{
    c10::List<c10::optional<at::Tensor>> coordinates;
    // following predetermined order of indices in the cpTable (see Node.h)
    coordinates.push_back(at::tensor(evidence[name]));
    transform(parents.begin(), parents.end(), std::back_inserter(coordinates), [&evidence](const auto& parent) { return at::tensor(evidence[parent->getName()]); });
-    return cpTable.index({ coordinates }).item<float>();
+    return cpTable.index({ coordinates }).item<double>();
}
std::vector<std::string> Node::graph(const std::string& className)
{
    auto output = std::vector<std::string>();
    auto suffix = name == className ? ", fontcolor=red, fillcolor=lightblue, style=filled " : "";
-    output.push_back(name + " [shape=circle" + suffix + "] \n");
+    output.push_back("\"" + name + "\" [shape=circle" + suffix + "] \n");
-    transform(children.begin(), children.end(), back_inserter(output), [this](const auto& child) { return name + " -> " + child->getName(); });
+    transform(children.begin(), children.end(), back_inserter(output), [this](const auto& child) { return "\"" + name + "\" -> \"" + child->getName() + "\""; });
    return output;
}
}
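Note: the new computeCPT relies on the accumulate=true overload of index_put_, which adds the given value into the addressed cell instead of overwriting it; that is what lets the weighted counts build up without the old read-modify-write round trip. A self-contained sketch of that behavior (toy tensor, not library code):

#include <torch/torch.h>

int main()
{
    auto counts = torch::zeros({ 3 }, torch::kDouble);
    c10::List<c10::optional<at::Tensor>> coordinates;
    coordinates.push_back(at::tensor(1));
    counts.index_put_({ coordinates }, at::tensor(0.5), true);  // adds 0.5 into cell 1
    counts.index_put_({ coordinates }, at::tensor(0.25), true); // adds 0.25 more
    // counts is now [0.0, 0.75, 0.0]
}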

View File

@@ -23,12 +23,12 @@ namespace bayesnet {
        std::vector<Node*>& getParents();
        std::vector<Node*>& getChildren();
        torch::Tensor& getCPT();
-        void computeCPT(const torch::Tensor& dataset, const std::vector<std::string>& features, const double laplaceSmoothing, const torch::Tensor& weights);
+        void computeCPT(const torch::Tensor& dataset, const std::vector<std::string>& features, const double smoothing, const torch::Tensor& weights);
        int getNumStates() const;
        void setNumStates(int);
        unsigned minFill();
        std::vector<std::string> graph(const std::string& clasName); // Returns a std::vector of std::strings representing the graph in graphviz format
-        float getFactorValue(std::map<std::string, int>&);
+        double getFactorValue(std::map<std::string, int>&);
    private:
        std::string name;
        std::vector<Node*> parents;

View File

@@ -0,0 +1,17 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef SMOOTHING_H
#define SMOOTHING_H
namespace bayesnet {
enum class Smoothing_t {
NONE = -1,
ORIGINAL = 0,
LAPLACE,
CESTNIK
};
}
#endif // SMOOTHING_H
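These values select the smoothing strategy that fit forwards down to Node::computeCPT. Below is a hedged sketch of how such an enum could map to the additive factor used when filling a CPT; the concrete constants are assumptions for illustration, not taken from this diff:

#include "Smoothing.h"

// Hypothetical helper: n is the (weighted) sample count for the node.
static double smoothingFactor(bayesnet::Smoothing_t smoothing, double n)
{
    switch (smoothing) {
        case bayesnet::Smoothing_t::ORIGINAL: return 1.0 / n; // tiny pseudo-count (assumed)
        case bayesnet::Smoothing_t::LAPLACE:  return 1.0;     // classic add-one smoothing
        case bayesnet::Smoothing_t::CESTNIK:  return 1.0;     // m-estimate with m = 1 (assumed)
        case bayesnet::Smoothing_t::NONE:
        default:                              return 0.0;     // raw counts
    }
}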

View File

@@ -0,0 +1,54 @@
#ifndef COUNTING_SEMAPHORE_H
#define COUNTING_SEMAPHORE_H
#include <mutex>
#include <condition_variable>
#include <algorithm>
#include <thread>
class CountingSemaphore {
public:
static CountingSemaphore& getInstance()
{
static CountingSemaphore instance;
return instance;
}
// Delete copy constructor and assignment operator
CountingSemaphore(const CountingSemaphore&) = delete;
CountingSemaphore& operator=(const CountingSemaphore&) = delete;
void acquire()
{
std::unique_lock<std::mutex> lock(mtx_);
cv_.wait(lock, [this]() { return count_ > 0; });
--count_;
}
void release()
{
std::lock_guard<std::mutex> lock(mtx_);
++count_;
if (count_ <= max_count_) {
cv_.notify_one();
}
}
uint getCount() const
{
return count_;
}
uint getMaxCount() const
{
return max_count_;
}
private:
CountingSemaphore()
: max_count_(std::max(1u, static_cast<uint>(0.95 * std::thread::hardware_concurrency()))),
count_(max_count_)
{
}
std::mutex mtx_;
std::condition_variable cv_;
const uint max_count_;
uint count_;
};
#endif
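A usage sketch of this singleton, following the acquire-before-spawn / release-inside-worker pattern the predict methods adopt (the per-item work is a placeholder):

#include <thread>
#include <vector>
#include "CountingSemaphore.h"

int main()
{
    auto& semaphore = CountingSemaphore::getInstance();
    std::vector<std::thread> threads;
    for (int i = 0; i < 100; ++i) {
        semaphore.acquire(); // blocks once ~95% of the hardware threads are in use
        threads.emplace_back([&semaphore]() {
            // ... per-item work goes here (placeholder) ...
            semaphore.release(); // frees the slot for the next task
        });
    }
    for (auto& t : threads) {
        t.join();
    }
}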

View File

@@ -53,14 +53,14 @@ namespace bayesnet {
    }
}
-void insertElement(std::list<int>& variables, int variable)
+void MST::insertElement(std::list<int>& variables, int variable)
{
    if (std::find(variables.begin(), variables.end(), variable) == variables.end()) {
        variables.push_front(variable);
    }
}
-std::vector<std::pair<int, int>> reorder(std::vector<std::pair<float, std::pair<int, int>>> T, int root_original)
+std::vector<std::pair<int, int>> MST::reorder(std::vector<std::pair<float, std::pair<int, int>>> T, int root_original)
{
    // Create the edges of a DAG from the MST
    // replacing unordered_set with list because unordered_set cannot guarantee the order of the elements inserted

View File

@@ -14,6 +14,8 @@ namespace bayesnet {
    public:
        MST() = default;
        MST(const std::vector<std::string>& features, const torch::Tensor& weights, const int root);
+        void insertElement(std::list<int>& variables, int variable);
+        std::vector<std::pair<int, int>> reorder(std::vector<std::pair<float, std::pair<int, int>>> T, int root_original);
        std::vector<std::pair<int, int>> maximumSpanningTree();
    private:
        torch::Tensor weights;

View File

@@ -0,0 +1,51 @@
#ifndef TENSORUTILS_H
#define TENSORUTILS_H
#include <torch/torch.h>
#include <vector>
namespace bayesnet {
class TensorUtils {
public:
static std::vector<std::vector<int>> to_matrix(const torch::Tensor& X)
{
// Ensure tensor is contiguous in memory
auto X_contig = X.contiguous();
// Access tensor data pointer directly
auto data_ptr = X_contig.data_ptr<int>();
// If the tensor holds int64_t instead, use these lines:
//auto data_ptr = X_contig.data_ptr<int64_t>();
//std::vector<std::vector<int64_t>> data(X.size(0), std::vector<int64_t>(X.size(1)));
// Prepare output container
std::vector<std::vector<int>> data(X.size(0), std::vector<int>(X.size(1)));
// Fill the 2D vector in a single loop using pointer arithmetic
int rows = X.size(0);
int cols = X.size(1);
for (int i = 0; i < rows; ++i) {
std::copy(data_ptr + i * cols, data_ptr + (i + 1) * cols, data[i].begin());
}
return data;
}
template <typename T>
static std::vector<T> to_vector(const torch::Tensor& y)
{
// Ensure the tensor is contiguous in memory
auto y_contig = y.contiguous();
// Access data pointer
auto data_ptr = y_contig.data_ptr<T>();
// Prepare output container
std::vector<T> data(y.size(0));
// Copy data efficiently
std::copy(data_ptr, data_ptr + y.size(0), data.begin());
return data;
}
};
}
#endif // TENSORUTILS_H
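A usage sketch, assuming int32 tensors as to_matrix and to_vector<int> expect (other dtypes would need the int64_t variant noted in the comments):

#include <torch/torch.h>
#include "TensorUtils.h"

int main()
{
    // 3x4 feature matrix and 4 labels, both kInt32
    auto X = torch::randint(0, 3, { 3, 4 }, torch::kInt32);
    auto y = torch::randint(0, 2, { 4 }, torch::kInt32);
    auto Xv = bayesnet::TensorUtils::to_matrix(X);      // 3 rows of 4 ints
    auto yv = bayesnet::TensorUtils::to_vector<int>(y); // 4 ints
}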

View File

@@ -137,7 +137,7 @@
include(CMakeParseArguments)
-option(CODE_COVERAGE_VERBOSE "Verbose information" FALSE)
+option(CODE_COVERAGE_VERBOSE "Verbose information" TRUE)
# Check prereqs
find_program( GCOV_PATH gcov )
@@ -160,7 +160,11 @@ foreach(LANG ${LANGUAGES})
    endif()
elseif(NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "GNU"
        AND NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "(LLVM)?[Ff]lang")
-    message(FATAL_ERROR "Compiler is not GNU or Flang! Aborting...")
+    if ("${LANG}" MATCHES "CUDA")
+        message(STATUS "Ignoring CUDA")
+    else()
+        message(FATAL_ERROR "Compiler is not GNU or Flang! Aborting...")
+    endif()
endif()
endforeach()

View File

@@ -1,36 +1,16 @@
@startuml
title clang-uml class diagram model
-class "bayesnet::Metrics" as C_0000736965376885623323
-class C_0000736965376885623323 #aliceblue;line:blue;line.dotted;text:blue {
-  +Metrics() = default : void
-  +Metrics(const torch::Tensor & samples, const std::vector<std::string> & features, const std::string & className, const int classNumStates) : void
-  +Metrics(const std::vector<std::vector<int>> & vsamples, const std::vector<int> & labels, const std::vector<std::string> & features, const std::string & className, const int classNumStates) : void
-  ..
-  +SelectKBestWeighted(const torch::Tensor & weights, bool ascending = false, unsigned int k = 0) : std::vector<int>
-  +conditionalEdge(const torch::Tensor & weights) : torch::Tensor
-  +conditionalEdgeWeights(std::vector<float> & weights) : std::vector<float>
-  #doCombinations<T>(const std::vector<T> & source) : std::vector<std::pair<T, T> >
-  #entropy(const torch::Tensor & feature, const torch::Tensor & weights) : double
-  +getScoresKBest() const : std::vector<double>
-  +maximumSpanningTree(const std::vector<std::string> & features, const torch::Tensor & weights, const int root) : std::vector<std::pair<int,int>>
-  +mutualInformation(const torch::Tensor & firstFeature, const torch::Tensor & secondFeature, const torch::Tensor & weights) : double
-  #pop_first<T>(std::vector<T> & v) : T
-  __
-  #className : std::string
-  #features : std::vector<std::string>
-  #samples : torch::Tensor
-}
-class "bayesnet::Node" as C_0001303524929067080934
-class C_0001303524929067080934 #aliceblue;line:blue;line.dotted;text:blue {
+class "bayesnet::Node" as C_0010428199432536647474
+class C_0010428199432536647474 #aliceblue;line:blue;line.dotted;text:blue {
  +Node(const std::string &) : void
  ..
  +addChild(Node *) : void
  +addParent(Node *) : void
  +clear() : void
-  +computeCPT(const torch::Tensor & dataset, const std::vector<std::string> & features, const double laplaceSmoothing, const torch::Tensor & weights) : void
+  +computeCPT(const torch::Tensor & dataset, const std::vector<std::string> & features, const double smoothing, const torch::Tensor & weights) : void
  +getCPT() : torch::Tensor &
  +getChildren() : std::vector<Node *> &
-  +getFactorValue(std::map<std::string,int> &) : float
+  +getFactorValue(std::map<std::string,int> &) : double
  +getName() const : std::string
  +getNumStates() const : int
  +getParents() : std::vector<Node *> &
@@ -41,24 +21,29 @@ class C_0001303524929067080934 #aliceblue;line:blue;line.dotted;text:blue {
  +setNumStates(int) : void
  __
}
-class "bayesnet::Network" as C_0001186707649890429575
-class C_0001186707649890429575 #aliceblue;line:blue;line.dotted;text:blue {
+enum "bayesnet::Smoothing_t" as C_0013393078277439680282
+enum C_0013393078277439680282 {
+  NONE
+  ORIGINAL
+  LAPLACE
+  CESTNIK
+}
+class "bayesnet::Network" as C_0009493661199123436603
+class C_0009493661199123436603 #aliceblue;line:blue;line.dotted;text:blue {
  +Network() : void
-  +Network(float) : void
  +Network(const Network &) : void
  +~Network() = default : void
  ..
  +addEdge(const std::string &, const std::string &) : void
  +addNode(const std::string &) : void
  +dump_cpt() const : std::string
-  +fit(const torch::Tensor & samples, const torch::Tensor & weights, const std::vector<std::string> & featureNames, const std::string & className, const std::map<std::string,std::vector<int>> & states) : void
+  +fit(const torch::Tensor & samples, const torch::Tensor & weights, const std::vector<std::string> & featureNames, const std::string & className, const std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : void
-  +fit(const torch::Tensor & X, const torch::Tensor & y, const torch::Tensor & weights, const std::vector<std::string> & featureNames, const std::string & className, const std::map<std::string,std::vector<int>> & states) : void
+  +fit(const torch::Tensor & X, const torch::Tensor & y, const torch::Tensor & weights, const std::vector<std::string> & featureNames, const std::string & className, const std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : void
-  +fit(const std::vector<std::vector<int>> & input_data, const std::vector<int> & labels, const std::vector<double> & weights, const std::vector<std::string> & featureNames, const std::string & className, const std::map<std::string,std::vector<int>> & states) : void
+  +fit(const std::vector<std::vector<int>> & input_data, const std::vector<int> & labels, const std::vector<double> & weights, const std::vector<std::string> & featureNames, const std::string & className, const std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : void
  +getClassName() const : std::string
  +getClassNumStates() const : int
  +getEdges() const : std::vector<std::pair<std::string,std::string>>
  +getFeatures() const : std::vector<std::string>
-  +getMaxThreads() const : float
  +getNodes() : std::map<std::string,std::unique_ptr<Node>> &
  +getNumEdges() const : int
  +getSamples() : torch::Tensor &
@@ -76,21 +61,21 @@ class C_0001186707649890429575 #aliceblue;line:blue;line.dotted;text:blue {
  +version() : std::string
  __
}
-enum "bayesnet::status_t" as C_0000738420730783851375
-enum C_0000738420730783851375 {
+enum "bayesnet::status_t" as C_0005907365846270811004
+enum C_0005907365846270811004 {
  NORMAL
  WARNING
  ERROR
}
-abstract "bayesnet::BaseClassifier" as C_0000327135989451974539
-abstract C_0000327135989451974539 #aliceblue;line:blue;line.dotted;text:blue {
+abstract "bayesnet::BaseClassifier" as C_0002617087915615796317
+abstract C_0002617087915615796317 #aliceblue;line:blue;line.dotted;text:blue {
  +~BaseClassifier() = default : void
  ..
  {abstract} +dump_cpt() const = 0 : std::string
-  {abstract} +fit(torch::Tensor & X, torch::Tensor & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states) = 0 : BaseClassifier &
+  {abstract} +fit(torch::Tensor & X, torch::Tensor & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) = 0 : BaseClassifier &
-  {abstract} +fit(torch::Tensor & dataset, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states) = 0 : BaseClassifier &
+  {abstract} +fit(torch::Tensor & dataset, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) = 0 : BaseClassifier &
-  {abstract} +fit(torch::Tensor & dataset, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const torch::Tensor & weights) = 0 : BaseClassifier &
+  {abstract} +fit(torch::Tensor & dataset, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const torch::Tensor & weights, const Smoothing_t smoothing) = 0 : BaseClassifier &
-  {abstract} +fit(std::vector<std::vector<int>> & X, std::vector<int> & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states) = 0 : BaseClassifier &
+  {abstract} +fit(std::vector<std::vector<int>> & X, std::vector<int> & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) = 0 : BaseClassifier &
  {abstract} +getClassNumStates() const = 0 : int
  {abstract} +getNotes() const = 0 : std::vector<std::string>
  {abstract} +getNumberOfEdges() const = 0 : int
@@ -109,12 +94,37 @@ abstract C_0000327135989451974539 #aliceblue;line:blue;line.dotted;text:blue {
  {abstract} +setHyperparameters(const nlohmann::json & hyperparameters) = 0 : void
  {abstract} +show() const = 0 : std::vector<std::string>
  {abstract} +topological_order() = 0 : std::vector<std::string>
-  {abstract} #trainModel(const torch::Tensor & weights) = 0 : void
+  {abstract} #trainModel(const torch::Tensor & weights, const Smoothing_t smoothing) = 0 : void
  __
+  #notes : std::vector<std::string>
+  #status : status_t
  #validHyperparameters : std::vector<std::string>
}
-abstract "bayesnet::Classifier" as C_0002043996622900301644
-abstract C_0002043996622900301644 #aliceblue;line:blue;line.dotted;text:blue {
+class "bayesnet::Metrics" as C_0005895723015084986588
+class C_0005895723015084986588 #aliceblue;line:blue;line.dotted;text:blue {
+  +Metrics() = default : void
+  +Metrics(const torch::Tensor & samples, const std::vector<std::string> & features, const std::string & className, const int classNumStates) : void
+  +Metrics(const std::vector<std::vector<int>> & vsamples, const std::vector<int> & labels, const std::vector<std::string> & features, const std::string & className, const int classNumStates) : void
+  ..
+  +SelectKBestWeighted(const torch::Tensor & weights, bool ascending = false, unsigned int k = 0) : std::vector<int>
+  +SelectKPairs(const torch::Tensor & weights, std::vector<int> & featuresExcluded, bool ascending = false, unsigned int k = 0) : std::vector<std::pair<int,int>>
+  +conditionalEdge(const torch::Tensor & weights) : torch::Tensor
+  +conditionalEntropy(const torch::Tensor & firstFeature, const torch::Tensor & secondFeature, const torch::Tensor & labels, const torch::Tensor & weights) : double
+  +conditionalMutualInformation(const torch::Tensor & firstFeature, const torch::Tensor & secondFeature, const torch::Tensor & labels, const torch::Tensor & weights) : double
+  #doCombinations<T>(const std::vector<T> & source) : std::vector<std::pair<T, T> >
+  +entropy(const torch::Tensor & feature, const torch::Tensor & weights) : double
+  +getScoresKBest() const : std::vector<double>
+  +getScoresKPairs() const : std::vector<std::pair<std::pair<int,int>,double>>
+  +maximumSpanningTree(const std::vector<std::string> & features, const torch::Tensor & weights, const int root) : std::vector<std::pair<int,int>>
+  +mutualInformation(const torch::Tensor & firstFeature, const torch::Tensor & secondFeature, const torch::Tensor & weights) : double
+  #pop_first<T>(std::vector<T> & v) : T
+  __
+  #className : std::string
+  #features : std::vector<std::string>
+  #samples : torch::Tensor
+}
+abstract "bayesnet::Classifier" as C_0016351972983202413152
+abstract C_0016351972983202413152 #aliceblue;line:blue;line.dotted;text:blue {
  +Classifier(Network model) : void
  +~Classifier() = default : void
  ..
@@ -123,10 +133,10 @@ abstract C_0002043996622900301644 #aliceblue;line:blue;line.dotted;text:blue {
  {abstract} #buildModel(const torch::Tensor & weights) = 0 : void
  #checkFitParameters() : void
  +dump_cpt() const : std::string
-  +fit(torch::Tensor & X, torch::Tensor & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states) : Classifier &
+  +fit(torch::Tensor & X, torch::Tensor & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : Classifier &
-  +fit(std::vector<std::vector<int>> & X, std::vector<int> & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states) : Classifier &
+  +fit(std::vector<std::vector<int>> & X, std::vector<int> & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : Classifier &
-  +fit(torch::Tensor & dataset, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states) : Classifier &
+  +fit(torch::Tensor & dataset, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : Classifier &
-  +fit(torch::Tensor & dataset, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const torch::Tensor & weights) : Classifier &
+  +fit(torch::Tensor & dataset, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const torch::Tensor & weights, const Smoothing_t smoothing) : Classifier &
  +getClassNumStates() const : int
  +getNotes() const : std::vector<std::string>
  +getNumberOfEdges() const : int
@@ -143,8 +153,9 @@ abstract C_0002043996622900301644 #aliceblue;line:blue;line.dotted;text:blue {
  +setHyperparameters(const nlohmann::json & hyperparameters) : void
  +show() const : std::vector<std::string>
  +topological_order() : std::vector<std::string>
-  #trainModel(const torch::Tensor & weights) : void
+  #trainModel(const torch::Tensor & weights, const Smoothing_t smoothing) : void
  __
+  #CLASSIFIER_NOT_FITTED : const std::string
  #className : std::string
  #dataset : torch::Tensor
  #features : std::vector<std::string>
@@ -153,31 +164,10 @@ __
  #metrics : Metrics
  #model : Network
  #n : unsigned int
-  #notes : std::vector<std::string>
  #states : std::map<std::string,std::vector<int>>
-  #status : status_t
}
-class "bayesnet::KDB" as C_0001112865019015250005
-class C_0001112865019015250005 #aliceblue;line:blue;line.dotted;text:blue {
-  +KDB(int k, float theta = 0.03) : void
-  +~KDB() = default : void
-  ..
-  #buildModel(const torch::Tensor & weights) : void
-  +graph(const std::string & name = "KDB") const : std::vector<std::string>
-  +setHyperparameters(const nlohmann::json & hyperparameters_) : void
-  __
-}
-class "bayesnet::TAN" as C_0001760994424884323017
-class C_0001760994424884323017 #aliceblue;line:blue;line.dotted;text:blue {
-  +TAN() : void
-  +~TAN() = default : void
-  ..
-  #buildModel(const torch::Tensor & weights) : void
-  +graph(const std::string & name = "TAN") const : std::vector<std::string>
-  __
-}
-class "bayesnet::Proposal" as C_0002219995589162262979
-class C_0002219995589162262979 #aliceblue;line:blue;line.dotted;text:blue {
+class "bayesnet::Proposal" as C_0017759964713298103839
+class C_0017759964713298103839 #aliceblue;line:blue;line.dotted;text:blue {
  +Proposal(torch::Tensor & pDataset, std::vector<std::string> & features_, std::string & className_) : void
  +~Proposal() : void
  ..
@@ -190,74 +180,140 @@ __
  #discretizers : map<std::string,mdlp::CPPFImdlp *>
  #y : torch::Tensor
}
-class "bayesnet::TANLd" as C_0001668829096702037834
-class C_0001668829096702037834 #aliceblue;line:blue;line.dotted;text:blue {
-  +TANLd() : void
-  +~TANLd() = default : void
+class "bayesnet::KDB" as C_0008902920152122000044
+class C_0008902920152122000044 #aliceblue;line:blue;line.dotted;text:blue {
+  +KDB(int k, float theta = 0.03) : void
+  +~KDB() = default : void
  ..
-  +fit(torch::Tensor & X, torch::Tensor & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states) : TANLd &
-  +graph(const std::string & name = "TAN") const : std::vector<std::string>
+  #add_m_edges(int idx, std::vector<int> & S, torch::Tensor & weights) : void
+  #buildModel(const torch::Tensor & weights) : void
+  +graph(const std::string & name = "KDB") const : std::vector<std::string>
+  +setHyperparameters(const nlohmann::json & hyperparameters_) : void
+  __
+}
+class "bayesnet::KDBLd" as C_0002756018222998454702
+class C_0002756018222998454702 #aliceblue;line:blue;line.dotted;text:blue {
+  +KDBLd(int k) : void
+  +~KDBLd() = default : void
+  ..
+  +fit(torch::Tensor & X, torch::Tensor & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : KDBLd &
+  +graph(const std::string & name = "KDB") const : std::vector<std::string>
  +predict(torch::Tensor & X) : torch::Tensor
  {static} +version() : std::string
  __
}
-abstract "bayesnet::FeatureSelect" as C_0001695326193250580823
-abstract C_0001695326193250580823 #aliceblue;line:blue;line.dotted;text:blue {
-  +FeatureSelect(const torch::Tensor & samples, const std::vector<std::string> & features, const std::string & className, const int maxFeatures, const int classNumStates, const torch::Tensor & weights) : void
-  +~FeatureSelect() : void
-  ..
-  #computeMeritCFS() : double
-  #computeSuFeatures(const int a, const int b) : double
-  #computeSuLabels() : void
-  {abstract} +fit() = 0 : void
-  +getFeatures() const : std::vector<int>
-  +getScores() const : std::vector<double>
-  #initialize() : void
-  #symmetricalUncertainty(int a, int b) : double
-  __
-  #fitted : bool
-  #maxFeatures : int
-  #selectedFeatures : std::vector<int>
-  #selectedScores : std::vector<double>
-  #suFeatures : std::map<std::pair<int,int>,double>
-  #suLabels : std::vector<double>
-  #weights : const torch::Tensor &
-}
-class "bayesnet::CFS" as C_0000011627355691342494
-class C_0000011627355691342494 #aliceblue;line:blue;line.dotted;text:blue {
-  +CFS(const torch::Tensor & samples, const std::vector<std::string> & features, const std::string & className, const int maxFeatures, const int classNumStates, const torch::Tensor & weights) : void
-  +~CFS() : void
-  ..
-  +fit() : void
-  __
-}
-class "bayesnet::FCBF" as C_0000144682015341746929
-class C_0000144682015341746929 #aliceblue;line:blue;line.dotted;text:blue {
-  +FCBF(const torch::Tensor & samples, const std::vector<std::string> & features, const std::string & className, const int maxFeatures, const int classNumStates, const torch::Tensor & weights, const double threshold) : void
-  +~FCBF() : void
-  ..
-  +fit() : void
-  __
-}
-class "bayesnet::IWSS" as C_0000008268514674428553
-class C_0000008268514674428553 #aliceblue;line:blue;line.dotted;text:blue {
-  +IWSS(const torch::Tensor & samples, const std::vector<std::string> & features, const std::string & className, const int maxFeatures, const int classNumStates, const torch::Tensor & weights, const double threshold) : void
-  +~IWSS() : void
-  ..
-  +fit() : void
-  __
-}
-class "bayesnet::SPODE" as C_0000512022813807538451
-class C_0000512022813807538451 #aliceblue;line:blue;line.dotted;text:blue {
+class "bayesnet::SPODE" as C_0004096182510460307610
+class C_0004096182510460307610 #aliceblue;line:blue;line.dotted;text:blue {
  +SPODE(int root) : void
  +~SPODE() = default : void
  ..
  #buildModel(const torch::Tensor & weights) : void
  +graph(const std::string & name = "SPODE") const : std::vector<std::string>
+  +setHyperparameters(const nlohmann::json & hyperparameters_) : void
  __
}
-class "bayesnet::Ensemble" as C_0001985241386355360576
-class C_0001985241386355360576 #aliceblue;line:blue;line.dotted;text:blue {
+class "bayesnet::SPODELd" as C_0010957245114062042836
+class C_0010957245114062042836 #aliceblue;line:blue;line.dotted;text:blue {
+  +SPODELd(int root) : void
+  +~SPODELd() = default : void
+  ..
+  +commonFit(const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : SPODELd &
+  +fit(torch::Tensor & X, torch::Tensor & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : SPODELd &
+  +fit(torch::Tensor & dataset, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : SPODELd &
+  +graph(const std::string & name = "SPODELd") const : std::vector<std::string>
+  +predict(torch::Tensor & X) : torch::Tensor
+  {static} +version() : std::string
+  __
+}
+class "bayesnet::SPnDE" as C_0016268916386101512883
+class C_0016268916386101512883 #aliceblue;line:blue;line.dotted;text:blue {
+  +SPnDE(std::vector<int> parents) : void
+  +~SPnDE() = default : void
+  ..
+  #buildModel(const torch::Tensor & weights) : void
+  +graph(const std::string & name = "SPnDE") const : std::vector<std::string>
+  __
+}
+class "bayesnet::TAN" as C_0014087955399074584137
+class C_0014087955399074584137 #aliceblue;line:blue;line.dotted;text:blue {
+  +TAN() : void
+  +~TAN() = default : void
+  ..
+  #buildModel(const torch::Tensor & weights) : void
+  +graph(const std::string & name = "TAN") const : std::vector<std::string>
+  +setHyperparameters(const nlohmann::json & hyperparameters_) : void
+  __
+}
+class "bayesnet::TANLd" as C_0013350632773616302678
+class C_0013350632773616302678 #aliceblue;line:blue;line.dotted;text:blue {
+  +TANLd() : void
+  +~TANLd() = default : void
+  ..
+  +fit(torch::Tensor & X, torch::Tensor & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states, const Smoothing_t smoothing) : TANLd &
+  +graph(const std::string & name = "TANLd") const : std::vector<std::string>
+  +predict(torch::Tensor & X) : torch::Tensor
+  __
+}
+class "bayesnet::XSp2de" as C_0007640742442325463418
+class C_0007640742442325463418 #aliceblue;line:blue;line.dotted;text:blue {
+  +XSp2de(int spIndex1, int spIndex2) : void
+  ..
+  #buildModel(const torch::Tensor & weights) : void
+  +fitx(torch::Tensor & X, torch::Tensor & y, torch::Tensor & weights_, const Smoothing_t smoothing) : void
+  +getClassNumStates() const : int
+  +getNFeatures() const : int
+  +getNumberOfEdges() const : int
+  +getNumberOfNodes() const : int
+  +getNumberOfStates() const : int
+  +graph(const std::string & title) const : std::vector<std::string>
+  +predict(const std::vector<int> & instance) const : int
+  +predict(std::vector<std::vector<int>> & test_data) : std::vector<int>
+  +predict(torch::Tensor & X) : torch::Tensor
+  +predict_proba(const std::vector<int> & instance) const : std::vector<double>
+  +predict_proba(std::vector<std::vector<int>> & test_data) : std::vector<std::vector<double>>
+  +predict_proba(torch::Tensor & X) : torch::Tensor
+  +score(std::vector<std::vector<int>> & X, std::vector<int> & y) : float
+  +score(torch::Tensor & X, torch::Tensor & y) : float
+  +setHyperparameters(const nlohmann::json & hyperparameters_) : void
+  +to_string() const : std::string
+  #trainModel(const torch::Tensor & weights, const bayesnet::Smoothing_t smoothing) : void
+  __
+}
+class "bayesnet::XSpode" as C_0015654113248178830206
+class C_0015654113248178830206 #aliceblue;line:blue;line.dotted;text:blue {
+  +XSpode(int spIndex) : void
+  ..
+  #buildModel(const torch::Tensor & weights) : void
+  +fitx(torch::Tensor & X, torch::Tensor & y, torch::Tensor & weights_, const Smoothing_t smoothing) : void
+  +getClassNumStates() const : int
+  +getNFeatures() const : int
+  +getNumberOfEdges() const : int
+  +getNumberOfNodes() const : int
+  +getNumberOfStates() const : int
+  +getStates() : std::vector<int> &
+  +graph(const std::string & title) const : std::vector<std::string>
+  +normalize(std::vector<double> & v) const : void
+  +predict(const std::vector<int> & instance) const : int
+  +predict(std::vector<std::vector<int>> & X) : std::vector<int>
+  +predict(torch::Tensor & X) : torch::Tensor
+  +predict_proba(std::vector<std::vector<int>> & X) : std::vector<std::vector<double>>
+  +predict_proba(torch::Tensor & X) : torch::Tensor
+  +predict_proba(const std::vector<int> & instance) const : std::vector<double>
+  +score(torch::Tensor & X, torch::Tensor & y) : float
+  +score(std::vector<std::vector<int>> & X, std::vector<int> & y) : float
+  +setHyperparameters(const nlohmann::json & hyperparameters_) : void
+  +to_string() const : std::string
+  #trainModel(const torch::Tensor & weights, const bayesnet::Smoothing_t smoothing) : void
+  __
+}
+class "bayesnet::TensorUtils" as C_0010304804115474100819
+class C_0010304804115474100819 #aliceblue;line:blue;line.dotted;text:blue {
+  {static} +to_matrix(const torch::Tensor & X) : std::vector<std::vector<int>>
+  {static} +to_vector<T>(const torch::Tensor & y) : std::vector<T>
+  __
+}
+class "bayesnet::Ensemble" as C_0015881931090842884611
+class C_0015881931090842884611 #aliceblue;line:blue;line.dotted;text:blue {
  +Ensemble(bool predict_voting = true) : void
  +~Ensemble() = default : void
  ..
@@ -280,7 +336,7 @@ class C_0001985241386355360576 #aliceblue;line:blue;line.dotted;text:blue {
  +score(torch::Tensor & X, torch::Tensor & y) : float
  +show() const : std::vector<std::string>
  +topological_order() : std::vector<std::string>
-  #trainModel(const torch::Tensor & weights) : void
+  #trainModel(const torch::Tensor & weights, const Smoothing_t smoothing) : void
  #voting(torch::Tensor & votes) : torch::Tensor
  __
  #models : std::vector<std::unique_ptr<Classifier>>
@@ -288,41 +344,244 @@ __
#predict_voting : bool #predict_voting : bool
#significanceModels : std::vector<double> #significanceModels : std::vector<double>
} }
class "bayesnet::(anonymous_45089536)" as C_0001186398587753535158 class "bayesnet::A2DE" as C_0001410789567057647859
class C_0001186398587753535158 #aliceblue;line:blue;line.dotted;text:blue { class C_0001410789567057647859 #aliceblue;line:blue;line.dotted;text:blue {
+A2DE(bool predict_voting = false) : void
+~A2DE() : void
..
#buildModel(const torch::Tensor & weights) : void
+graph(const std::string & title = "A2DE") const : std::vector<std::string>
+setHyperparameters(const nlohmann::json & hyperparameters) : void
__
}
class "bayesnet::AODE" as C_0006288892608974306258
class C_0006288892608974306258 #aliceblue;line:blue;line.dotted;text:blue {
+AODE(bool predict_voting = false) : void
+~AODE() : void
..
#buildModel(const torch::Tensor & weights) : void
+graph(const std::string & title = "AODE") const : std::vector<std::string>
+setHyperparameters(const nlohmann::json & hyperparameters) : void
__
}
class "bayesnet::AODELd" as C_0003898187834670349177
class C_0003898187834670349177 #aliceblue;line:blue;line.dotted;text:blue {
+AODELd(bool predict_voting = true) : void
+~AODELd() = default : void
..
#buildModel(const torch::Tensor & weights) : void
+fit(torch::Tensor & X_, torch::Tensor & y_, const std::vector<std::string> & features_, const std::string & className_, std::map<std::string,std::vector<int>> & states_, const Smoothing_t smoothing) : AODELd &
+graph(const std::string & name = "AODELd") const : std::vector<std::string>
#trainModel(const torch::Tensor & weights, const Smoothing_t smoothing) : void
__
}
abstract "bayesnet::FeatureSelect" as C_0013562609546004646591
abstract C_0013562609546004646591 #aliceblue;line:blue;line.dotted;text:blue {
+FeatureSelect(const torch::Tensor & samples, const std::vector<std::string> & features, const std::string & className, const int maxFeatures, const int classNumStates, const torch::Tensor & weights) : void
+~FeatureSelect() : void
..
#computeMeritCFS() : double
#computeSuFeatures(const int a, const int b) : double
#computeSuLabels() : void
{abstract} +fit() = 0 : void
+getFeatures() const : std::vector<int>
+getScores() const : std::vector<double>
#initialize() : void
#symmetricalUncertainty(int a, int b) : double
__
#fitted : bool
#maxFeatures : int
#selectedFeatures : std::vector<int>
#selectedScores : std::vector<double>
#suFeatures : std::map<std::pair<int,int>,double>
#suLabels : std::vector<double>
#weights : const torch::Tensor &
}
class "bayesnet::(anonymous_60357672)" as C_0006397015156479549697
class C_0006397015156479549697 #aliceblue;line:blue;line.dotted;text:blue {
__ __
+CFS : std::string +CFS : std::string
+FCBF : std::string +FCBF : std::string
+IWSS : std::string +IWSS : std::string
} }
class "bayesnet::(anonymous_45090163)" as C_0000602764946063116717 class "bayesnet::(anonymous_60358326)" as C_0013066254331852347304
class C_0000602764946063116717 #aliceblue;line:blue;line.dotted;text:blue { class C_0013066254331852347304 #aliceblue;line:blue;line.dotted;text:blue {
__ __
+ASC : std::string +ASC : std::string
+DESC : std::string +DESC : std::string
+RAND : std::string +RAND : std::string
} }
class "bayesnet::BoostAODE" as C_0000358471592399852382 class "bayesnet::Boost" as C_0009819322948617116148
class C_0000358471592399852382 #aliceblue;line:blue;line.dotted;text:blue { class C_0009819322948617116148 #aliceblue;line:blue;line.dotted;text:blue {
+Boost(bool predict_voting = false) : void
+~Boost() = default : void
..
#add_model(std::unique_ptr<Classifier> model, double significance) : void
#buildModel(const torch::Tensor & weights) : void
#featureSelection(torch::Tensor & weights_) : std::vector<int>
#remove_last_model() : void
+setHyperparameters(const nlohmann::json & hyperparameters_) : void
#update_weights(torch::Tensor & ytrain, torch::Tensor & ypred, torch::Tensor & weights) : std::tuple<torch::Tensor &,double,bool>
#update_weights_block(int k, torch::Tensor & ytrain, torch::Tensor & weights) : std::tuple<torch::Tensor &,double,bool>
__
#X_test : torch::Tensor
#X_train : torch::Tensor
#alpha_block : bool
#bisection : bool
#block_update : bool
#convergence : bool
#convergence_best : bool
#featureSelector : FeatureSelect *
#maxTolerance : int
#order_algorithm : std::string
#selectFeatures : bool
#select_features_algorithm : std::string
#threshold : double
#y_test : torch::Tensor
#y_train : torch::Tensor
}
class "bayesnet::BoostA2DE" as C_0000272055465257861326
class C_0000272055465257861326 #aliceblue;line:blue;line.dotted;text:blue {
+BoostA2DE(bool predict_voting = false) : void
+~BoostA2DE() = default : void
..
+graph(const std::string & title = "BoostA2DE") const : std::vector<std::string>
#trainModel(const torch::Tensor & weights, const Smoothing_t smoothing) : void
__
}
class "bayesnet::(anonymous_60425028)" as C_0000461144706913711531
class C_0000461144706913711531 #aliceblue;line:blue;line.dotted;text:blue {
__
+CFS : std::string
+FCBF : std::string
+IWSS : std::string
}
class "bayesnet::(anonymous_60425682)" as C_0014849589915262463453
class C_0014849589915262463453 #aliceblue;line:blue;line.dotted;text:blue {
__
+ASC : std::string
+DESC : std::string
+RAND : std::string
}
class "bayesnet::BoostAODE" as C_0002867772739198819061
class C_0002867772739198819061 #aliceblue;line:blue;line.dotted;text:blue {
+BoostAODE(bool predict_voting = false) : void +BoostAODE(bool predict_voting = false) : void
+~BoostAODE() = default : void +~BoostAODE() = default : void
.. ..
#buildModel(const torch::Tensor & weights) : void
+graph(const std::string & title = "BoostAODE") const : std::vector<std::string> +graph(const std::string & title = "BoostAODE") const : std::vector<std::string>
+setHyperparameters(const nlohmann::json & hyperparameters_) : void #trainModel(const torch::Tensor & weights, const Smoothing_t smoothing) : void
#trainModel(const torch::Tensor & weights) : void
__ __
} }
class "bayesnet::MST" as C_0000131858426172291700 class "bayesnet::XBA2DE" as C_0008480973840710001141
class C_0000131858426172291700 #aliceblue;line:blue;line.dotted;text:blue { class C_0008480973840710001141 #aliceblue;line:blue;line.dotted;text:blue {
+XBA2DE(bool predict_voting = false) : void
+~XBA2DE() = default : void
..
+getVersion() : std::string
+graph(const std::string & title = "XBA2DE") const : std::vector<std::string>
#trainModel(const torch::Tensor & weights, const Smoothing_t smoothing) : void
__
}
class "bayesnet::(anonymous_60414016)" as C_0008746994658440620779
class C_0008746994658440620779 #aliceblue;line:blue;line.dotted;text:blue {
__
+CFS : std::string
+FCBF : std::string
+IWSS : std::string
}
class "bayesnet::(anonymous_60414670)" as C_0008030559132212449356
class C_0008030559132212449356 #aliceblue;line:blue;line.dotted;text:blue {
__
+ASC : std::string
+DESC : std::string
+RAND : std::string
}
class "bayesnet::XBAODE" as C_0005198482342493966768
class C_0005198482342493966768 #aliceblue;line:blue;line.dotted;text:blue {
+XBAODE() : void
..
+getVersion() : std::string
#trainModel(const torch::Tensor & weights, const bayesnet::Smoothing_t smoothing) : void
__
}
class "bayesnet::CFS" as C_0000093018845530739957
class C_0000093018845530739957 #aliceblue;line:blue;line.dotted;text:blue {
+CFS(const torch::Tensor & samples, const std::vector<std::string> & features, const std::string & className, const int maxFeatures, const int classNumStates, const torch::Tensor & weights) : void
+~CFS() : void
..
+fit() : void
__
}
class "bayesnet::FCBF" as C_0001157456122733975432
class C_0001157456122733975432 #aliceblue;line:blue;line.dotted;text:blue {
+FCBF(const torch::Tensor & samples, const std::vector<std::string> & features, const std::string & className, const int maxFeatures, const int classNumStates, const torch::Tensor & weights, const double threshold) : void
+~FCBF() : void
..
+fit() : void
__
}
class "bayesnet::IWSS" as C_0000066148117395428429
class C_0000066148117395428429 #aliceblue;line:blue;line.dotted;text:blue {
+IWSS(const torch::Tensor & samples, const std::vector<std::string> & features, const std::string & className, const int maxFeatures, const int classNumStates, const torch::Tensor & weights, const double threshold) : void
+~IWSS() : void
..
+fit() : void
__
}
class "bayesnet::(anonymous_60810808)" as C_0012002108046995621535
class C_0012002108046995621535 #aliceblue;line:blue;line.dotted;text:blue {
__
+CFS : std::string
+FCBF : std::string
+IWSS : std::string
}
class "bayesnet::(anonymous_60811462)" as C_0004735044229422764240
class C_0004735044229422764240 #aliceblue;line:blue;line.dotted;text:blue {
__
+ASC : std::string
+DESC : std::string
+RAND : std::string
}
class "bayesnet::(anonymous_60804220)" as C_0007082100550474633839
class C_0007082100550474633839 #aliceblue;line:blue;line.dotted;text:blue {
__
+CFS : std::string
+FCBF : std::string
+IWSS : std::string
}
class "bayesnet::(anonymous_60804874)" as C_0003669430095936529648
class C_0003669430095936529648 #aliceblue;line:blue;line.dotted;text:blue {
__
+ASC : std::string
+DESC : std::string
+RAND : std::string
}
class "bayesnet::(anonymous_60809706)" as C_0012336951062058157227
class C_0012336951062058157227 #aliceblue;line:blue;line.dotted;text:blue {
__
+CFS : std::string
+FCBF : std::string
+IWSS : std::string
}
class "bayesnet::(anonymous_60810360)" as C_0002435892998884329673
class C_0002435892998884329673 #aliceblue;line:blue;line.dotted;text:blue {
__
+ASC : std::string
+DESC : std::string
+RAND : std::string
}
class "bayesnet::MST" as C_0001054867409378333602
class C_0001054867409378333602 #aliceblue;line:blue;line.dotted;text:blue {
+MST() = default : void
+MST(const std::vector<std::string> & features, const torch::Tensor & weights, const int root) : void
..
+insertElement(std::list<int> & variables, int variable) : void
+maximumSpanningTree() : std::vector<std::pair<int,int>>
+reorder(std::vector<std::pair<float,std::pair<int,int>>> T, int root_original) : std::vector<std::pair<int,int>>
__
}
class "bayesnet::Graph" as C_0001197041682001898467
class "bayesnet::Graph" as C_0009576333456015187741
class C_0009576333456015187741 #aliceblue;line:blue;line.dotted;text:blue {
+Graph(int V) : void
..
+addEdge(int u, int v, float wt) : void
@@ -332,81 +591,86 @@ class C_0001197041682001898467 #aliceblue;line:blue;line.dotted;text:blue {
+union_set(int u, int v) : void
__
}
class "bayesnet::KDBLd" as C_0000344502277874806837 C_0010428199432536647474 --> C_0010428199432536647474 : -parents
class C_0000344502277874806837 #aliceblue;line:blue;line.dotted;text:blue { C_0010428199432536647474 --> C_0010428199432536647474 : -children
+KDBLd(int k) : void C_0009493661199123436603 ..> C_0013393078277439680282
+~KDBLd() = default : void C_0009493661199123436603 o-- C_0010428199432536647474 : -nodes
.. C_0002617087915615796317 ..> C_0013393078277439680282
+fit(torch::Tensor & X, torch::Tensor & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states) : KDBLd & C_0002617087915615796317 o-- C_0005907365846270811004 : #status
+graph(const std::string & name = "KDB") const : std::vector<std::string> C_0016351972983202413152 ..> C_0013393078277439680282
+predict(torch::Tensor & X) : torch::Tensor C_0016351972983202413152 ..> C_0005907365846270811004
{static} +version() : std::string C_0016351972983202413152 o-- C_0009493661199123436603 : #model
__ C_0016351972983202413152 o-- C_0005895723015084986588 : #metrics
} C_0002617087915615796317 <|-- C_0016351972983202413152
class "bayesnet::AODE" as C_0000786111576121788282
class C_0000786111576121788282 #aliceblue;line:blue;line.dotted;text:blue {
+AODE(bool predict_voting = false) : void
+~AODE() : void
..
#buildModel(const torch::Tensor & weights) : void
+graph(const std::string & title = "AODE") const : std::vector<std::string>
+setHyperparameters(const nlohmann::json & hyperparameters) : void
__
}
class "bayesnet::SPODELd" as C_0001369655639257755354
class C_0001369655639257755354 #aliceblue;line:blue;line.dotted;text:blue {
+SPODELd(int root) : void
+~SPODELd() = default : void
..
+commonFit(const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states) : SPODELd &
+fit(torch::Tensor & X, torch::Tensor & y, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states) : SPODELd &
+fit(torch::Tensor & dataset, const std::vector<std::string> & features, const std::string & className, std::map<std::string,std::vector<int>> & states) : SPODELd &
+graph(const std::string & name = "SPODE") const : std::vector<std::string>
+predict(torch::Tensor & X) : torch::Tensor
{static} +version() : std::string
__
}
class "bayesnet::AODELd" as C_0000487273479333793647
class C_0000487273479333793647 #aliceblue;line:blue;line.dotted;text:blue {
+AODELd(bool predict_voting = true) : void
+~AODELd() = default : void
..
#buildModel(const torch::Tensor & weights) : void
+fit(torch::Tensor & X_, torch::Tensor & y_, const std::vector<std::string> & features_, const std::string & className_, std::map<std::string,std::vector<int>> & states_) : AODELd &
+graph(const std::string & name = "AODELd") const : std::vector<std::string>
#trainModel(const torch::Tensor & weights) : void
__
}
C_0001303524929067080934 --> C_0001303524929067080934 : -parents
C_0001303524929067080934 --> C_0001303524929067080934 : -children
C_0001186707649890429575 o-- C_0001303524929067080934 : -nodes
C_0000327135989451974539 ..> C_0000738420730783851375
C_0002043996622900301644 o-- C_0001186707649890429575 : #model
C_0002043996622900301644 o-- C_0000736965376885623323 : #metrics
C_0002043996622900301644 o-- C_0000738420730783851375 : #status
C_0000327135989451974539 <|-- C_0002043996622900301644
C_0002043996622900301644 <|-- C_0001112865019015250005
C_0002043996622900301644 <|-- C_0001760994424884323017
C_0002219995589162262979 ..> C_0001186707649890429575
C_0001760994424884323017 <|-- C_0001668829096702037834
C_0002219995589162262979 <|-- C_0001668829096702037834
C_0000736965376885623323 <|-- C_0001695326193250580823
C_0001695326193250580823 <|-- C_0000011627355691342494
C_0001695326193250580823 <|-- C_0000144682015341746929
C_0001695326193250580823 <|-- C_0000008268514674428553
C_0002043996622900301644 <|-- C_0000512022813807538451
C_0001985241386355360576 o-- C_0002043996622900301644 : #models
C_0002043996622900301644 <|-- C_0001985241386355360576
C_0000358471592399852382 --> C_0001695326193250580823 : -featureSelector
C_0001985241386355360576 <|-- C_0000358471592399852382
C_0001112865019015250005 <|-- C_0000344502277874806837
C_0002219995589162262979 <|-- C_0000344502277874806837
C_0001985241386355360576 <|-- C_0000786111576121788282
C_0000512022813807538451 <|-- C_0001369655639257755354
C_0002219995589162262979 <|-- C_0001369655639257755354
C_0001985241386355360576 <|-- C_0000487273479333793647
C_0002219995589162262979 <|-- C_0000487273479333793647
'Generated with clang-uml, version 0.5.1
'LLVM version clang version 17.0.6 (Fedora 17.0.6-2.fc39)
C_0017759964713298103839 ..> C_0009493661199123436603
C_0016351972983202413152 <|-- C_0008902920152122000044
C_0002756018222998454702 ..> C_0013393078277439680282
C_0008902920152122000044 <|-- C_0002756018222998454702
C_0017759964713298103839 <|-- C_0002756018222998454702
C_0016351972983202413152 <|-- C_0004096182510460307610
C_0010957245114062042836 ..> C_0013393078277439680282
C_0004096182510460307610 <|-- C_0010957245114062042836
C_0017759964713298103839 <|-- C_0010957245114062042836
C_0016351972983202413152 <|-- C_0016268916386101512883
C_0016351972983202413152 <|-- C_0014087955399074584137
C_0013350632773616302678 ..> C_0013393078277439680282
C_0014087955399074584137 <|-- C_0013350632773616302678
C_0017759964713298103839 <|-- C_0013350632773616302678
C_0007640742442325463418 ..> C_0013393078277439680282
C_0016351972983202413152 <|-- C_0007640742442325463418
C_0015654113248178830206 ..> C_0013393078277439680282
C_0016351972983202413152 <|-- C_0015654113248178830206
C_0015881931090842884611 ..> C_0013393078277439680282
C_0015881931090842884611 o-- C_0016351972983202413152 : #models
C_0016351972983202413152 <|-- C_0015881931090842884611
C_0015881931090842884611 <|-- C_0001410789567057647859
C_0015881931090842884611 <|-- C_0006288892608974306258
C_0003898187834670349177 ..> C_0013393078277439680282
C_0015881931090842884611 <|-- C_0003898187834670349177
C_0017759964713298103839 <|-- C_0003898187834670349177
C_0005895723015084986588 <|-- C_0013562609546004646591
C_0009819322948617116148 ..> C_0016351972983202413152
C_0009819322948617116148 --> C_0013562609546004646591 : #featureSelector
C_0015881931090842884611 <|-- C_0009819322948617116148
C_0000272055465257861326 ..> C_0013393078277439680282
C_0009819322948617116148 <|-- C_0000272055465257861326
C_0002867772739198819061 ..> C_0013393078277439680282
C_0009819322948617116148 <|-- C_0002867772739198819061
C_0008480973840710001141 ..> C_0013393078277439680282
C_0009819322948617116148 <|-- C_0008480973840710001141
C_0005198482342493966768 ..> C_0013393078277439680282
C_0009819322948617116148 <|-- C_0005198482342493966768
C_0013562609546004646591 <|-- C_0000093018845530739957
C_0013562609546004646591 <|-- C_0001157456122733975432
C_0013562609546004646591 <|-- C_0000066148117395428429
'Generated with clang-uml, version 0.5.5
'LLVM version clang version 18.1.8 (Fedora 18.1.8-5.fc41)
@enduml
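The diagram above gives full constructor signatures for the three filter feature selectors (CFS, FCBF and IWSS). As orientation, here is a minimal, hypothetical sketch of driving one of them from C++; the constructor and fit() are taken from the UML, while the header path, the column-per-sample tensor layout and the uniform weights are assumptions, not part of this diff:

// Hypothetical driver for bayesnet::CFS, based only on the UML above.
#include <bayesnet/feature_selection/CFS.h>  // assumed install layout
#include <torch/torch.h>
#include <string>
#include <vector>

void selectFeatures(const torch::Tensor& samples,  // assumed: one column per sample
                    const std::vector<std::string>& features,
                    const std::string& className,
                    int classNumStates) {
    // Uniform sample weights; the constructor only asks for a weight tensor.
    auto weights = torch::full({ samples.size(1) }, 1.0 / samples.size(1));
    bayesnet::CFS selector(samples, features, className,
                           static_cast<int>(features.size()),  // maxFeatures
                           classNumStates, weights);
    selector.fit();  // runs the correlation-based subset selection
}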

File diff suppressed because one or more lines are too long

Before: 139 KiB → After: 229 KiB


@@ -1,128 +1,314 @@
[SVG diff: dependency graph "BayesNet", regenerated with graphviz 12.1.0 (previously 8.1.0) and grown from 1632x288pt to 3725x432pt. Before: BayesNet linked libc10.so, libkineto.a, mdlp and torch_library, with torch_library pulling in libtorch.so, torch_cpu_library, libtorch_cpu.so, caffe2::mkl and the dummy (protobuf::libprotobuf) target. After: the mdlp node is replaced by fimdlp, which shares BayesNet's dependencies, and the graph gains CUDA edges: libc10_cuda.so, /usr/lib64/libcuda.so, /usr/local/cuda/lib64/libcudart.so, libnvToolsExt.so and libnvrtc.so, plus torch_cuda_library linking libtorch_cuda.so, torch::cudart → CUDA::cudart and torch::nvtoolsext → CUDA::nvToolsExt.]

Before: 7.1 KiB → After: 18 KiB

Submodule lib/catch2 deleted from 029fe3b460

Submodule lib/folding deleted from 2ac43e32ac

Submodule lib/json deleted from 8c391e04fe

File diff suppressed because it is too large

File diff suppressed because it is too large

Submodule lib/mdlp deleted from 236d1b2f8b


@@ -4,16 +4,19 @@ project(bayesnet_sample)
set(CMAKE_CXX_STANDARD 17)
find_package(Torch REQUIRED)
find_package(Torch CONFIG REQUIRED)
find_library(BayesNet NAMES BayesNet.a libBayesNet.a REQUIRED)
find_package(bayesnet CONFIG REQUIRED)
find_package(fimdlp CONFIG REQUIRED)
find_package(folding CONFIG REQUIRED)
find_package(arff-files CONFIG REQUIRED)
find_package(nlohmann_json CONFIG REQUIRED)
include_directories(
    lib/Files
    lib/mdlp
    lib/json/include
    /usr/local/include
)
add_subdirectory(lib/mdlp)
add_executable(bayesnet_sample sample.cc)
target_link_libraries(bayesnet_sample mdlp "${TORCH_LIBRARIES}" "${BayesNet}")
target_link_libraries(bayesnet_sample PRIVATE
    fimdlp::fimdlp
    arff-files::arff-files
    "${TORCH_LIBRARIES}"
    bayesnet::bayesnet
    nlohmann_json::nlohmann_json
    folding::folding
)
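
With the config packages above, a minimal, hypothetical consumer could look like the sketch below; only XBAODE() and getVersion() appear in this diff's class diagram, and the header path is an assumption about the installed package layout:

// Hypothetical sample.cc for the CMakeLists.txt above.
#include <iostream>
#include <bayesnet/ensembles/XBAODE.h>  // assumed path

int main() {
    bayesnet::XBAODE clf;  // constructor shown in the class diagram
    std::cout << "XBAODE version: " << clf.getVersion() << std::endl;
    return 0;
}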


@@ -1,55 +0,0 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <utility>
#include <nlohmann/detail/abi_macros.hpp>
#include <nlohmann/detail/conversions/from_json.hpp>
#include <nlohmann/detail/conversions/to_json.hpp>
#include <nlohmann/detail/meta/identity_tag.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
/// @sa https://json.nlohmann.me/api/adl_serializer/
template<typename ValueType, typename>
struct adl_serializer
{
/// @brief convert a JSON value to any value type
/// @sa https://json.nlohmann.me/api/adl_serializer/from_json/
template<typename BasicJsonType, typename TargetType = ValueType>
static auto from_json(BasicJsonType && j, TargetType& val) noexcept(
noexcept(::nlohmann::from_json(std::forward<BasicJsonType>(j), val)))
-> decltype(::nlohmann::from_json(std::forward<BasicJsonType>(j), val), void())
{
::nlohmann::from_json(std::forward<BasicJsonType>(j), val);
}
/// @brief convert a JSON value to any value type
/// @sa https://json.nlohmann.me/api/adl_serializer/from_json/
template<typename BasicJsonType, typename TargetType = ValueType>
static auto from_json(BasicJsonType && j) noexcept(
noexcept(::nlohmann::from_json(std::forward<BasicJsonType>(j), detail::identity_tag<TargetType> {})))
-> decltype(::nlohmann::from_json(std::forward<BasicJsonType>(j), detail::identity_tag<TargetType> {}))
{
return ::nlohmann::from_json(std::forward<BasicJsonType>(j), detail::identity_tag<TargetType> {});
}
/// @brief convert any value type to a JSON value
/// @sa https://json.nlohmann.me/api/adl_serializer/to_json/
template<typename BasicJsonType, typename TargetType = ValueType>
static auto to_json(BasicJsonType& j, TargetType && val) noexcept(
noexcept(::nlohmann::to_json(j, std::forward<TargetType>(val))))
-> decltype(::nlohmann::to_json(j, std::forward<TargetType>(val)), void())
{
::nlohmann::to_json(j, std::forward<TargetType>(val));
}
};
NLOHMANN_JSON_NAMESPACE_END
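
This removed vendored header (now supplied by the vcpkg nlohmann_json package) is the hook that lets user types convert through to_json/from_json functions found via argument-dependent lookup. A small self-contained example of the mechanism it enables, using only standard nlohmann::json usage rather than code from this repository:

// ADL serialization example for nlohmann::json.
#include <nlohmann/json.hpp>
#include <string>

namespace app {
    struct Person { std::string name; int age = 0; };

    // Found via ADL by adl_serializer::to_json / from_json.
    void to_json(nlohmann::json& j, const Person& p) {
        j = nlohmann::json{ {"name", p.name}, {"age", p.age} };
    }
    void from_json(const nlohmann::json& j, Person& p) {
        j.at("name").get_to(p.name);
        j.at("age").get_to(p.age);
    }
}

int main() {
    nlohmann::json j = app::Person{ "Ada", 36 };  // uses app::to_json
    app::Person p = j.get<app::Person>();         // uses app::from_json
    return p.age == 36 ? 0 : 1;
}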


@@ -1,103 +0,0 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <cstdint> // uint8_t, uint64_t
#include <tuple> // tie
#include <utility> // move
#include <nlohmann/detail/abi_macros.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
/// @brief an internal type for a backed binary type
/// @sa https://json.nlohmann.me/api/byte_container_with_subtype/
template<typename BinaryType>
class byte_container_with_subtype : public BinaryType
{
public:
using container_type = BinaryType;
using subtype_type = std::uint64_t;
/// @sa https://json.nlohmann.me/api/byte_container_with_subtype/byte_container_with_subtype/
byte_container_with_subtype() noexcept(noexcept(container_type()))
: container_type()
{}
/// @sa https://json.nlohmann.me/api/byte_container_with_subtype/byte_container_with_subtype/
byte_container_with_subtype(const container_type& b) noexcept(noexcept(container_type(b)))
: container_type(b)
{}
/// @sa https://json.nlohmann.me/api/byte_container_with_subtype/byte_container_with_subtype/
byte_container_with_subtype(container_type&& b) noexcept(noexcept(container_type(std::move(b))))
: container_type(std::move(b))
{}
/// @sa https://json.nlohmann.me/api/byte_container_with_subtype/byte_container_with_subtype/
byte_container_with_subtype(const container_type& b, subtype_type subtype_) noexcept(noexcept(container_type(b)))
: container_type(b)
, m_subtype(subtype_)
, m_has_subtype(true)
{}
/// @sa https://json.nlohmann.me/api/byte_container_with_subtype/byte_container_with_subtype/
byte_container_with_subtype(container_type&& b, subtype_type subtype_) noexcept(noexcept(container_type(std::move(b))))
: container_type(std::move(b))
, m_subtype(subtype_)
, m_has_subtype(true)
{}
bool operator==(const byte_container_with_subtype& rhs) const
{
return std::tie(static_cast<const BinaryType&>(*this), m_subtype, m_has_subtype) ==
std::tie(static_cast<const BinaryType&>(rhs), rhs.m_subtype, rhs.m_has_subtype);
}
bool operator!=(const byte_container_with_subtype& rhs) const
{
return !(rhs == *this);
}
/// @brief sets the binary subtype
/// @sa https://json.nlohmann.me/api/byte_container_with_subtype/set_subtype/
void set_subtype(subtype_type subtype_) noexcept
{
m_subtype = subtype_;
m_has_subtype = true;
}
/// @brief return the binary subtype
/// @sa https://json.nlohmann.me/api/byte_container_with_subtype/subtype/
constexpr subtype_type subtype() const noexcept
{
return m_has_subtype ? m_subtype : static_cast<subtype_type>(-1);
}
/// @brief return whether the value has a subtype
/// @sa https://json.nlohmann.me/api/byte_container_with_subtype/has_subtype/
constexpr bool has_subtype() const noexcept
{
return m_has_subtype;
}
/// @brief clears the binary subtype
/// @sa https://json.nlohmann.me/api/byte_container_with_subtype/clear_subtype/
void clear_subtype() noexcept
{
m_subtype = 0;
m_has_subtype = false;
}
private:
subtype_type m_subtype = 0;
bool m_has_subtype = false;
};
NLOHMANN_JSON_NAMESPACE_END
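
byte_container_with_subtype is what backs nlohmann::json::binary_t, so binary values can carry the numeric subtype used by CBOR, BSON and MessagePack. A short example of the public API defined above, again standard library usage rather than repository code:

// Binary-with-subtype example for nlohmann::json.
#include <nlohmann/json.hpp>
#include <cstdint>
#include <vector>

int main() {
    std::vector<std::uint8_t> payload{ 0xCA, 0xFE };
    nlohmann::json j = nlohmann::json::binary(payload, 42);  // subtype 42
    auto& bin = j.get_binary();
    bool ok = bin.has_subtype() && bin.subtype() == 42;
    bin.clear_subtype();  // back to an untagged binary value
    return ok && !bin.has_subtype() ? 0 : 1;
}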


@@ -1,100 +0,0 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
// This file contains all macro definitions affecting or depending on the ABI
#ifndef JSON_SKIP_LIBRARY_VERSION_CHECK
#if defined(NLOHMANN_JSON_VERSION_MAJOR) && defined(NLOHMANN_JSON_VERSION_MINOR) && defined(NLOHMANN_JSON_VERSION_PATCH)
#if NLOHMANN_JSON_VERSION_MAJOR != 3 || NLOHMANN_JSON_VERSION_MINOR != 11 || NLOHMANN_JSON_VERSION_PATCH != 3
#warning "Already included a different version of the library!"
#endif
#endif
#endif
#define NLOHMANN_JSON_VERSION_MAJOR 3 // NOLINT(modernize-macro-to-enum)
#define NLOHMANN_JSON_VERSION_MINOR 11 // NOLINT(modernize-macro-to-enum)
#define NLOHMANN_JSON_VERSION_PATCH 3 // NOLINT(modernize-macro-to-enum)
#ifndef JSON_DIAGNOSTICS
#define JSON_DIAGNOSTICS 0
#endif
#ifndef JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON
#define JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON 0
#endif
#if JSON_DIAGNOSTICS
#define NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS _diag
#else
#define NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS
#endif
#if JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON
#define NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON _ldvcmp
#else
#define NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON
#endif
#ifndef NLOHMANN_JSON_NAMESPACE_NO_VERSION
#define NLOHMANN_JSON_NAMESPACE_NO_VERSION 0
#endif
// Construct the namespace ABI tags component
#define NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b) json_abi ## a ## b
#define NLOHMANN_JSON_ABI_TAGS_CONCAT(a, b) \
NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b)
#define NLOHMANN_JSON_ABI_TAGS \
NLOHMANN_JSON_ABI_TAGS_CONCAT( \
NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS, \
NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON)
// Construct the namespace version component
#define NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT_EX(major, minor, patch) \
_v ## major ## _ ## minor ## _ ## patch
#define NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT(major, minor, patch) \
NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT_EX(major, minor, patch)
#if NLOHMANN_JSON_NAMESPACE_NO_VERSION
#define NLOHMANN_JSON_NAMESPACE_VERSION
#else
#define NLOHMANN_JSON_NAMESPACE_VERSION \
NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT(NLOHMANN_JSON_VERSION_MAJOR, \
NLOHMANN_JSON_VERSION_MINOR, \
NLOHMANN_JSON_VERSION_PATCH)
#endif
// Combine namespace components
#define NLOHMANN_JSON_NAMESPACE_CONCAT_EX(a, b) a ## b
#define NLOHMANN_JSON_NAMESPACE_CONCAT(a, b) \
NLOHMANN_JSON_NAMESPACE_CONCAT_EX(a, b)
#ifndef NLOHMANN_JSON_NAMESPACE
#define NLOHMANN_JSON_NAMESPACE \
nlohmann::NLOHMANN_JSON_NAMESPACE_CONCAT( \
NLOHMANN_JSON_ABI_TAGS, \
NLOHMANN_JSON_NAMESPACE_VERSION)
#endif
#ifndef NLOHMANN_JSON_NAMESPACE_BEGIN
#define NLOHMANN_JSON_NAMESPACE_BEGIN \
namespace nlohmann \
{ \
inline namespace NLOHMANN_JSON_NAMESPACE_CONCAT( \
NLOHMANN_JSON_ABI_TAGS, \
NLOHMANN_JSON_NAMESPACE_VERSION) \
{
#endif
#ifndef NLOHMANN_JSON_NAMESPACE_END
#define NLOHMANN_JSON_NAMESPACE_END \
} /* namespace (inline namespace) NOLINT(readability/namespace) */ \
} // namespace nlohmann
#endif
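
The macros above wrap the library in an inline ABI namespace (e.g. nlohmann::json_abi_v3_11_3), which is why mixing two versions in one binary only triggers the #warning instead of ODR violations. The version macros also work as compile-time guards:

// Version-guard example for nlohmann::json.
#include <nlohmann/json.hpp>
#include <cstdio>

#if NLOHMANN_JSON_VERSION_MAJOR != 3
#error "expected nlohmann/json 3.x"
#endif

int main() {
    std::printf("nlohmann/json %d.%d.%d\n",
                NLOHMANN_JSON_VERSION_MAJOR,
                NLOHMANN_JSON_VERSION_MINOR,
                NLOHMANN_JSON_VERSION_PATCH);
    return 0;
}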


@@ -1,497 +0,0 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <algorithm> // transform
#include <array> // array
#include <forward_list> // forward_list
#include <iterator> // inserter, front_inserter, end
#include <map> // map
#include <string> // string
#include <tuple> // tuple, make_tuple
#include <type_traits> // is_arithmetic, is_same, is_enum, underlying_type, is_convertible
#include <unordered_map> // unordered_map
#include <utility> // pair, declval
#include <valarray> // valarray
#include <nlohmann/detail/exceptions.hpp>
#include <nlohmann/detail/macro_scope.hpp>
#include <nlohmann/detail/meta/cpp_future.hpp>
#include <nlohmann/detail/meta/identity_tag.hpp>
#include <nlohmann/detail/meta/std_fs.hpp>
#include <nlohmann/detail/meta/type_traits.hpp>
#include <nlohmann/detail/string_concat.hpp>
#include <nlohmann/detail/value_t.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{
template<typename BasicJsonType>
inline void from_json(const BasicJsonType& j, typename std::nullptr_t& n)
{
if (JSON_HEDLEY_UNLIKELY(!j.is_null()))
{
JSON_THROW(type_error::create(302, concat("type must be null, but is ", j.type_name()), &j));
}
n = nullptr;
}
// overloads for basic_json template parameters
template < typename BasicJsonType, typename ArithmeticType,
enable_if_t < std::is_arithmetic<ArithmeticType>::value&&
!std::is_same<ArithmeticType, typename BasicJsonType::boolean_t>::value,
int > = 0 >
void get_arithmetic_value(const BasicJsonType& j, ArithmeticType& val)
{
switch (static_cast<value_t>(j))
{
case value_t::number_unsigned:
{
val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_unsigned_t*>());
break;
}
case value_t::number_integer:
{
val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_integer_t*>());
break;
}
case value_t::number_float:
{
val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_float_t*>());
break;
}
case value_t::null:
case value_t::object:
case value_t::array:
case value_t::string:
case value_t::boolean:
case value_t::binary:
case value_t::discarded:
default:
JSON_THROW(type_error::create(302, concat("type must be number, but is ", j.type_name()), &j));
}
}
template<typename BasicJsonType>
inline void from_json(const BasicJsonType& j, typename BasicJsonType::boolean_t& b)
{
if (JSON_HEDLEY_UNLIKELY(!j.is_boolean()))
{
JSON_THROW(type_error::create(302, concat("type must be boolean, but is ", j.type_name()), &j));
}
b = *j.template get_ptr<const typename BasicJsonType::boolean_t*>();
}
template<typename BasicJsonType>
inline void from_json(const BasicJsonType& j, typename BasicJsonType::string_t& s)
{
if (JSON_HEDLEY_UNLIKELY(!j.is_string()))
{
JSON_THROW(type_error::create(302, concat("type must be string, but is ", j.type_name()), &j));
}
s = *j.template get_ptr<const typename BasicJsonType::string_t*>();
}
template <
typename BasicJsonType, typename StringType,
enable_if_t <
std::is_assignable<StringType&, const typename BasicJsonType::string_t>::value
&& is_detected_exact<typename BasicJsonType::string_t::value_type, value_type_t, StringType>::value
&& !std::is_same<typename BasicJsonType::string_t, StringType>::value
&& !is_json_ref<StringType>::value, int > = 0 >
inline void from_json(const BasicJsonType& j, StringType& s)
{
if (JSON_HEDLEY_UNLIKELY(!j.is_string()))
{
JSON_THROW(type_error::create(302, concat("type must be string, but is ", j.type_name()), &j));
}
s = *j.template get_ptr<const typename BasicJsonType::string_t*>();
}
template<typename BasicJsonType>
inline void from_json(const BasicJsonType& j, typename BasicJsonType::number_float_t& val)
{
get_arithmetic_value(j, val);
}
template<typename BasicJsonType>
inline void from_json(const BasicJsonType& j, typename BasicJsonType::number_unsigned_t& val)
{
get_arithmetic_value(j, val);
}
template<typename BasicJsonType>
inline void from_json(const BasicJsonType& j, typename BasicJsonType::number_integer_t& val)
{
get_arithmetic_value(j, val);
}
#if !JSON_DISABLE_ENUM_SERIALIZATION
template<typename BasicJsonType, typename EnumType,
enable_if_t<std::is_enum<EnumType>::value, int> = 0>
inline void from_json(const BasicJsonType& j, EnumType& e)
{
typename std::underlying_type<EnumType>::type val;
get_arithmetic_value(j, val);
e = static_cast<EnumType>(val);
}
#endif // JSON_DISABLE_ENUM_SERIALIZATION
// forward_list doesn't have an insert method
template<typename BasicJsonType, typename T, typename Allocator,
enable_if_t<is_getable<BasicJsonType, T>::value, int> = 0>
inline void from_json(const BasicJsonType& j, std::forward_list<T, Allocator>& l)
{
if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
{
JSON_THROW(type_error::create(302, concat("type must be array, but is ", j.type_name()), &j));
}
l.clear();
std::transform(j.rbegin(), j.rend(),
std::front_inserter(l), [](const BasicJsonType & i)
{
return i.template get<T>();
});
}
// valarray doesn't have an insert method
template<typename BasicJsonType, typename T,
enable_if_t<is_getable<BasicJsonType, T>::value, int> = 0>
inline void from_json(const BasicJsonType& j, std::valarray<T>& l)
{
if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
{
JSON_THROW(type_error::create(302, concat("type must be array, but is ", j.type_name()), &j));
}
l.resize(j.size());
std::transform(j.begin(), j.end(), std::begin(l),
[](const BasicJsonType & elem)
{
return elem.template get<T>();
});
}
template<typename BasicJsonType, typename T, std::size_t N>
auto from_json(const BasicJsonType& j, T (&arr)[N]) // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays)
-> decltype(j.template get<T>(), void())
{
for (std::size_t i = 0; i < N; ++i)
{
arr[i] = j.at(i).template get<T>();
}
}
template<typename BasicJsonType>
inline void from_json_array_impl(const BasicJsonType& j, typename BasicJsonType::array_t& arr, priority_tag<3> /*unused*/)
{
arr = *j.template get_ptr<const typename BasicJsonType::array_t*>();
}
template<typename BasicJsonType, typename T, std::size_t N>
auto from_json_array_impl(const BasicJsonType& j, std::array<T, N>& arr,
priority_tag<2> /*unused*/)
-> decltype(j.template get<T>(), void())
{
for (std::size_t i = 0; i < N; ++i)
{
arr[i] = j.at(i).template get<T>();
}
}
template<typename BasicJsonType, typename ConstructibleArrayType,
enable_if_t<
std::is_assignable<ConstructibleArrayType&, ConstructibleArrayType>::value,
int> = 0>
auto from_json_array_impl(const BasicJsonType& j, ConstructibleArrayType& arr, priority_tag<1> /*unused*/)
-> decltype(
arr.reserve(std::declval<typename ConstructibleArrayType::size_type>()),
j.template get<typename ConstructibleArrayType::value_type>(),
void())
{
using std::end;
ConstructibleArrayType ret;
ret.reserve(j.size());
std::transform(j.begin(), j.end(),
std::inserter(ret, end(ret)), [](const BasicJsonType & i)
{
// get<BasicJsonType>() returns *this, this won't call a from_json
// method when value_type is BasicJsonType
return i.template get<typename ConstructibleArrayType::value_type>();
});
arr = std::move(ret);
}
template<typename BasicJsonType, typename ConstructibleArrayType,
enable_if_t<
std::is_assignable<ConstructibleArrayType&, ConstructibleArrayType>::value,
int> = 0>
inline void from_json_array_impl(const BasicJsonType& j, ConstructibleArrayType& arr,
priority_tag<0> /*unused*/)
{
using std::end;
ConstructibleArrayType ret;
std::transform(
j.begin(), j.end(), std::inserter(ret, end(ret)),
[](const BasicJsonType & i)
{
// get<BasicJsonType>() returns *this, this won't call a from_json
// method when value_type is BasicJsonType
return i.template get<typename ConstructibleArrayType::value_type>();
});
arr = std::move(ret);
}
template < typename BasicJsonType, typename ConstructibleArrayType,
enable_if_t <
is_constructible_array_type<BasicJsonType, ConstructibleArrayType>::value&&
!is_constructible_object_type<BasicJsonType, ConstructibleArrayType>::value&&
!is_constructible_string_type<BasicJsonType, ConstructibleArrayType>::value&&
!std::is_same<ConstructibleArrayType, typename BasicJsonType::binary_t>::value&&
!is_basic_json<ConstructibleArrayType>::value,
int > = 0 >
auto from_json(const BasicJsonType& j, ConstructibleArrayType& arr)
-> decltype(from_json_array_impl(j, arr, priority_tag<3> {}),
j.template get<typename ConstructibleArrayType::value_type>(),
void())
{
if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
{
JSON_THROW(type_error::create(302, concat("type must be array, but is ", j.type_name()), &j));
}
from_json_array_impl(j, arr, priority_tag<3> {});
}
template < typename BasicJsonType, typename T, std::size_t... Idx >
std::array<T, sizeof...(Idx)> from_json_inplace_array_impl(BasicJsonType&& j,
identity_tag<std::array<T, sizeof...(Idx)>> /*unused*/, index_sequence<Idx...> /*unused*/)
{
return { { std::forward<BasicJsonType>(j).at(Idx).template get<T>()... } };
}
template < typename BasicJsonType, typename T, std::size_t N >
auto from_json(BasicJsonType&& j, identity_tag<std::array<T, N>> tag)
-> decltype(from_json_inplace_array_impl(std::forward<BasicJsonType>(j), tag, make_index_sequence<N> {}))
{
if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
{
JSON_THROW(type_error::create(302, concat("type must be array, but is ", j.type_name()), &j));
}
return from_json_inplace_array_impl(std::forward<BasicJsonType>(j), tag, make_index_sequence<N> {});
}
template<typename BasicJsonType>
inline void from_json(const BasicJsonType& j, typename BasicJsonType::binary_t& bin)
{
if (JSON_HEDLEY_UNLIKELY(!j.is_binary()))
{
JSON_THROW(type_error::create(302, concat("type must be binary, but is ", j.type_name()), &j));
}
bin = *j.template get_ptr<const typename BasicJsonType::binary_t*>();
}
template<typename BasicJsonType, typename ConstructibleObjectType,
enable_if_t<is_constructible_object_type<BasicJsonType, ConstructibleObjectType>::value, int> = 0>
inline void from_json(const BasicJsonType& j, ConstructibleObjectType& obj)
{
if (JSON_HEDLEY_UNLIKELY(!j.is_object()))
{
JSON_THROW(type_error::create(302, concat("type must be object, but is ", j.type_name()), &j));
}
ConstructibleObjectType ret;
const auto* inner_object = j.template get_ptr<const typename BasicJsonType::object_t*>();
using value_type = typename ConstructibleObjectType::value_type;
std::transform(
inner_object->begin(), inner_object->end(),
std::inserter(ret, ret.begin()),
[](typename BasicJsonType::object_t::value_type const & p)
{
return value_type(p.first, p.second.template get<typename ConstructibleObjectType::mapped_type>());
});
obj = std::move(ret);
}
// overload for arithmetic types, not chosen for basic_json template arguments
// (BooleanType, etc..); note: Is it really necessary to provide explicit
// overloads for boolean_t etc. in case of a custom BooleanType which is not
// an arithmetic type?
template < typename BasicJsonType, typename ArithmeticType,
enable_if_t <
std::is_arithmetic<ArithmeticType>::value&&
!std::is_same<ArithmeticType, typename BasicJsonType::number_unsigned_t>::value&&
!std::is_same<ArithmeticType, typename BasicJsonType::number_integer_t>::value&&
!std::is_same<ArithmeticType, typename BasicJsonType::number_float_t>::value&&
!std::is_same<ArithmeticType, typename BasicJsonType::boolean_t>::value,
int > = 0 >
inline void from_json(const BasicJsonType& j, ArithmeticType& val)
{
switch (static_cast<value_t>(j))
{
case value_t::number_unsigned:
{
val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_unsigned_t*>());
break;
}
case value_t::number_integer:
{
val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_integer_t*>());
break;
}
case value_t::number_float:
{
val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_float_t*>());
break;
}
case value_t::boolean:
{
val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::boolean_t*>());
break;
}
case value_t::null:
case value_t::object:
case value_t::array:
case value_t::string:
case value_t::binary:
case value_t::discarded:
default:
JSON_THROW(type_error::create(302, concat("type must be number, but is ", j.type_name()), &j));
}
}
template<typename BasicJsonType, typename... Args, std::size_t... Idx>
std::tuple<Args...> from_json_tuple_impl_base(BasicJsonType&& j, index_sequence<Idx...> /*unused*/)
{
return std::make_tuple(std::forward<BasicJsonType>(j).at(Idx).template get<Args>()...);
}
template < typename BasicJsonType, class A1, class A2 >
std::pair<A1, A2> from_json_tuple_impl(BasicJsonType&& j, identity_tag<std::pair<A1, A2>> /*unused*/, priority_tag<0> /*unused*/)
{
return {std::forward<BasicJsonType>(j).at(0).template get<A1>(),
std::forward<BasicJsonType>(j).at(1).template get<A2>()};
}
template<typename BasicJsonType, typename A1, typename A2>
inline void from_json_tuple_impl(BasicJsonType&& j, std::pair<A1, A2>& p, priority_tag<1> /*unused*/)
{
p = from_json_tuple_impl(std::forward<BasicJsonType>(j), identity_tag<std::pair<A1, A2>> {}, priority_tag<0> {});
}
template<typename BasicJsonType, typename... Args>
std::tuple<Args...> from_json_tuple_impl(BasicJsonType&& j, identity_tag<std::tuple<Args...>> /*unused*/, priority_tag<2> /*unused*/)
{
return from_json_tuple_impl_base<BasicJsonType, Args...>(std::forward<BasicJsonType>(j), index_sequence_for<Args...> {});
}
template<typename BasicJsonType, typename... Args>
inline void from_json_tuple_impl(BasicJsonType&& j, std::tuple<Args...>& t, priority_tag<3> /*unused*/)
{
t = from_json_tuple_impl_base<BasicJsonType, Args...>(std::forward<BasicJsonType>(j), index_sequence_for<Args...> {});
}
template<typename BasicJsonType, typename TupleRelated>
auto from_json(BasicJsonType&& j, TupleRelated&& t)
-> decltype(from_json_tuple_impl(std::forward<BasicJsonType>(j), std::forward<TupleRelated>(t), priority_tag<3> {}))
{
if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
{
JSON_THROW(type_error::create(302, concat("type must be array, but is ", j.type_name()), &j));
}
return from_json_tuple_impl(std::forward<BasicJsonType>(j), std::forward<TupleRelated>(t), priority_tag<3> {});
}
template < typename BasicJsonType, typename Key, typename Value, typename Compare, typename Allocator,
typename = enable_if_t < !std::is_constructible <
typename BasicJsonType::string_t, Key >::value >>
inline void from_json(const BasicJsonType& j, std::map<Key, Value, Compare, Allocator>& m)
{
if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
{
JSON_THROW(type_error::create(302, concat("type must be array, but is ", j.type_name()), &j));
}
m.clear();
for (const auto& p : j)
{
if (JSON_HEDLEY_UNLIKELY(!p.is_array()))
{
JSON_THROW(type_error::create(302, concat("type must be array, but is ", p.type_name()), &j));
}
m.emplace(p.at(0).template get<Key>(), p.at(1).template get<Value>());
}
}
template < typename BasicJsonType, typename Key, typename Value, typename Hash, typename KeyEqual, typename Allocator,
typename = enable_if_t < !std::is_constructible <
typename BasicJsonType::string_t, Key >::value >>
inline void from_json(const BasicJsonType& j, std::unordered_map<Key, Value, Hash, KeyEqual, Allocator>& m)
{
if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
{
JSON_THROW(type_error::create(302, concat("type must be array, but is ", j.type_name()), &j));
}
m.clear();
for (const auto& p : j)
{
if (JSON_HEDLEY_UNLIKELY(!p.is_array()))
{
JSON_THROW(type_error::create(302, concat("type must be array, but is ", p.type_name()), &j));
}
m.emplace(p.at(0).template get<Key>(), p.at(1).template get<Value>());
}
}
#if JSON_HAS_FILESYSTEM || JSON_HAS_EXPERIMENTAL_FILESYSTEM
template<typename BasicJsonType>
inline void from_json(const BasicJsonType& j, std_fs::path& p)
{
if (JSON_HEDLEY_UNLIKELY(!j.is_string()))
{
JSON_THROW(type_error::create(302, concat("type must be string, but is ", j.type_name()), &j));
}
p = *j.template get_ptr<const typename BasicJsonType::string_t*>();
}
#endif
struct from_json_fn
{
template<typename BasicJsonType, typename T>
auto operator()(const BasicJsonType& j, T&& val) const
noexcept(noexcept(from_json(j, std::forward<T>(val))))
-> decltype(from_json(j, std::forward<T>(val)))
{
return from_json(j, std::forward<T>(val));
}
};
} // namespace detail
#ifndef JSON_HAS_CPP_17
/// namespace to hold default `from_json` function
/// to see why this is required:
/// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4381.html
namespace // NOLINT(cert-dcl59-cpp,fuchsia-header-anon-namespaces,google-build-namespaces)
{
#endif
JSON_INLINE_VARIABLE constexpr const auto& from_json = // NOLINT(misc-definitions-in-headers)
detail::static_const<detail::from_json_fn>::value;
#ifndef JSON_HAS_CPP_17
} // namespace
#endif
NLOHMANN_JSON_NAMESPACE_END
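
For orientation, a minimal sketch of how this from_json machinery is reached from user code. The struct Point and its fields are hypothetical; the ADL from_json hook, get<T>(), and get_to() are the library's documented extension mechanism, and the call dispatches through detail::from_json_fn above.

#include <nlohmann/json.hpp>

struct Point
{
    double x;
    double y;
};

// Found via argument-dependent lookup when j.get<Point>() is instantiated.
void from_json(const nlohmann::json& j, Point& p)
{
    j.at("x").get_to(p.x);
    j.at("y").get_to(p.y);
}

int main()
{
    const auto j = nlohmann::json::parse(R"({"x": 1.0, "y": 2.0})");
    const auto p = j.get<Point>(); // dispatches through detail::from_json_fn
    return (p.x == 1.0 && p.y == 2.0) ? 0 : 1;
}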


@@ -1,447 +0,0 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <algorithm> // copy
#include <iterator> // begin, end
#include <string> // string
#include <tuple> // tuple, get
#include <type_traits> // is_same, is_constructible, is_floating_point, is_enum, underlying_type
#include <utility> // move, forward, declval, pair
#include <valarray> // valarray
#include <vector> // vector
#include <nlohmann/detail/iterators/iteration_proxy.hpp>
#include <nlohmann/detail/macro_scope.hpp>
#include <nlohmann/detail/meta/cpp_future.hpp>
#include <nlohmann/detail/meta/std_fs.hpp>
#include <nlohmann/detail/meta/type_traits.hpp>
#include <nlohmann/detail/value_t.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{
//////////////////
// constructors //
//////////////////
/*
* Note all external_constructor<>::construct functions need to call
* j.m_data.m_value.destroy(j.m_data.m_type) to avoid a memory leak in case j contains an
* allocated value (e.g., a string). See bug issue
* https://github.com/nlohmann/json/issues/2865 for more information.
*/
template<value_t> struct external_constructor;
template<>
struct external_constructor<value_t::boolean>
{
template<typename BasicJsonType>
static void construct(BasicJsonType& j, typename BasicJsonType::boolean_t b) noexcept
{
j.m_data.m_value.destroy(j.m_data.m_type);
j.m_data.m_type = value_t::boolean;
j.m_data.m_value = b;
j.assert_invariant();
}
};
template<>
struct external_constructor<value_t::string>
{
template<typename BasicJsonType>
static void construct(BasicJsonType& j, const typename BasicJsonType::string_t& s)
{
j.m_data.m_value.destroy(j.m_data.m_type);
j.m_data.m_type = value_t::string;
j.m_data.m_value = s;
j.assert_invariant();
}
template<typename BasicJsonType>
static void construct(BasicJsonType& j, typename BasicJsonType::string_t&& s)
{
j.m_data.m_value.destroy(j.m_data.m_type);
j.m_data.m_type = value_t::string;
j.m_data.m_value = std::move(s);
j.assert_invariant();
}
template < typename BasicJsonType, typename CompatibleStringType,
enable_if_t < !std::is_same<CompatibleStringType, typename BasicJsonType::string_t>::value,
int > = 0 >
static void construct(BasicJsonType& j, const CompatibleStringType& str)
{
j.m_data.m_value.destroy(j.m_data.m_type);
j.m_data.m_type = value_t::string;
j.m_data.m_value.string = j.template create<typename BasicJsonType::string_t>(str);
j.assert_invariant();
}
};
template<>
struct external_constructor<value_t::binary>
{
template<typename BasicJsonType>
static void construct(BasicJsonType& j, const typename BasicJsonType::binary_t& b)
{
j.m_data.m_value.destroy(j.m_data.m_type);
j.m_data.m_type = value_t::binary;
j.m_data.m_value = typename BasicJsonType::binary_t(b);
j.assert_invariant();
}
template<typename BasicJsonType>
static void construct(BasicJsonType& j, typename BasicJsonType::binary_t&& b)
{
j.m_data.m_value.destroy(j.m_data.m_type);
j.m_data.m_type = value_t::binary;
j.m_data.m_value = typename BasicJsonType::binary_t(std::move(b));
j.assert_invariant();
}
};
template<>
struct external_constructor<value_t::number_float>
{
template<typename BasicJsonType>
static void construct(BasicJsonType& j, typename BasicJsonType::number_float_t val) noexcept
{
j.m_data.m_value.destroy(j.m_data.m_type);
j.m_data.m_type = value_t::number_float;
j.m_data.m_value = val;
j.assert_invariant();
}
};
template<>
struct external_constructor<value_t::number_unsigned>
{
template<typename BasicJsonType>
static void construct(BasicJsonType& j, typename BasicJsonType::number_unsigned_t val) noexcept
{
j.m_data.m_value.destroy(j.m_data.m_type);
j.m_data.m_type = value_t::number_unsigned;
j.m_data.m_value = val;
j.assert_invariant();
}
};
template<>
struct external_constructor<value_t::number_integer>
{
template<typename BasicJsonType>
static void construct(BasicJsonType& j, typename BasicJsonType::number_integer_t val) noexcept
{
j.m_data.m_value.destroy(j.m_data.m_type);
j.m_data.m_type = value_t::number_integer;
j.m_data.m_value = val;
j.assert_invariant();
}
};
template<>
struct external_constructor<value_t::array>
{
template<typename BasicJsonType>
static void construct(BasicJsonType& j, const typename BasicJsonType::array_t& arr)
{
j.m_data.m_value.destroy(j.m_data.m_type);
j.m_data.m_type = value_t::array;
j.m_data.m_value = arr;
j.set_parents();
j.assert_invariant();
}
template<typename BasicJsonType>
static void construct(BasicJsonType& j, typename BasicJsonType::array_t&& arr)
{
j.m_data.m_value.destroy(j.m_data.m_type);
j.m_data.m_type = value_t::array;
j.m_data.m_value = std::move(arr);
j.set_parents();
j.assert_invariant();
}
template < typename BasicJsonType, typename CompatibleArrayType,
enable_if_t < !std::is_same<CompatibleArrayType, typename BasicJsonType::array_t>::value,
int > = 0 >
static void construct(BasicJsonType& j, const CompatibleArrayType& arr)
{
using std::begin;
using std::end;
j.m_data.m_value.destroy(j.m_data.m_type);
j.m_data.m_type = value_t::array;
j.m_data.m_value.array = j.template create<typename BasicJsonType::array_t>(begin(arr), end(arr));
j.set_parents();
j.assert_invariant();
}
template<typename BasicJsonType>
static void construct(BasicJsonType& j, const std::vector<bool>& arr)
{
j.m_data.m_value.destroy(j.m_data.m_type);
j.m_data.m_type = value_t::array;
j.m_data.m_value = value_t::array;
j.m_data.m_value.array->reserve(arr.size());
for (const bool x : arr)
{
j.m_data.m_value.array->push_back(x);
j.set_parent(j.m_data.m_value.array->back());
}
j.assert_invariant();
}
template<typename BasicJsonType, typename T,
enable_if_t<std::is_convertible<T, BasicJsonType>::value, int> = 0>
static void construct(BasicJsonType& j, const std::valarray<T>& arr)
{
j.m_data.m_value.destroy(j.m_data.m_type);
j.m_data.m_type = value_t::array;
j.m_data.m_value = value_t::array;
j.m_data.m_value.array->resize(arr.size());
if (arr.size() > 0)
{
std::copy(std::begin(arr), std::end(arr), j.m_data.m_value.array->begin());
}
j.set_parents();
j.assert_invariant();
}
};
template<>
struct external_constructor<value_t::object>
{
template<typename BasicJsonType>
static void construct(BasicJsonType& j, const typename BasicJsonType::object_t& obj)
{
j.m_data.m_value.destroy(j.m_data.m_type);
j.m_data.m_type = value_t::object;
j.m_data.m_value = obj;
j.set_parents();
j.assert_invariant();
}
template<typename BasicJsonType>
static void construct(BasicJsonType& j, typename BasicJsonType::object_t&& obj)
{
j.m_data.m_value.destroy(j.m_data.m_type);
j.m_data.m_type = value_t::object;
j.m_data.m_value = std::move(obj);
j.set_parents();
j.assert_invariant();
}
template < typename BasicJsonType, typename CompatibleObjectType,
enable_if_t < !std::is_same<CompatibleObjectType, typename BasicJsonType::object_t>::value, int > = 0 >
static void construct(BasicJsonType& j, const CompatibleObjectType& obj)
{
using std::begin;
using std::end;
j.m_data.m_value.destroy(j.m_data.m_type);
j.m_data.m_type = value_t::object;
j.m_data.m_value.object = j.template create<typename BasicJsonType::object_t>(begin(obj), end(obj));
j.set_parents();
j.assert_invariant();
}
};
/////////////
// to_json //
/////////////
template<typename BasicJsonType, typename T,
enable_if_t<std::is_same<T, typename BasicJsonType::boolean_t>::value, int> = 0>
inline void to_json(BasicJsonType& j, T b) noexcept
{
external_constructor<value_t::boolean>::construct(j, b);
}
template < typename BasicJsonType, typename BoolRef,
enable_if_t <
((std::is_same<std::vector<bool>::reference, BoolRef>::value
&& !std::is_same <std::vector<bool>::reference, typename BasicJsonType::boolean_t&>::value)
|| (std::is_same<std::vector<bool>::const_reference, BoolRef>::value
&& !std::is_same <detail::uncvref_t<std::vector<bool>::const_reference>,
typename BasicJsonType::boolean_t >::value))
&& std::is_convertible<const BoolRef&, typename BasicJsonType::boolean_t>::value, int > = 0 >
inline void to_json(BasicJsonType& j, const BoolRef& b) noexcept
{
external_constructor<value_t::boolean>::construct(j, static_cast<typename BasicJsonType::boolean_t>(b));
}
template<typename BasicJsonType, typename CompatibleString,
enable_if_t<std::is_constructible<typename BasicJsonType::string_t, CompatibleString>::value, int> = 0>
inline void to_json(BasicJsonType& j, const CompatibleString& s)
{
external_constructor<value_t::string>::construct(j, s);
}
template<typename BasicJsonType>
inline void to_json(BasicJsonType& j, typename BasicJsonType::string_t&& s)
{
external_constructor<value_t::string>::construct(j, std::move(s));
}
template<typename BasicJsonType, typename FloatType,
enable_if_t<std::is_floating_point<FloatType>::value, int> = 0>
inline void to_json(BasicJsonType& j, FloatType val) noexcept
{
external_constructor<value_t::number_float>::construct(j, static_cast<typename BasicJsonType::number_float_t>(val));
}
template<typename BasicJsonType, typename CompatibleNumberUnsignedType,
enable_if_t<is_compatible_integer_type<typename BasicJsonType::number_unsigned_t, CompatibleNumberUnsignedType>::value, int> = 0>
inline void to_json(BasicJsonType& j, CompatibleNumberUnsignedType val) noexcept
{
external_constructor<value_t::number_unsigned>::construct(j, static_cast<typename BasicJsonType::number_unsigned_t>(val));
}
template<typename BasicJsonType, typename CompatibleNumberIntegerType,
enable_if_t<is_compatible_integer_type<typename BasicJsonType::number_integer_t, CompatibleNumberIntegerType>::value, int> = 0>
inline void to_json(BasicJsonType& j, CompatibleNumberIntegerType val) noexcept
{
external_constructor<value_t::number_integer>::construct(j, static_cast<typename BasicJsonType::number_integer_t>(val));
}
#if !JSON_DISABLE_ENUM_SERIALIZATION
template<typename BasicJsonType, typename EnumType,
enable_if_t<std::is_enum<EnumType>::value, int> = 0>
inline void to_json(BasicJsonType& j, EnumType e) noexcept
{
using underlying_type = typename std::underlying_type<EnumType>::type;
static constexpr value_t integral_value_t = std::is_unsigned<underlying_type>::value ? value_t::number_unsigned : value_t::number_integer;
external_constructor<integral_value_t>::construct(j, static_cast<underlying_type>(e));
}
#endif // JSON_DISABLE_ENUM_SERIALIZATION
template<typename BasicJsonType>
inline void to_json(BasicJsonType& j, const std::vector<bool>& e)
{
external_constructor<value_t::array>::construct(j, e);
}
template < typename BasicJsonType, typename CompatibleArrayType,
enable_if_t < is_compatible_array_type<BasicJsonType,
CompatibleArrayType>::value&&
!is_compatible_object_type<BasicJsonType, CompatibleArrayType>::value&&
!is_compatible_string_type<BasicJsonType, CompatibleArrayType>::value&&
!std::is_same<typename BasicJsonType::binary_t, CompatibleArrayType>::value&&
!is_basic_json<CompatibleArrayType>::value,
int > = 0 >
inline void to_json(BasicJsonType& j, const CompatibleArrayType& arr)
{
external_constructor<value_t::array>::construct(j, arr);
}
template<typename BasicJsonType>
inline void to_json(BasicJsonType& j, const typename BasicJsonType::binary_t& bin)
{
external_constructor<value_t::binary>::construct(j, bin);
}
template<typename BasicJsonType, typename T,
enable_if_t<std::is_convertible<T, BasicJsonType>::value, int> = 0>
inline void to_json(BasicJsonType& j, const std::valarray<T>& arr)
{
external_constructor<value_t::array>::construct(j, std::move(arr));
}
template<typename BasicJsonType>
inline void to_json(BasicJsonType& j, typename BasicJsonType::array_t&& arr)
{
external_constructor<value_t::array>::construct(j, std::move(arr));
}
template < typename BasicJsonType, typename CompatibleObjectType,
enable_if_t < is_compatible_object_type<BasicJsonType, CompatibleObjectType>::value&& !is_basic_json<CompatibleObjectType>::value, int > = 0 >
inline void to_json(BasicJsonType& j, const CompatibleObjectType& obj)
{
external_constructor<value_t::object>::construct(j, obj);
}
template<typename BasicJsonType>
inline void to_json(BasicJsonType& j, typename BasicJsonType::object_t&& obj)
{
external_constructor<value_t::object>::construct(j, std::move(obj));
}
template <
typename BasicJsonType, typename T, std::size_t N,
enable_if_t < !std::is_constructible<typename BasicJsonType::string_t,
const T(&)[N]>::value, // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays)
int > = 0 >
inline void to_json(BasicJsonType& j, const T(&arr)[N]) // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays)
{
external_constructor<value_t::array>::construct(j, arr);
}
template < typename BasicJsonType, typename T1, typename T2, enable_if_t < std::is_constructible<BasicJsonType, T1>::value&& std::is_constructible<BasicJsonType, T2>::value, int > = 0 >
inline void to_json(BasicJsonType& j, const std::pair<T1, T2>& p)
{
j = { p.first, p.second };
}
// for https://github.com/nlohmann/json/pull/1134
template<typename BasicJsonType, typename T,
enable_if_t<std::is_same<T, iteration_proxy_value<typename BasicJsonType::iterator>>::value, int> = 0>
inline void to_json(BasicJsonType& j, const T& b)
{
j = { {b.key(), b.value()} };
}
template<typename BasicJsonType, typename Tuple, std::size_t... Idx>
inline void to_json_tuple_impl(BasicJsonType& j, const Tuple& t, index_sequence<Idx...> /*unused*/)
{
j = { std::get<Idx>(t)... };
}
template<typename BasicJsonType, typename T, enable_if_t<is_constructible_tuple<BasicJsonType, T>::value, int > = 0>
inline void to_json(BasicJsonType& j, const T& t)
{
to_json_tuple_impl(j, t, make_index_sequence<std::tuple_size<T>::value> {});
}
#if JSON_HAS_FILESYSTEM || JSON_HAS_EXPERIMENTAL_FILESYSTEM
template<typename BasicJsonType>
inline void to_json(BasicJsonType& j, const std_fs::path& p)
{
j = p.string();
}
#endif
struct to_json_fn
{
template<typename BasicJsonType, typename T>
auto operator()(BasicJsonType& j, T&& val) const noexcept(noexcept(to_json(j, std::forward<T>(val))))
-> decltype(to_json(j, std::forward<T>(val)), void())
{
return to_json(j, std::forward<T>(val));
}
};
} // namespace detail
#ifndef JSON_HAS_CPP_17
/// namespace to hold default `to_json` function
/// to see why this is required:
/// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4381.html
namespace // NOLINT(cert-dcl59-cpp,fuchsia-header-anon-namespaces,google-build-namespaces)
{
#endif
JSON_INLINE_VARIABLE constexpr const auto& to_json = // NOLINT(misc-definitions-in-headers)
detail::static_const<detail::to_json_fn>::value;
#ifndef JSON_HAS_CPP_17
} // namespace
#endif
NLOHMANN_JSON_NAMESPACE_END
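
The serialization direction mirrors this. A sketch with the same hypothetical Point, showing how an ADL to_json overload lets a value be assigned straight to a json variable via detail::to_json_fn.

#include <nlohmann/json.hpp>

struct Point
{
    double x;
    double y;
};

// Picked up by detail::to_json_fn via argument-dependent lookup.
void to_json(nlohmann::json& j, const Point& p)
{
    j = nlohmann::json{ {"x", p.x}, {"y", p.y} };
}

int main()
{
    const Point p{1.0, 2.0};
    const nlohmann::json j = p; // invokes to_json(j, p)
    return (j["x"] == 1.0) ? 0 : 1;
}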


@@ -1,257 +0,0 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <cstddef> // nullptr_t
#include <exception> // exception
#if JSON_DIAGNOSTICS
#include <numeric> // accumulate
#endif
#include <stdexcept> // runtime_error
#include <string> // to_string
#include <vector> // vector
#include <nlohmann/detail/value_t.hpp>
#include <nlohmann/detail/string_escape.hpp>
#include <nlohmann/detail/input/position_t.hpp>
#include <nlohmann/detail/macro_scope.hpp>
#include <nlohmann/detail/meta/cpp_future.hpp>
#include <nlohmann/detail/meta/type_traits.hpp>
#include <nlohmann/detail/string_concat.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{
////////////////
// exceptions //
////////////////
/// @brief general exception of the @ref basic_json class
/// @sa https://json.nlohmann.me/api/basic_json/exception/
class exception : public std::exception
{
public:
/// returns the explanatory string
const char* what() const noexcept override
{
return m.what();
}
/// the id of the exception
const int id; // NOLINT(cppcoreguidelines-non-private-member-variables-in-classes)
protected:
JSON_HEDLEY_NON_NULL(3)
exception(int id_, const char* what_arg) : id(id_), m(what_arg) {} // NOLINT(bugprone-throw-keyword-missing)
static std::string name(const std::string& ename, int id_)
{
return concat("[json.exception.", ename, '.', std::to_string(id_), "] ");
}
static std::string diagnostics(std::nullptr_t /*leaf_element*/)
{
return "";
}
template<typename BasicJsonType>
static std::string diagnostics(const BasicJsonType* leaf_element)
{
#if JSON_DIAGNOSTICS
std::vector<std::string> tokens;
for (const auto* current = leaf_element; current != nullptr && current->m_parent != nullptr; current = current->m_parent)
{
switch (current->m_parent->type())
{
case value_t::array:
{
for (std::size_t i = 0; i < current->m_parent->m_data.m_value.array->size(); ++i)
{
if (&current->m_parent->m_data.m_value.array->operator[](i) == current)
{
tokens.emplace_back(std::to_string(i));
break;
}
}
break;
}
case value_t::object:
{
for (const auto& element : *current->m_parent->m_data.m_value.object)
{
if (&element.second == current)
{
tokens.emplace_back(element.first.c_str());
break;
}
}
break;
}
case value_t::null: // LCOV_EXCL_LINE
case value_t::string: // LCOV_EXCL_LINE
case value_t::boolean: // LCOV_EXCL_LINE
case value_t::number_integer: // LCOV_EXCL_LINE
case value_t::number_unsigned: // LCOV_EXCL_LINE
case value_t::number_float: // LCOV_EXCL_LINE
case value_t::binary: // LCOV_EXCL_LINE
case value_t::discarded: // LCOV_EXCL_LINE
default: // LCOV_EXCL_LINE
break; // LCOV_EXCL_LINE
}
}
if (tokens.empty())
{
return "";
}
auto str = std::accumulate(tokens.rbegin(), tokens.rend(), std::string{},
[](const std::string & a, const std::string & b)
{
return concat(a, '/', detail::escape(b));
});
return concat('(', str, ") ");
#else
static_cast<void>(leaf_element);
return "";
#endif
}
private:
/// an exception object as storage for error messages
std::runtime_error m;
};
/// @brief exception indicating a parse error
/// @sa https://json.nlohmann.me/api/basic_json/parse_error/
class parse_error : public exception
{
public:
/*!
@brief create a parse error exception
@param[in] id_ the id of the exception
@param[in] pos the position where the error occurred (or with
chars_read_total=0 if the position cannot be
determined)
@param[in] what_arg the explanatory string
@return parse_error object
*/
template<typename BasicJsonContext, enable_if_t<is_basic_json_context<BasicJsonContext>::value, int> = 0>
static parse_error create(int id_, const position_t& pos, const std::string& what_arg, BasicJsonContext context)
{
const std::string w = concat(exception::name("parse_error", id_), "parse error",
position_string(pos), ": ", exception::diagnostics(context), what_arg);
return {id_, pos.chars_read_total, w.c_str()};
}
template<typename BasicJsonContext, enable_if_t<is_basic_json_context<BasicJsonContext>::value, int> = 0>
static parse_error create(int id_, std::size_t byte_, const std::string& what_arg, BasicJsonContext context)
{
const std::string w = concat(exception::name("parse_error", id_), "parse error",
(byte_ != 0 ? (concat(" at byte ", std::to_string(byte_))) : ""),
": ", exception::diagnostics(context), what_arg);
return {id_, byte_, w.c_str()};
}
/*!
@brief byte index of the parse error
The byte index of the last read character in the input file.
@note For an input with n bytes, 1 is the index of the first character and
n+1 is the index of the terminating null byte or the end of file.
This also holds true when reading a byte vector (CBOR or MessagePack).
*/
const std::size_t byte;
private:
parse_error(int id_, std::size_t byte_, const char* what_arg)
: exception(id_, what_arg), byte(byte_) {}
static std::string position_string(const position_t& pos)
{
return concat(" at line ", std::to_string(pos.lines_read + 1),
", column ", std::to_string(pos.chars_read_current_line));
}
};
/// @brief exception indicating errors with iterators
/// @sa https://json.nlohmann.me/api/basic_json/invalid_iterator/
class invalid_iterator : public exception
{
public:
template<typename BasicJsonContext, enable_if_t<is_basic_json_context<BasicJsonContext>::value, int> = 0>
static invalid_iterator create(int id_, const std::string& what_arg, BasicJsonContext context)
{
const std::string w = concat(exception::name("invalid_iterator", id_), exception::diagnostics(context), what_arg);
return {id_, w.c_str()};
}
private:
JSON_HEDLEY_NON_NULL(3)
invalid_iterator(int id_, const char* what_arg)
: exception(id_, what_arg) {}
};
/// @brief exception indicating executing a member function with a wrong type
/// @sa https://json.nlohmann.me/api/basic_json/type_error/
class type_error : public exception
{
public:
template<typename BasicJsonContext, enable_if_t<is_basic_json_context<BasicJsonContext>::value, int> = 0>
static type_error create(int id_, const std::string& what_arg, BasicJsonContext context)
{
const std::string w = concat(exception::name("type_error", id_), exception::diagnostics(context), what_arg);
return {id_, w.c_str()};
}
private:
JSON_HEDLEY_NON_NULL(3)
type_error(int id_, const char* what_arg) : exception(id_, what_arg) {}
};
/// @brief exception indicating access out of the defined range
/// @sa https://json.nlohmann.me/api/basic_json/out_of_range/
class out_of_range : public exception
{
public:
template<typename BasicJsonContext, enable_if_t<is_basic_json_context<BasicJsonContext>::value, int> = 0>
static out_of_range create(int id_, const std::string& what_arg, BasicJsonContext context)
{
const std::string w = concat(exception::name("out_of_range", id_), exception::diagnostics(context), what_arg);
return {id_, w.c_str()};
}
private:
JSON_HEDLEY_NON_NULL(3)
out_of_range(int id_, const char* what_arg) : exception(id_, what_arg) {}
};
/// @brief exception indicating other library errors
/// @sa https://json.nlohmann.me/api/basic_json/other_error/
class other_error : public exception
{
public:
template<typename BasicJsonContext, enable_if_t<is_basic_json_context<BasicJsonContext>::value, int> = 0>
static other_error create(int id_, const std::string& what_arg, BasicJsonContext context)
{
const std::string w = concat(exception::name("other_error", id_), exception::diagnostics(context), what_arg);
return {id_, w.c_str()};
}
private:
JSON_HEDLEY_NON_NULL(3)
other_error(int id_, const char* what_arg) : exception(id_, what_arg) {}
};
} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
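
These exception classes are usually met through the public aliases on basic_json. A short sketch of catching a type mismatch and reading the numeric id that create() embeds into the message:

#include <nlohmann/json.hpp>
#include <iostream>

int main()
{
    const nlohmann::json j = "not a number";
    try
    {
        const auto n = j.get<int>(); // string -> int triggers type error 302
        static_cast<void>(n);
    }
    catch (const nlohmann::json::type_error& e)
    {
        // what() begins with "[json.exception.type_error.302] ..."
        std::cout << e.id << ": " << e.what() << '\n';
    }
    return 0;
}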


@@ -1,129 +0,0 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <cstdint> // uint8_t
#include <cstddef> // size_t
#include <functional> // hash
#include <nlohmann/detail/abi_macros.hpp>
#include <nlohmann/detail/value_t.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{
// boost::hash_combine
inline std::size_t combine(std::size_t seed, std::size_t h) noexcept
{
seed ^= h + 0x9e3779b9 + (seed << 6U) + (seed >> 2U);
return seed;
}
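// (Editorial note) 0x9e3779b9 is 2^32 divided by the golden ratio; together
// with the two shifts it spreads each combined hash across all bits of the
// seed, following the boost::hash_combine recipe.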
/*!
@brief hash a JSON value
The hash function tries to rely on std::hash where possible. Furthermore, the
type of the JSON value is taken into account to have different hash values for
null, 0, 0U, and false, etc.
@tparam BasicJsonType basic_json specialization
@param j JSON value to hash
@return hash value of j
*/
template<typename BasicJsonType>
std::size_t hash(const BasicJsonType& j)
{
using string_t = typename BasicJsonType::string_t;
using number_integer_t = typename BasicJsonType::number_integer_t;
using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
using number_float_t = typename BasicJsonType::number_float_t;
const auto type = static_cast<std::size_t>(j.type());
switch (j.type())
{
case BasicJsonType::value_t::null:
case BasicJsonType::value_t::discarded:
{
return combine(type, 0);
}
case BasicJsonType::value_t::object:
{
auto seed = combine(type, j.size());
for (const auto& element : j.items())
{
const auto h = std::hash<string_t> {}(element.key());
seed = combine(seed, h);
seed = combine(seed, hash(element.value()));
}
return seed;
}
case BasicJsonType::value_t::array:
{
auto seed = combine(type, j.size());
for (const auto& element : j)
{
seed = combine(seed, hash(element));
}
return seed;
}
case BasicJsonType::value_t::string:
{
const auto h = std::hash<string_t> {}(j.template get_ref<const string_t&>());
return combine(type, h);
}
case BasicJsonType::value_t::boolean:
{
const auto h = std::hash<bool> {}(j.template get<bool>());
return combine(type, h);
}
case BasicJsonType::value_t::number_integer:
{
const auto h = std::hash<number_integer_t> {}(j.template get<number_integer_t>());
return combine(type, h);
}
case BasicJsonType::value_t::number_unsigned:
{
const auto h = std::hash<number_unsigned_t> {}(j.template get<number_unsigned_t>());
return combine(type, h);
}
case BasicJsonType::value_t::number_float:
{
const auto h = std::hash<number_float_t> {}(j.template get<number_float_t>());
return combine(type, h);
}
case BasicJsonType::value_t::binary:
{
auto seed = combine(type, j.get_binary().size());
const auto h = std::hash<bool> {}(j.get_binary().has_subtype());
seed = combine(seed, h);
seed = combine(seed, static_cast<std::size_t>(j.get_binary().subtype()));
for (const auto byte : j.get_binary())
{
seed = combine(seed, std::hash<std::uint8_t> {}(byte));
}
return seed;
}
default: // LCOV_EXCL_LINE
JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE
return 0; // LCOV_EXCL_LINE
}
}
} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
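
This detail::hash is what the library's std::hash<nlohmann::json> specialization delegates to, which is what makes JSON values usable as keys of unordered containers. A minimal sketch:

#include <nlohmann/json.hpp>
#include <unordered_set>

int main()
{
    // std::hash<nlohmann::json> is specialized by the library and calls
    // detail::hash, so JSON values can key unordered containers directly.
    std::unordered_set<nlohmann::json> seen;
    seen.insert(nlohmann::json{1, 2, 3});
    seen.insert(nlohmann::json("hello"));
    seen.insert(nlohmann::json(nullptr));
    // The type tag is mixed in, so 0, 0u, false, and null all hash differently.
    return (seen.count(nlohmann::json{1, 2, 3}) == 1) ? 0 : 1;
}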

File diff suppressed because it is too large


@@ -1,492 +0,0 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <array> // array
#include <cstddef> // size_t
#include <cstring> // strlen
#include <iterator> // begin, end, iterator_traits, random_access_iterator_tag, distance, next
#include <memory> // shared_ptr, make_shared, addressof
#include <numeric> // accumulate
#include <string> // string, char_traits
#include <type_traits> // enable_if, is_base_of, is_pointer, is_integral, remove_pointer
#include <utility> // pair, declval
#ifndef JSON_NO_IO
#include <cstdio> // FILE *
#include <istream> // istream
#endif // JSON_NO_IO
#include <nlohmann/detail/iterators/iterator_traits.hpp>
#include <nlohmann/detail/macro_scope.hpp>
#include <nlohmann/detail/meta/type_traits.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{
/// the supported input formats
enum class input_format_t { json, cbor, msgpack, ubjson, bson, bjdata };
////////////////////
// input adapters //
////////////////////
#ifndef JSON_NO_IO
/*!
Input adapter for stdio file access. This adapter reads only 1 byte at a time and
does not use any buffer. It is a very low-level adapter.
*/
class file_input_adapter
{
public:
using char_type = char;
JSON_HEDLEY_NON_NULL(2)
explicit file_input_adapter(std::FILE* f) noexcept
: m_file(f)
{
JSON_ASSERT(m_file != nullptr);
}
// make class move-only
file_input_adapter(const file_input_adapter&) = delete;
file_input_adapter(file_input_adapter&&) noexcept = default;
file_input_adapter& operator=(const file_input_adapter&) = delete;
file_input_adapter& operator=(file_input_adapter&&) = delete;
~file_input_adapter() = default;
std::char_traits<char>::int_type get_character() noexcept
{
return std::fgetc(m_file);
}
private:
/// the file pointer to read from
std::FILE* m_file;
};
/*!
Input adapter for a (caching) istream. Ignores a UTF Byte Order Mark at the
beginning of the input. Does not support changing the underlying std::streambuf
in mid-input. Maintains underlying std::istream and std::streambuf to support
subsequent use of standard std::istream operations to process any input
characters following those used in parsing the JSON input. Clears the
std::istream flags; any input errors (e.g., EOF) will be detected by the first
subsequent call for input from the std::istream.
*/
class input_stream_adapter
{
public:
using char_type = char;
~input_stream_adapter()
{
// clear stream flags; we use underlying streambuf I/O, do not
// maintain ifstream flags, except eof
if (is != nullptr)
{
is->clear(is->rdstate() & std::ios::eofbit);
}
}
explicit input_stream_adapter(std::istream& i)
: is(&i), sb(i.rdbuf())
{}
// delete because of pointer members
input_stream_adapter(const input_stream_adapter&) = delete;
input_stream_adapter& operator=(input_stream_adapter&) = delete;
input_stream_adapter& operator=(input_stream_adapter&&) = delete;
input_stream_adapter(input_stream_adapter&& rhs) noexcept
: is(rhs.is), sb(rhs.sb)
{
rhs.is = nullptr;
rhs.sb = nullptr;
}
// std::istream/std::streambuf use std::char_traits<char>::to_int_type, to
// ensure that std::char_traits<char>::eof() and the character 0xFF do not
// end up as the same value, e.g. 0xFFFFFFFF.
std::char_traits<char>::int_type get_character()
{
auto res = sb->sbumpc();
// set eof manually, as we don't use the istream interface.
if (JSON_HEDLEY_UNLIKELY(res == std::char_traits<char>::eof()))
{
is->clear(is->rdstate() | std::ios::eofbit);
}
return res;
}
private:
/// the associated input stream
std::istream* is = nullptr;
std::streambuf* sb = nullptr;
};
#endif // JSON_NO_IO
// General-purpose iterator-based adapter. It might not be as fast as
// theoretically possible for some containers, but it is extremely versatile.
template<typename IteratorType>
class iterator_input_adapter
{
public:
using char_type = typename std::iterator_traits<IteratorType>::value_type;
iterator_input_adapter(IteratorType first, IteratorType last)
: current(std::move(first)), end(std::move(last))
{}
typename char_traits<char_type>::int_type get_character()
{
if (JSON_HEDLEY_LIKELY(current != end))
{
auto result = char_traits<char_type>::to_int_type(*current);
std::advance(current, 1);
return result;
}
return char_traits<char_type>::eof();
}
private:
IteratorType current;
IteratorType end;
template<typename BaseInputAdapter, size_t T>
friend struct wide_string_input_helper;
bool empty() const
{
return current == end;
}
};
template<typename BaseInputAdapter, size_t T>
struct wide_string_input_helper;
template<typename BaseInputAdapter>
struct wide_string_input_helper<BaseInputAdapter, 4>
{
// UTF-32
static void fill_buffer(BaseInputAdapter& input,
std::array<std::char_traits<char>::int_type, 4>& utf8_bytes,
size_t& utf8_bytes_index,
size_t& utf8_bytes_filled)
{
utf8_bytes_index = 0;
if (JSON_HEDLEY_UNLIKELY(input.empty()))
{
utf8_bytes[0] = std::char_traits<char>::eof();
utf8_bytes_filled = 1;
}
else
{
// get the current character
const auto wc = input.get_character();
// UTF-32 to UTF-8 encoding
if (wc < 0x80)
{
utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(wc);
utf8_bytes_filled = 1;
}
else if (wc <= 0x7FF)
{
utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xC0u | ((static_cast<unsigned int>(wc) >> 6u) & 0x1Fu));
utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | (static_cast<unsigned int>(wc) & 0x3Fu));
utf8_bytes_filled = 2;
}
else if (wc <= 0xFFFF)
{
utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xE0u | ((static_cast<unsigned int>(wc) >> 12u) & 0x0Fu));
utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | ((static_cast<unsigned int>(wc) >> 6u) & 0x3Fu));
utf8_bytes[2] = static_cast<std::char_traits<char>::int_type>(0x80u | (static_cast<unsigned int>(wc) & 0x3Fu));
utf8_bytes_filled = 3;
}
else if (wc <= 0x10FFFF)
{
utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xF0u | ((static_cast<unsigned int>(wc) >> 18u) & 0x07u));
utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | ((static_cast<unsigned int>(wc) >> 12u) & 0x3Fu));
utf8_bytes[2] = static_cast<std::char_traits<char>::int_type>(0x80u | ((static_cast<unsigned int>(wc) >> 6u) & 0x3Fu));
utf8_bytes[3] = static_cast<std::char_traits<char>::int_type>(0x80u | (static_cast<unsigned int>(wc) & 0x3Fu));
utf8_bytes_filled = 4;
}
else
{
// unknown character
utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(wc);
utf8_bytes_filled = 1;
}
}
}
};
template<typename BaseInputAdapter>
struct wide_string_input_helper<BaseInputAdapter, 2>
{
// UTF-16
static void fill_buffer(BaseInputAdapter& input,
std::array<std::char_traits<char>::int_type, 4>& utf8_bytes,
size_t& utf8_bytes_index,
size_t& utf8_bytes_filled)
{
utf8_bytes_index = 0;
if (JSON_HEDLEY_UNLIKELY(input.empty()))
{
utf8_bytes[0] = std::char_traits<char>::eof();
utf8_bytes_filled = 1;
}
else
{
// get the current character
const auto wc = input.get_character();
// UTF-16 to UTF-8 encoding
if (wc < 0x80)
{
utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(wc);
utf8_bytes_filled = 1;
}
else if (wc <= 0x7FF)
{
utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xC0u | ((static_cast<unsigned int>(wc) >> 6u)));
utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | (static_cast<unsigned int>(wc) & 0x3Fu));
utf8_bytes_filled = 2;
}
else if (0xD800 > wc || wc >= 0xE000)
{
utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xE0u | ((static_cast<unsigned int>(wc) >> 12u)));
utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | ((static_cast<unsigned int>(wc) >> 6u) & 0x3Fu));
utf8_bytes[2] = static_cast<std::char_traits<char>::int_type>(0x80u | (static_cast<unsigned int>(wc) & 0x3Fu));
utf8_bytes_filled = 3;
}
else
{
if (JSON_HEDLEY_UNLIKELY(!input.empty()))
{
const auto wc2 = static_cast<unsigned int>(input.get_character());
const auto charcode = 0x10000u + (((static_cast<unsigned int>(wc) & 0x3FFu) << 10u) | (wc2 & 0x3FFu));
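// Worked example (editorial note): U+1F600 arrives as the surrogate
// pair 0xD83D 0xDE00; 0x10000 + ((0x03D << 10) | 0x200) reassembles
// 0x1F600, which is then emitted below as the UTF-8 bytes F0 9F 98 80.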
utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xF0u | (charcode >> 18u));
utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | ((charcode >> 12u) & 0x3Fu));
utf8_bytes[2] = static_cast<std::char_traits<char>::int_type>(0x80u | ((charcode >> 6u) & 0x3Fu));
utf8_bytes[3] = static_cast<std::char_traits<char>::int_type>(0x80u | (charcode & 0x3Fu));
utf8_bytes_filled = 4;
}
else
{
utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(wc);
utf8_bytes_filled = 1;
}
}
}
}
};
// Wraps another input adapter to convert wide character types into individual bytes.
template<typename BaseInputAdapter, typename WideCharType>
class wide_string_input_adapter
{
public:
using char_type = char;
wide_string_input_adapter(BaseInputAdapter base)
: base_adapter(base) {}
typename std::char_traits<char>::int_type get_character() noexcept
{
// check if buffer needs to be filled
if (utf8_bytes_index == utf8_bytes_filled)
{
fill_buffer<sizeof(WideCharType)>();
JSON_ASSERT(utf8_bytes_filled > 0);
JSON_ASSERT(utf8_bytes_index == 0);
}
// use buffer
JSON_ASSERT(utf8_bytes_filled > 0);
JSON_ASSERT(utf8_bytes_index < utf8_bytes_filled);
return utf8_bytes[utf8_bytes_index++];
}
private:
BaseInputAdapter base_adapter;
template<size_t T>
void fill_buffer()
{
wide_string_input_helper<BaseInputAdapter, T>::fill_buffer(base_adapter, utf8_bytes, utf8_bytes_index, utf8_bytes_filled);
}
/// a buffer for UTF-8 bytes
std::array<std::char_traits<char>::int_type, 4> utf8_bytes = {{0, 0, 0, 0}};
/// index to the utf8_codes array for the next valid byte
std::size_t utf8_bytes_index = 0;
/// number of valid bytes in the utf8_codes array
std::size_t utf8_bytes_filled = 0;
};
template<typename IteratorType, typename Enable = void>
struct iterator_input_adapter_factory
{
using iterator_type = IteratorType;
using char_type = typename std::iterator_traits<iterator_type>::value_type;
using adapter_type = iterator_input_adapter<iterator_type>;
static adapter_type create(IteratorType first, IteratorType last)
{
return adapter_type(std::move(first), std::move(last));
}
};
template<typename T>
struct is_iterator_of_multibyte
{
using value_type = typename std::iterator_traits<T>::value_type;
enum
{
value = sizeof(value_type) > 1
};
};
template<typename IteratorType>
struct iterator_input_adapter_factory<IteratorType, enable_if_t<is_iterator_of_multibyte<IteratorType>::value>>
{
using iterator_type = IteratorType;
using char_type = typename std::iterator_traits<iterator_type>::value_type;
using base_adapter_type = iterator_input_adapter<iterator_type>;
using adapter_type = wide_string_input_adapter<base_adapter_type, char_type>;
static adapter_type create(IteratorType first, IteratorType last)
{
return adapter_type(base_adapter_type(std::move(first), std::move(last)));
}
};
// General purpose iterator-based input
template<typename IteratorType>
typename iterator_input_adapter_factory<IteratorType>::adapter_type input_adapter(IteratorType first, IteratorType last)
{
using factory_type = iterator_input_adapter_factory<IteratorType>;
return factory_type::create(first, last);
}
// Convenience shorthand from container to iterator
// Enables ADL on begin(container) and end(container)
// Encloses the using declarations in a namespace so they do not leak into the outside scope
namespace container_input_adapter_factory_impl
{
using std::begin;
using std::end;
template<typename ContainerType, typename Enable = void>
struct container_input_adapter_factory {};
template<typename ContainerType>
struct container_input_adapter_factory< ContainerType,
void_t<decltype(begin(std::declval<ContainerType>()), end(std::declval<ContainerType>()))>>
{
using adapter_type = decltype(input_adapter(begin(std::declval<ContainerType>()), end(std::declval<ContainerType>())));
static adapter_type create(const ContainerType& container)
{
return input_adapter(begin(container), end(container));
}
};
} // namespace container_input_adapter_factory_impl
template<typename ContainerType>
typename container_input_adapter_factory_impl::container_input_adapter_factory<ContainerType>::adapter_type input_adapter(const ContainerType& container)
{
return container_input_adapter_factory_impl::container_input_adapter_factory<ContainerType>::create(container);
}
#ifndef JSON_NO_IO
// Special cases with fast paths
inline file_input_adapter input_adapter(std::FILE* file)
{
return file_input_adapter(file);
}
inline input_stream_adapter input_adapter(std::istream& stream)
{
return input_stream_adapter(stream);
}
inline input_stream_adapter input_adapter(std::istream&& stream)
{
return input_stream_adapter(stream);
}
#endif // JSON_NO_IO
using contiguous_bytes_input_adapter = decltype(input_adapter(std::declval<const char*>(), std::declval<const char*>()));
// Null-delimited strings, and the like.
template < typename CharT,
typename std::enable_if <
std::is_pointer<CharT>::value&&
!std::is_array<CharT>::value&&
std::is_integral<typename std::remove_pointer<CharT>::type>::value&&
sizeof(typename std::remove_pointer<CharT>::type) == 1,
int >::type = 0 >
contiguous_bytes_input_adapter input_adapter(CharT b)
{
auto length = std::strlen(reinterpret_cast<const char*>(b));
const auto* ptr = reinterpret_cast<const char*>(b);
return input_adapter(ptr, ptr + length);
}
template<typename T, std::size_t N>
auto input_adapter(T (&array)[N]) -> decltype(input_adapter(array, array + N)) // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays)
{
return input_adapter(array, array + N);
}
// This class only handles inputs of input_buffer_adapter type.
// It's required so that expressions like {ptr, len} can be implicitly cast
// to the correct adapter.
class span_input_adapter
{
public:
template < typename CharT,
typename std::enable_if <
std::is_pointer<CharT>::value&&
std::is_integral<typename std::remove_pointer<CharT>::type>::value&&
sizeof(typename std::remove_pointer<CharT>::type) == 1,
int >::type = 0 >
span_input_adapter(CharT b, std::size_t l)
: ia(reinterpret_cast<const char*>(b), reinterpret_cast<const char*>(b) + l) {}
template<class IteratorType,
typename std::enable_if<
std::is_same<typename iterator_traits<IteratorType>::iterator_category, std::random_access_iterator_tag>::value,
int>::type = 0>
span_input_adapter(IteratorType first, IteratorType last)
: ia(input_adapter(first, last)) {}
contiguous_bytes_input_adapter&& get()
{
return std::move(ia); // NOLINT(hicpp-move-const-arg,performance-move-const-arg)
}
private:
contiguous_bytes_input_adapter ia;
};
} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
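
All of these adapters are normally reached through json::parse, which forwards its argument to the matching input_adapter overload above. A sketch of the main entry points; the file name "data.json" is hypothetical:

#include <nlohmann/json.hpp>
#include <fstream>
#include <string>

int main()
{
    // Character array / literal -> contiguous bytes adapter.
    const auto j1 = nlohmann::json::parse(R"({"k": 1})");

    // Iterator pair -> iterator_input_adapter.
    const std::string s = R"([1, 2, 3])";
    const auto j2 = nlohmann::json::parse(s.begin(), s.end());

    // std::istream -> input_stream_adapter ("data.json" is a hypothetical file).
    std::ifstream f("data.json");
    const auto j3 = f ? nlohmann::json::parse(f) : nlohmann::json{};
    static_cast<void>(j3);

    // UTF-16 container -> wide_string_input_adapter converting to UTF-8 on the fly.
    const std::u16string u = u"true";
    const auto j4 = nlohmann::json::parse(u);

    return (j1["k"] == 1 && j2.size() == 3 && j4 == true) ? 0 : 1;
}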


@@ -1,727 +0,0 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <cstddef>
#include <string> // string
#include <utility> // move
#include <vector> // vector
#include <nlohmann/detail/exceptions.hpp>
#include <nlohmann/detail/macro_scope.hpp>
#include <nlohmann/detail/string_concat.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
/*!
@brief SAX interface
This class describes the SAX interface used by @ref nlohmann::json::sax_parse.
Each function is called in different situations while the input is parsed. The
boolean return value informs the parser whether to continue processing the
input.
*/
template<typename BasicJsonType>
struct json_sax
{
using number_integer_t = typename BasicJsonType::number_integer_t;
using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
using number_float_t = typename BasicJsonType::number_float_t;
using string_t = typename BasicJsonType::string_t;
using binary_t = typename BasicJsonType::binary_t;
/*!
@brief a null value was read
@return whether parsing should proceed
*/
virtual bool null() = 0;
/*!
@brief a boolean value was read
@param[in] val boolean value
@return whether parsing should proceed
*/
virtual bool boolean(bool val) = 0;
/*!
@brief an integer number was read
@param[in] val integer value
@return whether parsing should proceed
*/
virtual bool number_integer(number_integer_t val) = 0;
/*!
@brief an unsigned integer number was read
@param[in] val unsigned integer value
@return whether parsing should proceed
*/
virtual bool number_unsigned(number_unsigned_t val) = 0;
/*!
@brief a floating-point number was read
@param[in] val floating-point value
@param[in] s raw token value
@return whether parsing should proceed
*/
virtual bool number_float(number_float_t val, const string_t& s) = 0;
/*!
@brief a string value was read
@param[in] val string value
@return whether parsing should proceed
@note It is safe to move the passed string value.
*/
virtual bool string(string_t& val) = 0;
/*!
@brief a binary value was read
@param[in] val binary value
@return whether parsing should proceed
@note It is safe to move the passed binary value.
*/
virtual bool binary(binary_t& val) = 0;
/*!
@brief the beginning of an object was read
@param[in] elements number of object elements or -1 if unknown
@return whether parsing should proceed
@note binary formats may report the number of elements
*/
virtual bool start_object(std::size_t elements) = 0;
/*!
@brief an object key was read
@param[in] val object key
@return whether parsing should proceed
@note It is safe to move the passed string.
*/
virtual bool key(string_t& val) = 0;
/*!
@brief the end of an object was read
@return whether parsing should proceed
*/
virtual bool end_object() = 0;
/*!
@brief the beginning of an array was read
@param[in] elements number of array elements or -1 if unknown
@return whether parsing should proceed
@note binary formats may report the number of elements
*/
virtual bool start_array(std::size_t elements) = 0;
/*!
@brief the end of an array was read
@return whether parsing should proceed
*/
virtual bool end_array() = 0;
/*!
@brief a parse error occurred
@param[in] position the position in the input where the error occurs
@param[in] last_token the last read token
@param[in] ex an exception object describing the error
@return whether parsing should proceed (must return false)
*/
virtual bool parse_error(std::size_t position,
const std::string& last_token,
const detail::exception& ex) = 0;
json_sax() = default;
json_sax(const json_sax&) = default;
json_sax(json_sax&&) noexcept = default;
json_sax& operator=(const json_sax&) = default;
json_sax& operator=(json_sax&&) noexcept = default;
virtual ~json_sax() = default;
};
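
A handler implementing this interface can be driven via basic_json::sax_parse. A minimal sketch that only counts keys and scalar values; the handler type and its counters are illustrative, not part of the library, and every structural callback returns true so parsing continues:

#include <nlohmann/json.hpp>
#include <cstddef>
#include <string>

struct counting_sax : nlohmann::json_sax<nlohmann::json>
{
    std::size_t keys = 0;
    std::size_t values = 0;
    bool null() override { ++values; return true; }
    bool boolean(bool) override { ++values; return true; }
    bool number_integer(number_integer_t) override { ++values; return true; }
    bool number_unsigned(number_unsigned_t) override { ++values; return true; }
    bool number_float(number_float_t, const string_t&) override { ++values; return true; }
    bool string(string_t&) override { ++values; return true; }
    bool binary(binary_t&) override { ++values; return true; }
    bool start_object(std::size_t) override { return true; }
    bool key(string_t&) override { ++keys; return true; }
    bool end_object() override { return true; }
    bool start_array(std::size_t) override { return true; }
    bool end_array() override { return true; }
    bool parse_error(std::size_t, const std::string&,
                     const nlohmann::detail::exception&) override
    {
        return false; // stop on the first error
    }
};

int main()
{
    counting_sax handler;
    nlohmann::json::sax_parse(R"({"a": 1, "b": [true, null]})", &handler);
    return (handler.keys == 2 && handler.values == 3) ? 0 : 1;
}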
namespace detail
{
/*!
@brief SAX implementation to create a JSON value from SAX events
This class implements the @ref json_sax interface and processes the SAX events
to create a JSON value which makes it basically a DOM parser. The structure or
hierarchy of the JSON value is managed by the stack `ref_stack` which contains
a pointer to the respective array or object for each recursion depth.
After successful parsing, the value that is passed by reference to the
constructor contains the parsed value.
@tparam BasicJsonType the JSON type
*/
template<typename BasicJsonType>
class json_sax_dom_parser
{
public:
using number_integer_t = typename BasicJsonType::number_integer_t;
using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
using number_float_t = typename BasicJsonType::number_float_t;
using string_t = typename BasicJsonType::string_t;
using binary_t = typename BasicJsonType::binary_t;
/*!
@param[in,out] r reference to a JSON value that is manipulated while
parsing
@param[in] allow_exceptions_ whether parse errors yield exceptions
*/
explicit json_sax_dom_parser(BasicJsonType& r, const bool allow_exceptions_ = true)
: root(r), allow_exceptions(allow_exceptions_)
{}
// make class move-only
json_sax_dom_parser(const json_sax_dom_parser&) = delete;
json_sax_dom_parser(json_sax_dom_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor)
json_sax_dom_parser& operator=(const json_sax_dom_parser&) = delete;
json_sax_dom_parser& operator=(json_sax_dom_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor)
~json_sax_dom_parser() = default;
bool null()
{
handle_value(nullptr);
return true;
}
bool boolean(bool val)
{
handle_value(val);
return true;
}
bool number_integer(number_integer_t val)
{
handle_value(val);
return true;
}
bool number_unsigned(number_unsigned_t val)
{
handle_value(val);
return true;
}
bool number_float(number_float_t val, const string_t& /*unused*/)
{
handle_value(val);
return true;
}
bool string(string_t& val)
{
handle_value(val);
return true;
}
bool binary(binary_t& val)
{
handle_value(std::move(val));
return true;
}
bool start_object(std::size_t len)
{
ref_stack.push_back(handle_value(BasicJsonType::value_t::object));
if (JSON_HEDLEY_UNLIKELY(len != static_cast<std::size_t>(-1) && len > ref_stack.back()->max_size()))
{
JSON_THROW(out_of_range::create(408, concat("excessive object size: ", std::to_string(len)), ref_stack.back()));
}
return true;
}
bool key(string_t& val)
{
JSON_ASSERT(!ref_stack.empty());
JSON_ASSERT(ref_stack.back()->is_object());
// add null at given key and store the reference for later
object_element = &(ref_stack.back()->m_data.m_value.object->operator[](val));
return true;
}
bool end_object()
{
JSON_ASSERT(!ref_stack.empty());
JSON_ASSERT(ref_stack.back()->is_object());
ref_stack.back()->set_parents();
ref_stack.pop_back();
return true;
}
bool start_array(std::size_t len)
{
ref_stack.push_back(handle_value(BasicJsonType::value_t::array));
if (JSON_HEDLEY_UNLIKELY(len != static_cast<std::size_t>(-1) && len > ref_stack.back()->max_size()))
{
JSON_THROW(out_of_range::create(408, concat("excessive array size: ", std::to_string(len)), ref_stack.back()));
}
return true;
}
bool end_array()
{
JSON_ASSERT(!ref_stack.empty());
JSON_ASSERT(ref_stack.back()->is_array());
ref_stack.back()->set_parents();
ref_stack.pop_back();
return true;
}
template<class Exception>
bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/,
const Exception& ex)
{
errored = true;
static_cast<void>(ex);
if (allow_exceptions)
{
JSON_THROW(ex);
}
return false;
}
constexpr bool is_errored() const
{
return errored;
}
private:
/*!
@invariant If the ref stack is empty, then the passed value will be the new
root.
@invariant If the ref stack contains a value, then it is an array or an
object to which we can add elements
*/
template<typename Value>
JSON_HEDLEY_RETURNS_NON_NULL
BasicJsonType* handle_value(Value&& v)
{
if (ref_stack.empty())
{
root = BasicJsonType(std::forward<Value>(v));
return &root;
}
JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object());
if (ref_stack.back()->is_array())
{
ref_stack.back()->m_data.m_value.array->emplace_back(std::forward<Value>(v));
return &(ref_stack.back()->m_data.m_value.array->back());
}
JSON_ASSERT(ref_stack.back()->is_object());
JSON_ASSERT(object_element);
*object_element = BasicJsonType(std::forward<Value>(v));
return object_element;
}
/// the parsed JSON value
BasicJsonType& root;
/// stack to model hierarchy of values
std::vector<BasicJsonType*> ref_stack {};
/// helper to hold the reference for the next object element
BasicJsonType* object_element = nullptr;
/// whether a syntax error occurred
bool errored = false;
/// whether to throw exceptions in case of errors
const bool allow_exceptions = true;
};
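
The callback-parser variant below is what backs the optional filter argument of json::parse. A sketch that discards every object member whose key is "comment" while the DOM is being built; the key name is only an example:

#include <nlohmann/json.hpp>

int main()
{
    using nlohmann::json;
    // For key events, `parsed` holds the key as a JSON string.
    const json::parser_callback_t filter =
        [](int /*depth*/, json::parse_event_t event, json& parsed)
    {
        return !(event == json::parse_event_t::key && parsed == json("comment"));
    };
    const json j = json::parse(R"({"x": 1, "comment": "ignore me"})", filter);
    return j.contains("comment") ? 1 : 0;
}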
template<typename BasicJsonType>
class json_sax_dom_callback_parser
{
public:
using number_integer_t = typename BasicJsonType::number_integer_t;
using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
using number_float_t = typename BasicJsonType::number_float_t;
using string_t = typename BasicJsonType::string_t;
using binary_t = typename BasicJsonType::binary_t;
using parser_callback_t = typename BasicJsonType::parser_callback_t;
using parse_event_t = typename BasicJsonType::parse_event_t;
json_sax_dom_callback_parser(BasicJsonType& r,
const parser_callback_t cb,
const bool allow_exceptions_ = true)
: root(r), callback(cb), allow_exceptions(allow_exceptions_)
{
keep_stack.push_back(true);
}
// make class move-only
json_sax_dom_callback_parser(const json_sax_dom_callback_parser&) = delete;
json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor)
json_sax_dom_callback_parser& operator=(const json_sax_dom_callback_parser&) = delete;
json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor)
~json_sax_dom_callback_parser() = default;
bool null()
{
handle_value(nullptr);
return true;
}
bool boolean(bool val)
{
handle_value(val);
return true;
}
bool number_integer(number_integer_t val)
{
handle_value(val);
return true;
}
bool number_unsigned(number_unsigned_t val)
{
handle_value(val);
return true;
}
bool number_float(number_float_t val, const string_t& /*unused*/)
{
handle_value(val);
return true;
}
bool string(string_t& val)
{
handle_value(val);
return true;
}
bool binary(binary_t& val)
{
handle_value(std::move(val));
return true;
}
bool start_object(std::size_t len)
{
// check callback for object start
const bool keep = callback(static_cast<int>(ref_stack.size()), parse_event_t::object_start, discarded);
keep_stack.push_back(keep);
auto val = handle_value(BasicJsonType::value_t::object, true);
ref_stack.push_back(val.second);
// check object limit
if (ref_stack.back() && JSON_HEDLEY_UNLIKELY(len != static_cast<std::size_t>(-1) && len > ref_stack.back()->max_size()))
{
JSON_THROW(out_of_range::create(408, concat("excessive object size: ", std::to_string(len)), ref_stack.back()));
}
return true;
}
bool key(string_t& val)
{
BasicJsonType k = BasicJsonType(val);
// check callback for key
const bool keep = callback(static_cast<int>(ref_stack.size()), parse_event_t::key, k);
key_keep_stack.push_back(keep);
// add discarded value at given key and store the reference for later
if (keep && ref_stack.back())
{
object_element = &(ref_stack.back()->m_data.m_value.object->operator[](val) = discarded);
}
return true;
}
bool end_object()
{
if (ref_stack.back())
{
if (!callback(static_cast<int>(ref_stack.size()) - 1, parse_event_t::object_end, *ref_stack.back()))
{
// discard object
*ref_stack.back() = discarded;
}
else
{
ref_stack.back()->set_parents();
}
}
JSON_ASSERT(!ref_stack.empty());
JSON_ASSERT(!keep_stack.empty());
ref_stack.pop_back();
keep_stack.pop_back();
if (!ref_stack.empty() && ref_stack.back() && ref_stack.back()->is_structured())
{
// remove discarded value
for (auto it = ref_stack.back()->begin(); it != ref_stack.back()->end(); ++it)
{
if (it->is_discarded())
{
ref_stack.back()->erase(it);
break;
}
}
}
return true;
}
bool start_array(std::size_t len)
{
const bool keep = callback(static_cast<int>(ref_stack.size()), parse_event_t::array_start, discarded);
keep_stack.push_back(keep);
auto val = handle_value(BasicJsonType::value_t::array, true);
ref_stack.push_back(val.second);
// check array limit
if (ref_stack.back() && JSON_HEDLEY_UNLIKELY(len != static_cast<std::size_t>(-1) && len > ref_stack.back()->max_size()))
{
JSON_THROW(out_of_range::create(408, concat("excessive array size: ", std::to_string(len)), ref_stack.back()));
}
return true;
}
bool end_array()
{
bool keep = true;
if (ref_stack.back())
{
keep = callback(static_cast<int>(ref_stack.size()) - 1, parse_event_t::array_end, *ref_stack.back());
if (keep)
{
ref_stack.back()->set_parents();
}
else
{
// discard array
*ref_stack.back() = discarded;
}
}
JSON_ASSERT(!ref_stack.empty());
JSON_ASSERT(!keep_stack.empty());
ref_stack.pop_back();
keep_stack.pop_back();
// remove discarded value
if (!keep && !ref_stack.empty() && ref_stack.back()->is_array())
{
ref_stack.back()->m_data.m_value.array->pop_back();
}
return true;
}
template<class Exception>
bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/,
const Exception& ex)
{
errored = true;
static_cast<void>(ex);
if (allow_exceptions)
{
JSON_THROW(ex);
}
return false;
}
constexpr bool is_errored() const
{
return errored;
}
private:
/*!
@param[in] v value to add to the JSON value we build during parsing
@param[in] skip_callback whether we should skip calling the callback
function; this is required after start_array() and
start_object() SAX events, because otherwise we would call the
callback function with an empty array or object, respectively.
@invariant If the ref stack is empty, then the passed value will be the new
root.
@invariant If the ref stack contains a value, then it is an array or an
object to which we can add elements
@return pair of boolean (whether value should be kept) and pointer (to the
passed value in the ref_stack hierarchy; nullptr if not kept)
*/
template<typename Value>
std::pair<bool, BasicJsonType*> handle_value(Value&& v, const bool skip_callback = false)
{
JSON_ASSERT(!keep_stack.empty());
// do not handle this value if we know it would be added to a discarded
// container
if (!keep_stack.back())
{
return {false, nullptr};
}
// create value
auto value = BasicJsonType(std::forward<Value>(v));
// check callback
const bool keep = skip_callback || callback(static_cast<int>(ref_stack.size()), parse_event_t::value, value);
// do not handle this value if we just learnt it shall be discarded
if (!keep)
{
return {false, nullptr};
}
if (ref_stack.empty())
{
root = std::move(value);
return {true, &root};
}
// skip this value if we already decided to skip the parent
// (https://github.com/nlohmann/json/issues/971#issuecomment-413678360)
if (!ref_stack.back())
{
return {false, nullptr};
}
// we now only expect arrays and objects
JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object());
// array
if (ref_stack.back()->is_array())
{
ref_stack.back()->m_data.m_value.array->emplace_back(std::move(value));
return {true, &(ref_stack.back()->m_data.m_value.array->back())};
}
// object
JSON_ASSERT(ref_stack.back()->is_object());
// check if we should store an element for the current key
JSON_ASSERT(!key_keep_stack.empty());
const bool store_element = key_keep_stack.back();
key_keep_stack.pop_back();
if (!store_element)
{
return {false, nullptr};
}
JSON_ASSERT(object_element);
*object_element = std::move(value);
return {true, object_element};
}
/// the parsed JSON value
BasicJsonType& root;
/// stack to model hierarchy of values
std::vector<BasicJsonType*> ref_stack {};
/// stack to manage which values to keep
std::vector<bool> keep_stack {}; // NOLINT(readability-redundant-member-init)
/// stack to manage which object keys to keep
std::vector<bool> key_keep_stack {}; // NOLINT(readability-redundant-member-init)
/// helper to hold the reference for the next object element
BasicJsonType* object_element = nullptr;
/// whether a syntax error occurred
bool errored = false;
/// callback function
const parser_callback_t callback = nullptr;
/// whether to throw exceptions in case of errors
const bool allow_exceptions = true;
/// a discarded value for the callback
BasicJsonType discarded = BasicJsonType::value_t::discarded;
};
template<typename BasicJsonType>
class json_sax_acceptor
{
public:
using number_integer_t = typename BasicJsonType::number_integer_t;
using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
using number_float_t = typename BasicJsonType::number_float_t;
using string_t = typename BasicJsonType::string_t;
using binary_t = typename BasicJsonType::binary_t;
bool null()
{
return true;
}
bool boolean(bool /*unused*/)
{
return true;
}
bool number_integer(number_integer_t /*unused*/)
{
return true;
}
bool number_unsigned(number_unsigned_t /*unused*/)
{
return true;
}
bool number_float(number_float_t /*unused*/, const string_t& /*unused*/)
{
return true;
}
bool string(string_t& /*unused*/)
{
return true;
}
bool binary(binary_t& /*unused*/)
{
return true;
}
bool start_object(std::size_t /*unused*/ = static_cast<std::size_t>(-1))
{
return true;
}
bool key(string_t& /*unused*/)
{
return true;
}
bool end_object()
{
return true;
}
bool start_array(std::size_t /*unused*/ = static_cast<std::size_t>(-1))
{
return true;
}
bool end_array()
{
return true;
}
bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const detail::exception& /*unused*/)
{
return false;
}
};
} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
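The callback parser above backs the filtering overload of the public parse() API: returning false from the callback discards the current key, value, or container. A minimal sketch of that behavior, assuming only the documented nlohmann::json interface rather than these internal classes:

#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main()
{
    const char* text = R"({"keep": 1, "drop": {"a": [1, 2, 3]}})";

    // discard the key "drop" together with the value below it
    json filtered = json::parse(text,
                                [](int /*depth*/, json::parse_event_t event, json& parsed)
    {
        return !(event == json::parse_event_t::key && parsed == "drop");
    });

    std::cout << filtered << '\n'; // prints {"keep":1}
}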

File diff suppressed because it is too large


@@ -1,519 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <cmath> // isfinite
#include <cstdint> // uint8_t
#include <functional> // function
#include <string> // string
#include <utility> // move
#include <vector> // vector
#include <nlohmann/detail/exceptions.hpp>
#include <nlohmann/detail/input/input_adapters.hpp>
#include <nlohmann/detail/input/json_sax.hpp>
#include <nlohmann/detail/input/lexer.hpp>
#include <nlohmann/detail/macro_scope.hpp>
#include <nlohmann/detail/meta/is_sax.hpp>
#include <nlohmann/detail/string_concat.hpp>
#include <nlohmann/detail/value_t.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{
////////////
// parser //
////////////
enum class parse_event_t : std::uint8_t
{
/// the parser read `{` and started to process a JSON object
object_start,
/// the parser read `}` and finished processing a JSON object
object_end,
/// the parser read `[` and started to process a JSON array
array_start,
/// the parser read `]` and finished processing a JSON array
array_end,
/// the parser read a key of a value in an object
key,
/// the parser finished reading a JSON value
value
};
template<typename BasicJsonType>
using parser_callback_t =
std::function<bool(int /*depth*/, parse_event_t /*event*/, BasicJsonType& /*parsed*/)>;
/*!
@brief syntax analysis
This class implements a recursive descent parser.
*/
template<typename BasicJsonType, typename InputAdapterType>
class parser
{
using number_integer_t = typename BasicJsonType::number_integer_t;
using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
using number_float_t = typename BasicJsonType::number_float_t;
using string_t = typename BasicJsonType::string_t;
using lexer_t = lexer<BasicJsonType, InputAdapterType>;
using token_type = typename lexer_t::token_type;
public:
/// a parser reading from an input adapter
explicit parser(InputAdapterType&& adapter,
const parser_callback_t<BasicJsonType> cb = nullptr,
const bool allow_exceptions_ = true,
const bool skip_comments = false)
: callback(cb)
, m_lexer(std::move(adapter), skip_comments)
, allow_exceptions(allow_exceptions_)
{
// read first token
get_token();
}
/*!
@brief public parser interface
@param[in] strict whether to expect the last token to be EOF
@param[in,out] result parsed JSON value
@throw parse_error.101 in case of an unexpected token
@throw parse_error.102 if to_unicode fails or surrogate error
@throw parse_error.103 if to_unicode fails
*/
void parse(const bool strict, BasicJsonType& result)
{
if (callback)
{
json_sax_dom_callback_parser<BasicJsonType> sdp(result, callback, allow_exceptions);
sax_parse_internal(&sdp);
// in strict mode, input must be completely read
if (strict && (get_token() != token_type::end_of_input))
{
sdp.parse_error(m_lexer.get_position(),
m_lexer.get_token_string(),
parse_error::create(101, m_lexer.get_position(),
exception_message(token_type::end_of_input, "value"), nullptr));
}
// in case of an error, return discarded value
if (sdp.is_errored())
{
result = value_t::discarded;
return;
}
// set top-level value to null if it was discarded by the callback
// function
if (result.is_discarded())
{
result = nullptr;
}
}
else
{
json_sax_dom_parser<BasicJsonType> sdp(result, allow_exceptions);
sax_parse_internal(&sdp);
// in strict mode, input must be completely read
if (strict && (get_token() != token_type::end_of_input))
{
sdp.parse_error(m_lexer.get_position(),
m_lexer.get_token_string(),
parse_error::create(101, m_lexer.get_position(), exception_message(token_type::end_of_input, "value"), nullptr));
}
// in case of an error, return discarded value
if (sdp.is_errored())
{
result = value_t::discarded;
return;
}
}
result.assert_invariant();
}
/*!
@brief public accept interface
@param[in] strict whether to expect the last token to be EOF
@return whether the input is a proper JSON text
*/
bool accept(const bool strict = true)
{
json_sax_acceptor<BasicJsonType> sax_acceptor;
return sax_parse(&sax_acceptor, strict);
}
template<typename SAX>
JSON_HEDLEY_NON_NULL(2)
bool sax_parse(SAX* sax, const bool strict = true)
{
(void)detail::is_sax_static_asserts<SAX, BasicJsonType> {};
const bool result = sax_parse_internal(sax);
// strict mode: next byte must be EOF
if (result && strict && (get_token() != token_type::end_of_input))
{
return sax->parse_error(m_lexer.get_position(),
m_lexer.get_token_string(),
parse_error::create(101, m_lexer.get_position(), exception_message(token_type::end_of_input, "value"), nullptr));
}
return result;
}
private:
template<typename SAX>
JSON_HEDLEY_NON_NULL(2)
bool sax_parse_internal(SAX* sax)
{
// stack to remember the hierarchy of structured values we are parsing
// true = array; false = object
std::vector<bool> states;
// value to avoid a goto (see comment where set to true)
bool skip_to_state_evaluation = false;
while (true)
{
if (!skip_to_state_evaluation)
{
// invariant: get_token() was called before each iteration
switch (last_token)
{
case token_type::begin_object:
{
if (JSON_HEDLEY_UNLIKELY(!sax->start_object(static_cast<std::size_t>(-1))))
{
return false;
}
// closing } -> we are done
if (get_token() == token_type::end_object)
{
if (JSON_HEDLEY_UNLIKELY(!sax->end_object()))
{
return false;
}
break;
}
// parse key
if (JSON_HEDLEY_UNLIKELY(last_token != token_type::value_string))
{
return sax->parse_error(m_lexer.get_position(),
m_lexer.get_token_string(),
parse_error::create(101, m_lexer.get_position(), exception_message(token_type::value_string, "object key"), nullptr));
}
if (JSON_HEDLEY_UNLIKELY(!sax->key(m_lexer.get_string())))
{
return false;
}
// parse separator (:)
if (JSON_HEDLEY_UNLIKELY(get_token() != token_type::name_separator))
{
return sax->parse_error(m_lexer.get_position(),
m_lexer.get_token_string(),
parse_error::create(101, m_lexer.get_position(), exception_message(token_type::name_separator, "object separator"), nullptr));
}
// remember we are now inside an object
states.push_back(false);
// parse values
get_token();
continue;
}
case token_type::begin_array:
{
if (JSON_HEDLEY_UNLIKELY(!sax->start_array(static_cast<std::size_t>(-1))))
{
return false;
}
// closing ] -> we are done
if (get_token() == token_type::end_array)
{
if (JSON_HEDLEY_UNLIKELY(!sax->end_array()))
{
return false;
}
break;
}
// remember we are now inside an array
states.push_back(true);
// parse values (no need to call get_token)
continue;
}
case token_type::value_float:
{
const auto res = m_lexer.get_number_float();
if (JSON_HEDLEY_UNLIKELY(!std::isfinite(res)))
{
return sax->parse_error(m_lexer.get_position(),
m_lexer.get_token_string(),
out_of_range::create(406, concat("number overflow parsing '", m_lexer.get_token_string(), '\''), nullptr));
}
if (JSON_HEDLEY_UNLIKELY(!sax->number_float(res, m_lexer.get_string())))
{
return false;
}
break;
}
case token_type::literal_false:
{
if (JSON_HEDLEY_UNLIKELY(!sax->boolean(false)))
{
return false;
}
break;
}
case token_type::literal_null:
{
if (JSON_HEDLEY_UNLIKELY(!sax->null()))
{
return false;
}
break;
}
case token_type::literal_true:
{
if (JSON_HEDLEY_UNLIKELY(!sax->boolean(true)))
{
return false;
}
break;
}
case token_type::value_integer:
{
if (JSON_HEDLEY_UNLIKELY(!sax->number_integer(m_lexer.get_number_integer())))
{
return false;
}
break;
}
case token_type::value_string:
{
if (JSON_HEDLEY_UNLIKELY(!sax->string(m_lexer.get_string())))
{
return false;
}
break;
}
case token_type::value_unsigned:
{
if (JSON_HEDLEY_UNLIKELY(!sax->number_unsigned(m_lexer.get_number_unsigned())))
{
return false;
}
break;
}
case token_type::parse_error:
{
// using "uninitialized" to avoid "expected" message
return sax->parse_error(m_lexer.get_position(),
m_lexer.get_token_string(),
parse_error::create(101, m_lexer.get_position(), exception_message(token_type::uninitialized, "value"), nullptr));
}
case token_type::end_of_input:
{
if (JSON_HEDLEY_UNLIKELY(m_lexer.get_position().chars_read_total == 1))
{
return sax->parse_error(m_lexer.get_position(),
m_lexer.get_token_string(),
parse_error::create(101, m_lexer.get_position(),
"attempting to parse an empty input; check that your input string or stream contains the expected JSON", nullptr));
}
return sax->parse_error(m_lexer.get_position(),
m_lexer.get_token_string(),
parse_error::create(101, m_lexer.get_position(), exception_message(token_type::literal_or_value, "value"), nullptr));
}
case token_type::uninitialized:
case token_type::end_array:
case token_type::end_object:
case token_type::name_separator:
case token_type::value_separator:
case token_type::literal_or_value:
default: // the last token was unexpected
{
return sax->parse_error(m_lexer.get_position(),
m_lexer.get_token_string(),
parse_error::create(101, m_lexer.get_position(), exception_message(token_type::literal_or_value, "value"), nullptr));
}
}
}
else
{
skip_to_state_evaluation = false;
}
// we reached this line after we successfully parsed a value
if (states.empty())
{
// empty stack: we reached the end of the hierarchy: done
return true;
}
if (states.back()) // array
{
// comma -> next value
if (get_token() == token_type::value_separator)
{
// parse a new value
get_token();
continue;
}
// closing ]
if (JSON_HEDLEY_LIKELY(last_token == token_type::end_array))
{
if (JSON_HEDLEY_UNLIKELY(!sax->end_array()))
{
return false;
}
// We are done with this array. Before we can parse a
// new value, we need to evaluate the new state first.
// By setting skip_to_state_evaluation to true, we skip the token
// switch on the next iteration and jump straight to the state
// evaluation below.
JSON_ASSERT(!states.empty());
states.pop_back();
skip_to_state_evaluation = true;
continue;
}
return sax->parse_error(m_lexer.get_position(),
m_lexer.get_token_string(),
parse_error::create(101, m_lexer.get_position(), exception_message(token_type::end_array, "array"), nullptr));
}
// states.back() is false -> object
// comma -> next value
if (get_token() == token_type::value_separator)
{
// parse key
if (JSON_HEDLEY_UNLIKELY(get_token() != token_type::value_string))
{
return sax->parse_error(m_lexer.get_position(),
m_lexer.get_token_string(),
parse_error::create(101, m_lexer.get_position(), exception_message(token_type::value_string, "object key"), nullptr));
}
if (JSON_HEDLEY_UNLIKELY(!sax->key(m_lexer.get_string())))
{
return false;
}
// parse separator (:)
if (JSON_HEDLEY_UNLIKELY(get_token() != token_type::name_separator))
{
return sax->parse_error(m_lexer.get_position(),
m_lexer.get_token_string(),
parse_error::create(101, m_lexer.get_position(), exception_message(token_type::name_separator, "object separator"), nullptr));
}
// parse values
get_token();
continue;
}
// closing }
if (JSON_HEDLEY_LIKELY(last_token == token_type::end_object))
{
if (JSON_HEDLEY_UNLIKELY(!sax->end_object()))
{
return false;
}
// We are done with this object. Before we can parse a
// new value, we need to evaluate the new state first.
// By setting skip_to_state_evaluation to true, we skip the token
// switch on the next iteration and jump straight to the state
// evaluation below.
JSON_ASSERT(!states.empty());
states.pop_back();
skip_to_state_evaluation = true;
continue;
}
return sax->parse_error(m_lexer.get_position(),
m_lexer.get_token_string(),
parse_error::create(101, m_lexer.get_position(), exception_message(token_type::end_object, "object"), nullptr));
}
}
/// get next token from lexer
token_type get_token()
{
return last_token = m_lexer.scan();
}
std::string exception_message(const token_type expected, const std::string& context)
{
std::string error_msg = "syntax error ";
if (!context.empty())
{
error_msg += concat("while parsing ", context, ' ');
}
error_msg += "- ";
if (last_token == token_type::parse_error)
{
error_msg += concat(m_lexer.get_error_message(), "; last read: '",
m_lexer.get_token_string(), '\'');
}
else
{
error_msg += concat("unexpected ", lexer_t::token_type_name(last_token));
}
if (expected != token_type::uninitialized)
{
error_msg += concat("; expected ", lexer_t::token_type_name(expected));
}
return error_msg;
}
private:
/// callback function
const parser_callback_t<BasicJsonType> callback = nullptr;
/// the type of the last read token
token_type last_token = token_type::uninitialized;
/// the lexer
lexer_t m_lexer;
/// whether to throw exceptions in case of errors
const bool allow_exceptions = true;
};
} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
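In the public interface this recursive descent parser sits behind json::parse(), json::accept(), and json::sax_parse(). A short sketch, assuming only the documented API, that uses the acceptor path to validate input without building a DOM:

#include <cassert>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main()
{
    // json::accept drives the parser with a json_sax_acceptor:
    // no DOM is built, only well-formedness is checked
    assert(json::accept(R"({"pi": 3.141})"));
    assert(!json::accept(R"({"pi": )")); // truncated input
    assert(!json::accept("1 2"));        // trailing content after the first value
}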


@@ -1,37 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <cstddef> // size_t
#include <nlohmann/detail/abi_macros.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{
/// struct to capture the start position of the current token
struct position_t
{
/// the total number of characters read
std::size_t chars_read_total = 0;
/// the number of characters read in the current line
std::size_t chars_read_current_line = 0;
/// the number of lines read
std::size_t lines_read = 0;
/// conversion to size_t to preserve SAX interface
constexpr operator size_t() const
{
return chars_read_total;
}
};
} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
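This position record is what surfaces in the public API as the byte member of parse_error exceptions. A hedged sketch, assuming the documented exception interface:

#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main()
{
    try
    {
        json::parse(R"({"a": 1, })"); // trailing comma
    }
    catch (const json::parse_error& e)
    {
        // e.byte corresponds to chars_read_total at the offending token
        std::cout << e.what() << " (at byte " << e.byte << ")\n";
    }
}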


@@ -1,35 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <nlohmann/detail/abi_macros.hpp>
#include <nlohmann/detail/iterators/primitive_iterator.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{
/*!
@brief an iterator value
@note This structure could easily be a union, but MSVC currently does not allow
union members with complex constructors, see https://github.com/nlohmann/json/pull/105.
*/
template<typename BasicJsonType> struct internal_iterator
{
/// iterator for JSON objects
typename BasicJsonType::object_t::iterator object_iterator {};
/// iterator for JSON arrays
typename BasicJsonType::array_t::iterator array_iterator {};
/// generic iterator for all other types
primitive_iterator_t primitive_iterator {};
};
} // namespace detail
NLOHMANN_JSON_NAMESPACE_END


@@ -1,751 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <iterator> // iterator, random_access_iterator_tag, bidirectional_iterator_tag, advance, next
#include <type_traits> // conditional, is_const, remove_const
#include <nlohmann/detail/exceptions.hpp>
#include <nlohmann/detail/iterators/internal_iterator.hpp>
#include <nlohmann/detail/iterators/primitive_iterator.hpp>
#include <nlohmann/detail/macro_scope.hpp>
#include <nlohmann/detail/meta/cpp_future.hpp>
#include <nlohmann/detail/meta/type_traits.hpp>
#include <nlohmann/detail/value_t.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{
// forward declare, to be able to friend it later on
template<typename IteratorType> class iteration_proxy;
template<typename IteratorType> class iteration_proxy_value;
/*!
@brief a template for a bidirectional iterator for the @ref basic_json class
This class implements both iterators (iterator and const_iterator) for the
@ref basic_json class.
@note An iterator is called *initialized* when a pointer to a JSON value has
been set (e.g., by a constructor or a copy assignment). If the iterator is
default-constructed, it is *uninitialized* and most methods are undefined.
**The library uses assertions to detect calls on uninitialized iterators.**
@requirement The class satisfies the following concept requirements:
-
[BidirectionalIterator](https://en.cppreference.com/w/cpp/named_req/BidirectionalIterator):
The iterator can be moved in both directions (i.e. incremented and
decremented).
@since version 1.0.0, simplified in version 2.0.9, change to bidirectional
iterators in version 3.0.0 (see https://github.com/nlohmann/json/issues/593)
*/
template<typename BasicJsonType>
class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-special-member-functions)
{
/// the iterator with BasicJsonType of different const-ness
using other_iter_impl = iter_impl<typename std::conditional<std::is_const<BasicJsonType>::value, typename std::remove_const<BasicJsonType>::type, const BasicJsonType>::type>;
/// allow basic_json to access private members
friend other_iter_impl;
friend BasicJsonType;
friend iteration_proxy<iter_impl>;
friend iteration_proxy_value<iter_impl>;
using object_t = typename BasicJsonType::object_t;
using array_t = typename BasicJsonType::array_t;
// make sure BasicJsonType is basic_json or const basic_json
static_assert(is_basic_json<typename std::remove_const<BasicJsonType>::type>::value,
"iter_impl only accepts (const) basic_json");
// superficial check for the LegacyBidirectionalIterator named requirement
static_assert(std::is_base_of<std::bidirectional_iterator_tag, std::bidirectional_iterator_tag>::value
&& std::is_base_of<std::bidirectional_iterator_tag, typename std::iterator_traits<typename array_t::iterator>::iterator_category>::value,
"basic_json iterator assumes array and object type iterators satisfy the LegacyBidirectionalIterator named requirement.");
public:
/// The std::iterator class template (used as a base class to provide typedefs) is deprecated in C++17.
/// The C++ Standard has never required user-defined iterators to derive from std::iterator.
/// A user-defined iterator should provide publicly accessible typedefs named
/// iterator_category, value_type, difference_type, pointer, and reference.
/// Note that value_type is required to be non-const, even for constant iterators.
using iterator_category = std::bidirectional_iterator_tag;
/// the type of the values when the iterator is dereferenced
using value_type = typename BasicJsonType::value_type;
/// a type to represent differences between iterators
using difference_type = typename BasicJsonType::difference_type;
/// defines a pointer to the type iterated over (value_type)
using pointer = typename std::conditional<std::is_const<BasicJsonType>::value,
typename BasicJsonType::const_pointer,
typename BasicJsonType::pointer>::type;
/// defines a reference to the type iterated over (value_type)
using reference =
typename std::conditional<std::is_const<BasicJsonType>::value,
typename BasicJsonType::const_reference,
typename BasicJsonType::reference>::type;
iter_impl() = default;
~iter_impl() = default;
iter_impl(iter_impl&&) noexcept = default;
iter_impl& operator=(iter_impl&&) noexcept = default;
/*!
@brief constructor for a given JSON instance
@param[in] object pointer to a JSON object for this iterator
@pre object != nullptr
@post The iterator is initialized; i.e. `m_object != nullptr`.
*/
explicit iter_impl(pointer object) noexcept : m_object(object)
{
JSON_ASSERT(m_object != nullptr);
switch (m_object->m_data.m_type)
{
case value_t::object:
{
m_it.object_iterator = typename object_t::iterator();
break;
}
case value_t::array:
{
m_it.array_iterator = typename array_t::iterator();
break;
}
case value_t::null:
case value_t::string:
case value_t::boolean:
case value_t::number_integer:
case value_t::number_unsigned:
case value_t::number_float:
case value_t::binary:
case value_t::discarded:
default:
{
m_it.primitive_iterator = primitive_iterator_t();
break;
}
}
}
/*!
@note The conventional copy constructor and copy assignment are implicitly
defined. Combined with the following converting constructor and
assignment, they support: (1) copy from iterator to iterator, (2)
copy from const iterator to const iterator, and (3) conversion from
iterator to const iterator. However conversion from const iterator
to iterator is not defined.
*/
/*!
@brief const copy constructor
@param[in] other const iterator to copy from
@note This copy constructor had to be defined explicitly to circumvent a bug
occurring on msvc v19.0 compiler (VS 2015) debug build. For more
information refer to: https://github.com/nlohmann/json/issues/1608
*/
iter_impl(const iter_impl<const BasicJsonType>& other) noexcept
: m_object(other.m_object), m_it(other.m_it)
{}
/*!
@brief converting assignment
@param[in] other const iterator to copy from
@return const/non-const iterator
@note It is not checked whether @a other is initialized.
*/
iter_impl& operator=(const iter_impl<const BasicJsonType>& other) noexcept
{
if (&other != this)
{
m_object = other.m_object;
m_it = other.m_it;
}
return *this;
}
/*!
@brief converting constructor
@param[in] other non-const iterator to copy from
@note It is not checked whether @a other is initialized.
*/
iter_impl(const iter_impl<typename std::remove_const<BasicJsonType>::type>& other) noexcept
: m_object(other.m_object), m_it(other.m_it)
{}
/*!
@brief converting assignment
@param[in] other non-const iterator to copy from
@return const/non-const iterator
@note It is not checked whether @a other is initialized.
*/
iter_impl& operator=(const iter_impl<typename std::remove_const<BasicJsonType>::type>& other) noexcept // NOLINT(cert-oop54-cpp)
{
m_object = other.m_object;
m_it = other.m_it;
return *this;
}
JSON_PRIVATE_UNLESS_TESTED:
/*!
@brief set the iterator to the first value
@pre The iterator is initialized; i.e. `m_object != nullptr`.
*/
void set_begin() noexcept
{
JSON_ASSERT(m_object != nullptr);
switch (m_object->m_data.m_type)
{
case value_t::object:
{
m_it.object_iterator = m_object->m_data.m_value.object->begin();
break;
}
case value_t::array:
{
m_it.array_iterator = m_object->m_data.m_value.array->begin();
break;
}
case value_t::null:
{
// set to end so begin()==end() is true: null is empty
m_it.primitive_iterator.set_end();
break;
}
case value_t::string:
case value_t::boolean:
case value_t::number_integer:
case value_t::number_unsigned:
case value_t::number_float:
case value_t::binary:
case value_t::discarded:
default:
{
m_it.primitive_iterator.set_begin();
break;
}
}
}
/*!
@brief set the iterator past the last value
@pre The iterator is initialized; i.e. `m_object != nullptr`.
*/
void set_end() noexcept
{
JSON_ASSERT(m_object != nullptr);
switch (m_object->m_data.m_type)
{
case value_t::object:
{
m_it.object_iterator = m_object->m_data.m_value.object->end();
break;
}
case value_t::array:
{
m_it.array_iterator = m_object->m_data.m_value.array->end();
break;
}
case value_t::null:
case value_t::string:
case value_t::boolean:
case value_t::number_integer:
case value_t::number_unsigned:
case value_t::number_float:
case value_t::binary:
case value_t::discarded:
default:
{
m_it.primitive_iterator.set_end();
break;
}
}
}
public:
/*!
@brief return a reference to the value pointed to by the iterator
@pre The iterator is initialized; i.e. `m_object != nullptr`.
*/
reference operator*() const
{
JSON_ASSERT(m_object != nullptr);
switch (m_object->m_data.m_type)
{
case value_t::object:
{
JSON_ASSERT(m_it.object_iterator != m_object->m_data.m_value.object->end());
return m_it.object_iterator->second;
}
case value_t::array:
{
JSON_ASSERT(m_it.array_iterator != m_object->m_data.m_value.array->end());
return *m_it.array_iterator;
}
case value_t::null:
JSON_THROW(invalid_iterator::create(214, "cannot get value", m_object));
case value_t::string:
case value_t::boolean:
case value_t::number_integer:
case value_t::number_unsigned:
case value_t::number_float:
case value_t::binary:
case value_t::discarded:
default:
{
if (JSON_HEDLEY_LIKELY(m_it.primitive_iterator.is_begin()))
{
return *m_object;
}
JSON_THROW(invalid_iterator::create(214, "cannot get value", m_object));
}
}
}
/*!
@brief dereference the iterator
@pre The iterator is initialized; i.e. `m_object != nullptr`.
*/
pointer operator->() const
{
JSON_ASSERT(m_object != nullptr);
switch (m_object->m_data.m_type)
{
case value_t::object:
{
JSON_ASSERT(m_it.object_iterator != m_object->m_data.m_value.object->end());
return &(m_it.object_iterator->second);
}
case value_t::array:
{
JSON_ASSERT(m_it.array_iterator != m_object->m_data.m_value.array->end());
return &*m_it.array_iterator;
}
case value_t::null:
case value_t::string:
case value_t::boolean:
case value_t::number_integer:
case value_t::number_unsigned:
case value_t::number_float:
case value_t::binary:
case value_t::discarded:
default:
{
if (JSON_HEDLEY_LIKELY(m_it.primitive_iterator.is_begin()))
{
return m_object;
}
JSON_THROW(invalid_iterator::create(214, "cannot get value", m_object));
}
}
}
/*!
@brief post-increment (it++)
@pre The iterator is initialized; i.e. `m_object != nullptr`.
*/
iter_impl operator++(int)& // NOLINT(cert-dcl21-cpp)
{
auto result = *this;
++(*this);
return result;
}
/*!
@brief pre-increment (++it)
@pre The iterator is initialized; i.e. `m_object != nullptr`.
*/
iter_impl& operator++()
{
JSON_ASSERT(m_object != nullptr);
switch (m_object->m_data.m_type)
{
case value_t::object:
{
std::advance(m_it.object_iterator, 1);
break;
}
case value_t::array:
{
std::advance(m_it.array_iterator, 1);
break;
}
case value_t::null:
case value_t::string:
case value_t::boolean:
case value_t::number_integer:
case value_t::number_unsigned:
case value_t::number_float:
case value_t::binary:
case value_t::discarded:
default:
{
++m_it.primitive_iterator;
break;
}
}
return *this;
}
/*!
@brief post-decrement (it--)
@pre The iterator is initialized; i.e. `m_object != nullptr`.
*/
iter_impl operator--(int)& // NOLINT(cert-dcl21-cpp)
{
auto result = *this;
--(*this);
return result;
}
/*!
@brief pre-decrement (--it)
@pre The iterator is initialized; i.e. `m_object != nullptr`.
*/
iter_impl& operator--()
{
JSON_ASSERT(m_object != nullptr);
switch (m_object->m_data.m_type)
{
case value_t::object:
{
std::advance(m_it.object_iterator, -1);
break;
}
case value_t::array:
{
std::advance(m_it.array_iterator, -1);
break;
}
case value_t::null:
case value_t::string:
case value_t::boolean:
case value_t::number_integer:
case value_t::number_unsigned:
case value_t::number_float:
case value_t::binary:
case value_t::discarded:
default:
{
--m_it.primitive_iterator;
break;
}
}
return *this;
}
/*!
@brief comparison: equal
@pre The iterator is initialized; i.e. `m_object != nullptr`.
*/
template < typename IterImpl, detail::enable_if_t < (std::is_same<IterImpl, iter_impl>::value || std::is_same<IterImpl, other_iter_impl>::value), std::nullptr_t > = nullptr >
bool operator==(const IterImpl& other) const
{
// if objects are not the same, the comparison is undefined
if (JSON_HEDLEY_UNLIKELY(m_object != other.m_object))
{
JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers", m_object));
}
JSON_ASSERT(m_object != nullptr);
switch (m_object->m_data.m_type)
{
case value_t::object:
return (m_it.object_iterator == other.m_it.object_iterator);
case value_t::array:
return (m_it.array_iterator == other.m_it.array_iterator);
case value_t::null:
case value_t::string:
case value_t::boolean:
case value_t::number_integer:
case value_t::number_unsigned:
case value_t::number_float:
case value_t::binary:
case value_t::discarded:
default:
return (m_it.primitive_iterator == other.m_it.primitive_iterator);
}
}
/*!
@brief comparison: not equal
@pre The iterator is initialized; i.e. `m_object != nullptr`.
*/
template < typename IterImpl, detail::enable_if_t < (std::is_same<IterImpl, iter_impl>::value || std::is_same<IterImpl, other_iter_impl>::value), std::nullptr_t > = nullptr >
bool operator!=(const IterImpl& other) const
{
return !operator==(other);
}
/*!
@brief comparison: smaller
@pre The iterator is initialized; i.e. `m_object != nullptr`.
*/
bool operator<(const iter_impl& other) const
{
// if objects are not the same, the comparison is undefined
if (JSON_HEDLEY_UNLIKELY(m_object != other.m_object))
{
JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers", m_object));
}
JSON_ASSERT(m_object != nullptr);
switch (m_object->m_data.m_type)
{
case value_t::object:
JSON_THROW(invalid_iterator::create(213, "cannot compare order of object iterators", m_object));
case value_t::array:
return (m_it.array_iterator < other.m_it.array_iterator);
case value_t::null:
case value_t::string:
case value_t::boolean:
case value_t::number_integer:
case value_t::number_unsigned:
case value_t::number_float:
case value_t::binary:
case value_t::discarded:
default:
return (m_it.primitive_iterator < other.m_it.primitive_iterator);
}
}
/*!
@brief comparison: less than or equal
@pre The iterator is initialized; i.e. `m_object != nullptr`.
*/
bool operator<=(const iter_impl& other) const
{
return !other.operator < (*this);
}
/*!
@brief comparison: greater than
@pre The iterator is initialized; i.e. `m_object != nullptr`.
*/
bool operator>(const iter_impl& other) const
{
return !operator<=(other);
}
/*!
@brief comparison: greater than or equal
@pre The iterator is initialized; i.e. `m_object != nullptr`.
*/
bool operator>=(const iter_impl& other) const
{
return !operator<(other);
}
/*!
@brief add to iterator
@pre The iterator is initialized; i.e. `m_object != nullptr`.
*/
iter_impl& operator+=(difference_type i)
{
JSON_ASSERT(m_object != nullptr);
switch (m_object->m_data.m_type)
{
case value_t::object:
JSON_THROW(invalid_iterator::create(209, "cannot use offsets with object iterators", m_object));
case value_t::array:
{
std::advance(m_it.array_iterator, i);
break;
}
case value_t::null:
case value_t::string:
case value_t::boolean:
case value_t::number_integer:
case value_t::number_unsigned:
case value_t::number_float:
case value_t::binary:
case value_t::discarded:
default:
{
m_it.primitive_iterator += i;
break;
}
}
return *this;
}
/*!
@brief subtract from iterator
@pre The iterator is initialized; i.e. `m_object != nullptr`.
*/
iter_impl& operator-=(difference_type i)
{
return operator+=(-i);
}
/*!
@brief add to iterator
@pre The iterator is initialized; i.e. `m_object != nullptr`.
*/
iter_impl operator+(difference_type i) const
{
auto result = *this;
result += i;
return result;
}
/*!
@brief addition of distance and iterator
@pre The iterator is initialized; i.e. `m_object != nullptr`.
*/
friend iter_impl operator+(difference_type i, const iter_impl& it)
{
auto result = it;
result += i;
return result;
}
/*!
@brief subtract from iterator
@pre The iterator is initialized; i.e. `m_object != nullptr`.
*/
iter_impl operator-(difference_type i) const
{
auto result = *this;
result -= i;
return result;
}
/*!
@brief return difference
@pre The iterator is initialized; i.e. `m_object != nullptr`.
*/
difference_type operator-(const iter_impl& other) const
{
JSON_ASSERT(m_object != nullptr);
switch (m_object->m_data.m_type)
{
case value_t::object:
JSON_THROW(invalid_iterator::create(209, "cannot use offsets with object iterators", m_object));
case value_t::array:
return m_it.array_iterator - other.m_it.array_iterator;
case value_t::null:
case value_t::string:
case value_t::boolean:
case value_t::number_integer:
case value_t::number_unsigned:
case value_t::number_float:
case value_t::binary:
case value_t::discarded:
default:
return m_it.primitive_iterator - other.m_it.primitive_iterator;
}
}
/*!
@brief access to successor
@pre The iterator is initialized; i.e. `m_object != nullptr`.
*/
reference operator[](difference_type n) const
{
JSON_ASSERT(m_object != nullptr);
switch (m_object->m_data.m_type)
{
case value_t::object:
JSON_THROW(invalid_iterator::create(208, "cannot use operator[] for object iterators", m_object));
case value_t::array:
return *std::next(m_it.array_iterator, n);
case value_t::null:
JSON_THROW(invalid_iterator::create(214, "cannot get value", m_object));
case value_t::string:
case value_t::boolean:
case value_t::number_integer:
case value_t::number_unsigned:
case value_t::number_float:
case value_t::binary:
case value_t::discarded:
default:
{
if (JSON_HEDLEY_LIKELY(m_it.primitive_iterator.get_value() == -n))
{
return *m_object;
}
JSON_THROW(invalid_iterator::create(214, "cannot get value", m_object));
}
}
}
/*!
@brief return the key of an object iterator
@pre The iterator is initialized; i.e. `m_object != nullptr`.
*/
const typename object_t::key_type& key() const
{
JSON_ASSERT(m_object != nullptr);
if (JSON_HEDLEY_LIKELY(m_object->is_object()))
{
return m_it.object_iterator->first;
}
JSON_THROW(invalid_iterator::create(207, "cannot use key() for non-object iterators", m_object));
}
/*!
@brief return the value of an iterator
@pre The iterator is initialized; i.e. `m_object != nullptr`.
*/
reference value() const
{
return operator*();
}
JSON_PRIVATE_UNLESS_TESTED:
/// associated JSON instance
pointer m_object = nullptr;
/// the actual iterator of the associated instance
internal_iterator<typename std::remove_const<BasicJsonType>::type> m_it {};
};
} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
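Both json::iterator and json::const_iterator are instantiations of this template. A short usage sketch against the public API:

#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main()
{
    const json j = {{"one", 1}, {"two", 2}};

    for (json::const_iterator it = j.cbegin(); it != j.cend(); ++it)
    {
        // key() is only valid for object iterators; value() works for all
        std::cout << it.key() << " -> " << it.value() << '\n';
    }
}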


@@ -1,242 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <cstddef> // size_t
#include <iterator> // input_iterator_tag
#include <string> // string, to_string
#include <tuple> // tuple_size, get, tuple_element
#include <utility> // move
#if JSON_HAS_RANGES
#include <ranges> // enable_borrowed_range
#endif
#include <nlohmann/detail/abi_macros.hpp>
#include <nlohmann/detail/meta/type_traits.hpp>
#include <nlohmann/detail/value_t.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{
template<typename string_type>
void int_to_string( string_type& target, std::size_t value )
{
// For ADL
using std::to_string;
target = to_string(value);
}
template<typename IteratorType> class iteration_proxy_value
{
public:
using difference_type = std::ptrdiff_t;
using value_type = iteration_proxy_value;
using pointer = value_type *;
using reference = value_type &;
using iterator_category = std::input_iterator_tag;
using string_type = typename std::remove_cv< typename std::remove_reference<decltype( std::declval<IteratorType>().key() ) >::type >::type;
private:
/// the iterator
IteratorType anchor{};
/// an index for arrays (used to create key names)
std::size_t array_index = 0;
/// last stringified array index
mutable std::size_t array_index_last = 0;
/// a string representation of the array index
mutable string_type array_index_str = "0";
/// an empty string (to return a reference for primitive values)
string_type empty_str{};
public:
explicit iteration_proxy_value() = default;
explicit iteration_proxy_value(IteratorType it, std::size_t array_index_ = 0)
noexcept(std::is_nothrow_move_constructible<IteratorType>::value
&& std::is_nothrow_default_constructible<string_type>::value)
: anchor(std::move(it))
, array_index(array_index_)
{}
iteration_proxy_value(iteration_proxy_value const&) = default;
iteration_proxy_value& operator=(iteration_proxy_value const&) = default;
// older GCCs are a bit fussy and require explicit noexcept specifiers on defaulted functions
iteration_proxy_value(iteration_proxy_value&&)
noexcept(std::is_nothrow_move_constructible<IteratorType>::value
&& std::is_nothrow_move_constructible<string_type>::value) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor,cppcoreguidelines-noexcept-move-operations)
iteration_proxy_value& operator=(iteration_proxy_value&&)
noexcept(std::is_nothrow_move_assignable<IteratorType>::value
&& std::is_nothrow_move_assignable<string_type>::value) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor,cppcoreguidelines-noexcept-move-operations)
~iteration_proxy_value() = default;
/// dereference operator (needed for range-based for)
const iteration_proxy_value& operator*() const
{
return *this;
}
/// increment operator (needed for range-based for)
iteration_proxy_value& operator++()
{
++anchor;
++array_index;
return *this;
}
iteration_proxy_value operator++(int)& // NOLINT(cert-dcl21-cpp)
{
auto tmp = iteration_proxy_value(anchor, array_index);
++anchor;
++array_index;
return tmp;
}
/// equality operator (needed for InputIterator)
bool operator==(const iteration_proxy_value& o) const
{
return anchor == o.anchor;
}
/// inequality operator (needed for range-based for)
bool operator!=(const iteration_proxy_value& o) const
{
return anchor != o.anchor;
}
/// return key of the iterator
const string_type& key() const
{
JSON_ASSERT(anchor.m_object != nullptr);
switch (anchor.m_object->type())
{
// use integer array index as key
case value_t::array:
{
if (array_index != array_index_last)
{
int_to_string( array_index_str, array_index );
array_index_last = array_index;
}
return array_index_str;
}
// use key from the object
case value_t::object:
return anchor.key();
// use an empty key for all primitive types
case value_t::null:
case value_t::string:
case value_t::boolean:
case value_t::number_integer:
case value_t::number_unsigned:
case value_t::number_float:
case value_t::binary:
case value_t::discarded:
default:
return empty_str;
}
}
/// return value of the iterator
typename IteratorType::reference value() const
{
return anchor.value();
}
};
/// proxy class for the items() function
template<typename IteratorType> class iteration_proxy
{
private:
/// the container to iterate
typename IteratorType::pointer container = nullptr;
public:
explicit iteration_proxy() = default;
/// construct iteration proxy from a container
explicit iteration_proxy(typename IteratorType::reference cont) noexcept
: container(&cont) {}
iteration_proxy(iteration_proxy const&) = default;
iteration_proxy& operator=(iteration_proxy const&) = default;
iteration_proxy(iteration_proxy&&) noexcept = default;
iteration_proxy& operator=(iteration_proxy&&) noexcept = default;
~iteration_proxy() = default;
/// return iterator begin (needed for range-based for)
iteration_proxy_value<IteratorType> begin() const noexcept
{
return iteration_proxy_value<IteratorType>(container->begin());
}
/// return iterator end (needed for range-based for)
iteration_proxy_value<IteratorType> end() const noexcept
{
return iteration_proxy_value<IteratorType>(container->end());
}
};
// Structured Bindings Support
// For further reference see https://blog.tartanllama.xyz/structured-bindings/
// And see https://github.com/nlohmann/json/pull/1391
template<std::size_t N, typename IteratorType, enable_if_t<N == 0, int> = 0>
auto get(const nlohmann::detail::iteration_proxy_value<IteratorType>& i) -> decltype(i.key())
{
return i.key();
}
// Structured Bindings Support
// For further reference see https://blog.tartanllama.xyz/structured-bindings/
// And see https://github.com/nlohmann/json/pull/1391
template<std::size_t N, typename IteratorType, enable_if_t<N == 1, int> = 0>
auto get(const nlohmann::detail::iteration_proxy_value<IteratorType>& i) -> decltype(i.value())
{
return i.value();
}
} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
// The Addition to the STD Namespace is required to add
// Structured Bindings Support to the iteration_proxy_value class
// For further reference see https://blog.tartanllama.xyz/structured-bindings/
// And see https://github.com/nlohmann/json/pull/1391
namespace std
{
#if defined(__clang__)
// Fix: https://github.com/nlohmann/json/issues/1401
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmismatched-tags"
#endif
template<typename IteratorType>
class tuple_size<::nlohmann::detail::iteration_proxy_value<IteratorType>> // NOLINT(cert-dcl58-cpp)
: public std::integral_constant<std::size_t, 2> {};
template<std::size_t N, typename IteratorType>
class tuple_element<N, ::nlohmann::detail::iteration_proxy_value<IteratorType >> // NOLINT(cert-dcl58-cpp)
{
public:
using type = decltype(
get<N>(std::declval <
::nlohmann::detail::iteration_proxy_value<IteratorType >> ()));
};
#if defined(__clang__)
#pragma clang diagnostic pop
#endif
} // namespace std
#if JSON_HAS_RANGES
template <typename IteratorType>
inline constexpr bool ::std::ranges::enable_borrowed_range<::nlohmann::detail::iteration_proxy<IteratorType>> = true;
#endif
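The tuple_size/tuple_element specializations above are what enable structured bindings on the proxy returned by the public items() member. A sketch, assuming the documented interface:

#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main()
{
    json j = {{"name", "slim"}, {"tag", 42}};

    // each element of items() is an iteration_proxy_value;
    // get<0> yields the key, get<1> the value (C++17)
    for (auto& [key, value] : j.items())
    {
        std::cout << key << ": " << value << '\n';
    }

    // for arrays, the "key" is the stringified index ("0", "1", ...)
    json arr = json::array({"a", "b"});
    for (auto& [idx, el] : arr.items())
    {
        std::cout << idx << ": " << el << '\n';
    }
}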


@@ -1,61 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <iterator> // random_access_iterator_tag
#include <nlohmann/detail/abi_macros.hpp>
#include <nlohmann/detail/meta/void_t.hpp>
#include <nlohmann/detail/meta/cpp_future.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{
template<typename It, typename = void>
struct iterator_types {};
template<typename It>
struct iterator_types <
It,
void_t<typename It::difference_type, typename It::value_type, typename It::pointer,
typename It::reference, typename It::iterator_category >>
{
using difference_type = typename It::difference_type;
using value_type = typename It::value_type;
using pointer = typename It::pointer;
using reference = typename It::reference;
using iterator_category = typename It::iterator_category;
};
// This is required as some compilers implement std::iterator_traits in a way that
// doesn't work with SFINAE. See https://github.com/nlohmann/json/issues/1341.
template<typename T, typename = void>
struct iterator_traits
{
};
template<typename T>
struct iterator_traits < T, enable_if_t < !std::is_pointer<T>::value >>
: iterator_types<T>
{
};
template<typename T>
struct iterator_traits<T*, enable_if_t<std::is_object<T>::value>>
{
using iterator_category = std::random_access_iterator_tag;
using value_type = T;
using difference_type = ptrdiff_t;
using pointer = T*;
using reference = T&;
};
} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
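A hedged illustration of what this shim provides; nlohmann::detail::iterator_traits is an internal name and is used here for exposition only:

#include <iterator>    // random_access_iterator_tag
#include <type_traits> // is_same
#include <vector>
#include <nlohmann/json.hpp>

using nlohmann::detail::iterator_traits; // internal, exposition only

// raw object pointers are classified as random-access iterators
static_assert(std::is_same<iterator_traits<int*>::iterator_category,
              std::random_access_iterator_tag>::value, "");

// class-type iterators expose their nested typedefs via iterator_types
static_assert(std::is_same<iterator_traits<std::vector<int>::iterator>::value_type,
              int>::value, "");

int main() {}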


@@ -1,130 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <cstddef> // ptrdiff_t
#include <iterator> // reverse_iterator
#include <utility> // declval
#include <nlohmann/detail/abi_macros.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{
//////////////////////
// reverse_iterator //
//////////////////////
/*!
@brief a template for a reverse iterator class
@tparam Base the base iterator type to reverse. Valid types are @ref
iterator (to create @ref reverse_iterator) and @ref const_iterator (to
create @ref const_reverse_iterator).
@requirement The class satisfies the following concept requirements:
-
[BidirectionalIterator](https://en.cppreference.com/w/cpp/named_req/BidirectionalIterator):
The iterator can be moved in both directions (i.e. incremented and
decremented).
- [OutputIterator](https://en.cppreference.com/w/cpp/named_req/OutputIterator):
It is possible to write to the pointed-to element (only if @a Base is
@ref iterator).
@since version 1.0.0
*/
template<typename Base>
class json_reverse_iterator : public std::reverse_iterator<Base>
{
public:
using difference_type = std::ptrdiff_t;
/// shortcut to the reverse iterator adapter
using base_iterator = std::reverse_iterator<Base>;
/// the reference type for the pointed-to element
using reference = typename Base::reference;
/// create reverse iterator from iterator
explicit json_reverse_iterator(const typename base_iterator::iterator_type& it) noexcept
: base_iterator(it) {}
/// create reverse iterator from base class
explicit json_reverse_iterator(const base_iterator& it) noexcept : base_iterator(it) {}
/// post-increment (it++)
json_reverse_iterator operator++(int)& // NOLINT(cert-dcl21-cpp)
{
return static_cast<json_reverse_iterator>(base_iterator::operator++(1));
}
/// pre-increment (++it)
json_reverse_iterator& operator++()
{
return static_cast<json_reverse_iterator&>(base_iterator::operator++());
}
/// post-decrement (it--)
json_reverse_iterator operator--(int)& // NOLINT(cert-dcl21-cpp)
{
return static_cast<json_reverse_iterator>(base_iterator::operator--(1));
}
/// pre-decrement (--it)
json_reverse_iterator& operator--()
{
return static_cast<json_reverse_iterator&>(base_iterator::operator--());
}
/// add to iterator
json_reverse_iterator& operator+=(difference_type i)
{
return static_cast<json_reverse_iterator&>(base_iterator::operator+=(i));
}
/// add to iterator
json_reverse_iterator operator+(difference_type i) const
{
return static_cast<json_reverse_iterator>(base_iterator::operator+(i));
}
/// subtract from iterator
json_reverse_iterator operator-(difference_type i) const
{
return static_cast<json_reverse_iterator>(base_iterator::operator-(i));
}
/// return difference
difference_type operator-(const json_reverse_iterator& other) const
{
return base_iterator(*this) - base_iterator(other);
}
/// access to successor
reference operator[](difference_type n) const
{
return *(this->operator+(n));
}
/// return the key of an object iterator
auto key() const -> decltype(std::declval<Base>().key())
{
auto it = --this->base();
return it.key();
}
/// return the value of an iterator
reference value() const
{
auto it = --this->base();
return it.operator * ();
}
};
} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
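In the public API this adapter backs rbegin()/rend() and their const crbegin()/crend() variants. A short sketch:

#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main()
{
    json j = {1, 2, 3};

    for (auto it = j.rbegin(); it != j.rend(); ++it)
    {
        std::cout << *it << ' '; // prints 3 2 1
    }
    std::cout << '\n';
}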


@@ -1,132 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <cstddef> // ptrdiff_t
#include <limits> // numeric_limits
#include <nlohmann/detail/macro_scope.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{
/*
@brief an iterator for primitive JSON types
This class models an iterator for primitive JSON types (boolean, number,
string). Its only purpose is to allow the iterator/const_iterator classes
to "iterate" over primitive values. Internally, the iterator is modeled by
a `difference_type` variable: begin_value (`0`) models the beginning and
end_value (`1`) models past-the-end.
*/
class primitive_iterator_t
{
private:
using difference_type = std::ptrdiff_t;
static constexpr difference_type begin_value = 0;
static constexpr difference_type end_value = begin_value + 1;
JSON_PRIVATE_UNLESS_TESTED:
/// iterator as signed integer type
difference_type m_it = (std::numeric_limits<std::ptrdiff_t>::min)();
public:
constexpr difference_type get_value() const noexcept
{
return m_it;
}
/// set iterator to a defined beginning
void set_begin() noexcept
{
m_it = begin_value;
}
/// set iterator to a defined past the end
void set_end() noexcept
{
m_it = end_value;
}
/// return whether the iterator can be dereferenced
constexpr bool is_begin() const noexcept
{
return m_it == begin_value;
}
/// return whether the iterator is at end
constexpr bool is_end() const noexcept
{
return m_it == end_value;
}
friend constexpr bool operator==(primitive_iterator_t lhs, primitive_iterator_t rhs) noexcept
{
return lhs.m_it == rhs.m_it;
}
friend constexpr bool operator<(primitive_iterator_t lhs, primitive_iterator_t rhs) noexcept
{
return lhs.m_it < rhs.m_it;
}
primitive_iterator_t operator+(difference_type n) noexcept
{
auto result = *this;
result += n;
return result;
}
friend constexpr difference_type operator-(primitive_iterator_t lhs, primitive_iterator_t rhs) noexcept
{
return lhs.m_it - rhs.m_it;
}
primitive_iterator_t& operator++() noexcept
{
++m_it;
return *this;
}
primitive_iterator_t operator++(int)& noexcept // NOLINT(cert-dcl21-cpp)
{
auto result = *this;
++m_it;
return result;
}
primitive_iterator_t& operator--() noexcept
{
--m_it;
return *this;
}
primitive_iterator_t operator--(int)& noexcept // NOLINT(cert-dcl21-cpp)
{
auto result = *this;
--m_it;
return result;
}
primitive_iterator_t& operator+=(difference_type n) noexcept
{
m_it += n;
return *this;
}
primitive_iterator_t& operator-=(difference_type n) noexcept
{
m_it -= n;
return *this;
}
};
} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
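Seen through the public API, this model makes a primitive value behave like a range of exactly one element, while null behaves like an empty range. A hedged sketch:

#include <cassert>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main()
{
    json j = 42; // a primitive value

    // begin() sets the primitive iterator to begin_value (0),
    // end() to end_value (1)
    auto it = j.begin();
    assert(*it == 42);
    ++it;
    assert(it == j.end());

    // null is modeled as an empty range: begin() == end()
    json n = nullptr;
    assert(n.begin() == n.end());
}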


@@ -1,39 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <type_traits> // conditional, is_same
#include <nlohmann/detail/abi_macros.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{
/*!
@brief Default base class of the @ref basic_json class.
So that the correct implementations of the copy / move ctors / assign operators
of @ref basic_json do not require complex case distinctions
(no base class / custom base class used as customization point),
@ref basic_json always has a base class.
By default, this class is used because it is empty and thus has no effect
on the behavior of @ref basic_json.
*/
struct json_default_base {};
template<class T>
using json_base_class = typename std::conditional <
std::is_same<T, void>::value,
json_default_base,
T
>::type;
} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
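The injected base class is selected via the trailing template parameter of basic_json. A sketch, assuming the 3.11 parameter list, in which every earlier argument keeps its documented default:

#include <cstdint>
#include <map>
#include <string>
#include <vector>
#include <nlohmann/json.hpp>

// a base class injected into basic_json, e.g. to carry per-value metadata
struct with_note
{
    std::string note;
};

// all parameters are the documented defaults except the last one
using json_with_note = nlohmann::basic_json<
    std::map, std::vector, std::string, bool,
    std::int64_t, std::uint64_t, double,
    std::allocator, nlohmann::adl_serializer,
    std::vector<std::uint8_t>, with_note>;

int main()
{
    json_with_note j = 1;
    j.note = "origin: config file"; // member inherited from the base class
}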


@@ -1,988 +0,0 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <algorithm> // all_of
#include <cctype> // isdigit
#include <cerrno> // errno, ERANGE
#include <cstdlib> // strtoull
#ifndef JSON_NO_IO
#include <iosfwd> // ostream
#endif // JSON_NO_IO
#include <limits> // max
#include <numeric> // accumulate
#include <string> // string
#include <utility> // move
#include <vector> // vector
#include <nlohmann/detail/exceptions.hpp>
#include <nlohmann/detail/macro_scope.hpp>
#include <nlohmann/detail/string_concat.hpp>
#include <nlohmann/detail/string_escape.hpp>
#include <nlohmann/detail/value_t.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
/// @brief JSON Pointer defines a string syntax for identifying a specific value within a JSON document
/// @sa https://json.nlohmann.me/api/json_pointer/
template<typename RefStringType>
class json_pointer
{
// allow basic_json to access private members
NLOHMANN_BASIC_JSON_TPL_DECLARATION
friend class basic_json;
template<typename>
friend class json_pointer;
template<typename T>
struct string_t_helper
{
using type = T;
};
NLOHMANN_BASIC_JSON_TPL_DECLARATION
struct string_t_helper<NLOHMANN_BASIC_JSON_TPL>
{
using type = StringType;
};
public:
// for backwards compatibility accept BasicJsonType
using string_t = typename string_t_helper<RefStringType>::type;
/// @brief create JSON pointer
/// @sa https://json.nlohmann.me/api/json_pointer/json_pointer/
explicit json_pointer(const string_t& s = "")
: reference_tokens(split(s))
{}
/// @brief return a string representation of the JSON pointer
/// @sa https://json.nlohmann.me/api/json_pointer/to_string/
string_t to_string() const
{
return std::accumulate(reference_tokens.begin(), reference_tokens.end(),
string_t{},
[](const string_t& a, const string_t& b)
{
return detail::concat(a, '/', detail::escape(b));
});
}
/// @brief return a string representation of the JSON pointer
/// @sa https://json.nlohmann.me/api/json_pointer/operator_string/
JSON_HEDLEY_DEPRECATED_FOR(3.11.0, to_string())
operator string_t() const
{
return to_string();
}
#ifndef JSON_NO_IO
/// @brief write string representation of the JSON pointer to stream
/// @sa https://json.nlohmann.me/api/basic_json/operator_ltlt/
friend std::ostream& operator<<(std::ostream& o, const json_pointer& ptr)
{
o << ptr.to_string();
return o;
}
#endif
/// @brief append another JSON pointer at the end of this JSON pointer
/// @sa https://json.nlohmann.me/api/json_pointer/operator_slasheq/
json_pointer& operator/=(const json_pointer& ptr)
{
reference_tokens.insert(reference_tokens.end(),
ptr.reference_tokens.begin(),
ptr.reference_tokens.end());
return *this;
}
/// @brief append an unescaped reference token at the end of this JSON pointer
/// @sa https://json.nlohmann.me/api/json_pointer/operator_slasheq/
json_pointer& operator/=(string_t token)
{
push_back(std::move(token));
return *this;
}
/// @brief append an array index at the end of this JSON pointer
/// @sa https://json.nlohmann.me/api/json_pointer/operator_slasheq/
json_pointer& operator/=(std::size_t array_idx)
{
return *this /= std::to_string(array_idx);
}
/// @brief create a new JSON pointer by appending the right JSON pointer at the end of the left JSON pointer
/// @sa https://json.nlohmann.me/api/json_pointer/operator_slash/
friend json_pointer operator/(const json_pointer& lhs,
const json_pointer& rhs)
{
return json_pointer(lhs) /= rhs;
}
/// @brief create a new JSON pointer by appending the unescaped token at the end of the JSON pointer
/// @sa https://json.nlohmann.me/api/json_pointer/operator_slash/
friend json_pointer operator/(const json_pointer& lhs, string_t token) // NOLINT(performance-unnecessary-value-param)
{
return json_pointer(lhs) /= std::move(token);
}
/// @brief create a new JSON pointer by appending the array-index-token at the end of the JSON pointer
/// @sa https://json.nlohmann.me/api/json_pointer/operator_slash/
friend json_pointer operator/(const json_pointer& lhs, std::size_t array_idx)
{
return json_pointer(lhs) /= array_idx;
}
/// @brief returns the parent of this JSON pointer
/// @sa https://json.nlohmann.me/api/json_pointer/parent_pointer/
json_pointer parent_pointer() const
{
if (empty())
{
return *this;
}
json_pointer res = *this;
res.pop_back();
return res;
}
/// @brief remove last reference token
/// @sa https://json.nlohmann.me/api/json_pointer/pop_back/
void pop_back()
{
if (JSON_HEDLEY_UNLIKELY(empty()))
{
JSON_THROW(detail::out_of_range::create(405, "JSON pointer has no parent", nullptr));
}
reference_tokens.pop_back();
}
/// @brief return last reference token
/// @sa https://json.nlohmann.me/api/json_pointer/back/
const string_t& back() const
{
if (JSON_HEDLEY_UNLIKELY(empty()))
{
JSON_THROW(detail::out_of_range::create(405, "JSON pointer has no parent", nullptr));
}
return reference_tokens.back();
}
/// @brief append an unescaped token at the end of the reference pointer
/// @sa https://json.nlohmann.me/api/json_pointer/push_back/
void push_back(const string_t& token)
{
reference_tokens.push_back(token);
}
/// @brief append an unescaped token at the end of the reference pointer
/// @sa https://json.nlohmann.me/api/json_pointer/push_back/
void push_back(string_t&& token)
{
reference_tokens.push_back(std::move(token));
}
/// @brief return whether pointer points to the root document
/// @sa https://json.nlohmann.me/api/json_pointer/empty/
bool empty() const noexcept
{
return reference_tokens.empty();
}
private:
/*!
@param[in] s reference token to be converted into an array index
@return integer representation of @a s
@throw parse_error.106 if an array index begins with '0'
@throw parse_error.109 if an array index begins not with a digit
@throw out_of_range.404 if string @a s could not be converted to an integer
@throw out_of_range.410 if an array index exceeds size_type
*/
template<typename BasicJsonType>
static typename BasicJsonType::size_type array_index(const string_t& s)
{
using size_type = typename BasicJsonType::size_type;
// error condition (cf. RFC 6901, Sect. 4)
if (JSON_HEDLEY_UNLIKELY(s.size() > 1 && s[0] == '0'))
{
JSON_THROW(detail::parse_error::create(106, 0, detail::concat("array index '", s, "' must not begin with '0'"), nullptr));
}
// error condition (cf. RFC 6901, Sect. 4)
if (JSON_HEDLEY_UNLIKELY(s.size() > 1 && !(s[0] >= '1' && s[0] <= '9')))
{
JSON_THROW(detail::parse_error::create(109, 0, detail::concat("array index '", s, "' is not a number"), nullptr));
}
const char* p = s.c_str();
char* p_end = nullptr;
errno = 0; // strtoull doesn't reset errno
const unsigned long long res = std::strtoull(p, &p_end, 10); // NOLINT(runtime/int)
if (p == p_end // invalid input or empty string
|| errno == ERANGE // out of range
|| JSON_HEDLEY_UNLIKELY(static_cast<std::size_t>(p_end - p) != s.size())) // incomplete read
{
JSON_THROW(detail::out_of_range::create(404, detail::concat("unresolved reference token '", s, "'"), nullptr));
}
// only triggered on special platforms (like 32bit), see also
// https://github.com/nlohmann/json/pull/2203
if (res >= static_cast<unsigned long long>((std::numeric_limits<size_type>::max)())) // NOLINT(runtime/int)
{
JSON_THROW(detail::out_of_range::create(410, detail::concat("array index ", s, " exceeds size_type"), nullptr)); // LCOV_EXCL_LINE
}
return static_cast<size_type>(res);
}
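// Illustrative behaviour of array_index (examples added for clarity, not in the original file):
//   array_index("0")  -> 0,  array_index("10") -> 10
//   array_index("01") throws parse_error.106  (leading zero, cf. RFC 6901)
//   array_index("a0") throws parse_error.109  (first character is not a digit)
//   array_index("1x") throws out_of_range.404 (trailing characters after the digits)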
JSON_PRIVATE_UNLESS_TESTED:
json_pointer top() const
{
if (JSON_HEDLEY_UNLIKELY(empty()))
{
JSON_THROW(detail::out_of_range::create(405, "JSON pointer has no parent", nullptr));
}
json_pointer result = *this;
result.reference_tokens = {reference_tokens[0]};
return result;
}
private:
/*!
@brief create and return a reference to the pointed to value
@complexity Linear in the number of reference tokens.
@throw parse_error.109 if array index is not a number
@throw type_error.313 if value cannot be unflattened
*/
template<typename BasicJsonType>
BasicJsonType& get_and_create(BasicJsonType& j) const
{
auto* result = &j;
// in case no reference tokens exist, return a reference to the JSON value
// j which will be overwritten by a primitive value
for (const auto& reference_token : reference_tokens)
{
switch (result->type())
{
case detail::value_t::null:
{
if (reference_token == "0")
{
// start a new array if reference token is 0
result = &result->operator[](0);
}
else
{
// start a new object otherwise
result = &result->operator[](reference_token);
}
break;
}
case detail::value_t::object:
{
// create an entry in the object
result = &result->operator[](reference_token);
break;
}
case detail::value_t::array:
{
// create an entry in the array
result = &result->operator[](array_index<BasicJsonType>(reference_token));
break;
}
/*
The following code is only reached if there exists a reference
token _and_ the current value is primitive. In this case, we have
an error situation, because primitive values may only occur as
single value; that is, with an empty list of reference tokens.
*/
case detail::value_t::string:
case detail::value_t::boolean:
case detail::value_t::number_integer:
case detail::value_t::number_unsigned:
case detail::value_t::number_float:
case detail::value_t::binary:
case detail::value_t::discarded:
default:
JSON_THROW(detail::type_error::create(313, "invalid value to unflatten", &j));
}
}
return *result;
}
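// Illustrative (added for clarity, not in the original file): applied to a null value,
// json_pointer("/list/0") builds {"list": [<value>]} because the token "0" starts an array,
// whereas json_pointer("/obj/key") builds nested objects; unflatten() below relies on this.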
/*!
@brief return a reference to the pointed to value
@note This version does not throw if a value is not present, but tries to
create nested values instead. For instance, calling this function
with pointer `"/this/that"` on a null value is equivalent to calling
`operator[]("this").operator[]("that")` on that value, effectively
changing the null value to an object.
@param[in] ptr a JSON value
@return reference to the JSON value pointed to by the JSON pointer
@complexity Linear in the length of the JSON pointer.
@throw parse_error.106 if an array index begins with '0'
@throw parse_error.109 if an array index was not a number
@throw out_of_range.404 if the JSON pointer can not be resolved
*/
template<typename BasicJsonType>
BasicJsonType& get_unchecked(BasicJsonType* ptr) const
{
for (const auto& reference_token : reference_tokens)
{
// convert null values to arrays or objects before continuing
if (ptr->is_null())
{
// check if reference token is a number
const bool nums =
std::all_of(reference_token.begin(), reference_token.end(),
[](const unsigned char x)
{
return std::isdigit(x);
});
// change value to array for numbers or "-" or to object otherwise
*ptr = (nums || reference_token == "-")
? detail::value_t::array
: detail::value_t::object;
}
switch (ptr->type())
{
case detail::value_t::object:
{
// use unchecked object access
ptr = &ptr->operator[](reference_token);
break;
}
case detail::value_t::array:
{
if (reference_token == "-")
{
// explicitly treat "-" as index beyond the end
ptr = &ptr->operator[](ptr->m_data.m_value.array->size());
}
else
{
// convert array index to number; unchecked access
ptr = &ptr->operator[](array_index<BasicJsonType>(reference_token));
}
break;
}
case detail::value_t::null:
case detail::value_t::string:
case detail::value_t::boolean:
case detail::value_t::number_integer:
case detail::value_t::number_unsigned:
case detail::value_t::number_float:
case detail::value_t::binary:
case detail::value_t::discarded:
default:
JSON_THROW(detail::out_of_range::create(404, detail::concat("unresolved reference token '", reference_token, "'"), ptr));
}
}
return *ptr;
}
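// Illustrative (added for clarity, not in the original file): the "-" token resolves to
// one past the end, so writing through the non-const operator[] appends:
//   json j = {{"arr", {1, 2}}};
//   j[json::json_pointer("/arr/-")] = 3;  // j["arr"] becomes [1, 2, 3]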
/*!
@throw parse_error.106 if an array index begins with '0'
@throw parse_error.109 if an array index was not a number
@throw out_of_range.402 if the array index '-' is used
@throw out_of_range.404 if the JSON pointer can not be resolved
*/
template<typename BasicJsonType>
BasicJsonType& get_checked(BasicJsonType* ptr) const
{
for (const auto& reference_token : reference_tokens)
{
switch (ptr->type())
{
case detail::value_t::object:
{
// note: at performs range check
ptr = &ptr->at(reference_token);
break;
}
case detail::value_t::array:
{
if (JSON_HEDLEY_UNLIKELY(reference_token == "-"))
{
// "-" always fails the range check
JSON_THROW(detail::out_of_range::create(402, detail::concat(
"array index '-' (", std::to_string(ptr->m_data.m_value.array->size()),
") is out of range"), ptr));
}
// note: at performs range check
ptr = &ptr->at(array_index<BasicJsonType>(reference_token));
break;
}
case detail::value_t::null:
case detail::value_t::string:
case detail::value_t::boolean:
case detail::value_t::number_integer:
case detail::value_t::number_unsigned:
case detail::value_t::number_float:
case detail::value_t::binary:
case detail::value_t::discarded:
default:
JSON_THROW(detail::out_of_range::create(404, detail::concat("unresolved reference token '", reference_token, "'"), ptr));
}
}
return *ptr;
}
/*!
@brief return a const reference to the pointed to value
@param[in] ptr a JSON value
@return const reference to the JSON value pointed to by the JSON
pointer
@throw parse_error.106 if an array index begins with '0'
@throw parse_error.109 if an array index was not a number
@throw out_of_range.402 if the array index '-' is used
@throw out_of_range.404 if the JSON pointer can not be resolved
*/
template<typename BasicJsonType>
const BasicJsonType& get_unchecked(const BasicJsonType* ptr) const
{
for (const auto& reference_token : reference_tokens)
{
switch (ptr->type())
{
case detail::value_t::object:
{
// use unchecked object access
ptr = &ptr->operator[](reference_token);
break;
}
case detail::value_t::array:
{
if (JSON_HEDLEY_UNLIKELY(reference_token == "-"))
{
// "-" cannot be used for const access
JSON_THROW(detail::out_of_range::create(402, detail::concat("array index '-' (", std::to_string(ptr->m_data.m_value.array->size()), ") is out of range"), ptr));
}
// use unchecked array access
ptr = &ptr->operator[](array_index<BasicJsonType>(reference_token));
break;
}
case detail::value_t::null:
case detail::value_t::string:
case detail::value_t::boolean:
case detail::value_t::number_integer:
case detail::value_t::number_unsigned:
case detail::value_t::number_float:
case detail::value_t::binary:
case detail::value_t::discarded:
default:
JSON_THROW(detail::out_of_range::create(404, detail::concat("unresolved reference token '", reference_token, "'"), ptr));
}
}
return *ptr;
}
/*!
@throw parse_error.106 if an array index begins with '0'
@throw parse_error.109 if an array index was not a number
@throw out_of_range.402 if the array index '-' is used
@throw out_of_range.404 if the JSON pointer can not be resolved
*/
template<typename BasicJsonType>
const BasicJsonType& get_checked(const BasicJsonType* ptr) const
{
for (const auto& reference_token : reference_tokens)
{
switch (ptr->type())
{
case detail::value_t::object:
{
// note: at performs range check
ptr = &ptr->at(reference_token);
break;
}
case detail::value_t::array:
{
if (JSON_HEDLEY_UNLIKELY(reference_token == "-"))
{
// "-" always fails the range check
JSON_THROW(detail::out_of_range::create(402, detail::concat(
"array index '-' (", std::to_string(ptr->m_data.m_value.array->size()),
") is out of range"), ptr));
}
// note: at performs range check
ptr = &ptr->at(array_index<BasicJsonType>(reference_token));
break;
}
case detail::value_t::null:
case detail::value_t::string:
case detail::value_t::boolean:
case detail::value_t::number_integer:
case detail::value_t::number_unsigned:
case detail::value_t::number_float:
case detail::value_t::binary:
case detail::value_t::discarded:
default:
JSON_THROW(detail::out_of_range::create(404, detail::concat("unresolved reference token '", reference_token, "'"), ptr));
}
}
return *ptr;
}
/*!
@throw parse_error.106 if an array index begins with '0'
@throw parse_error.109 if an array index was not a number
*/
template<typename BasicJsonType>
bool contains(const BasicJsonType* ptr) const
{
for (const auto& reference_token : reference_tokens)
{
switch (ptr->type())
{
case detail::value_t::object:
{
if (!ptr->contains(reference_token))
{
// we did not find the key in the object
return false;
}
ptr = &ptr->operator[](reference_token);
break;
}
case detail::value_t::array:
{
if (JSON_HEDLEY_UNLIKELY(reference_token == "-"))
{
// "-" always fails the range check
return false;
}
if (JSON_HEDLEY_UNLIKELY(reference_token.size() == 1 && !("0" <= reference_token && reference_token <= "9")))
{
// invalid char
return false;
}
if (JSON_HEDLEY_UNLIKELY(reference_token.size() > 1))
{
if (JSON_HEDLEY_UNLIKELY(!('1' <= reference_token[0] && reference_token[0] <= '9')))
{
// first char should be between '1' and '9'
return false;
}
for (std::size_t i = 1; i < reference_token.size(); i++)
{
if (JSON_HEDLEY_UNLIKELY(!('0' <= reference_token[i] && reference_token[i] <= '9')))
{
// other char should be between '0' and '9'
return false;
}
}
}
const auto idx = array_index<BasicJsonType>(reference_token);
if (idx >= ptr->size())
{
// index out of range
return false;
}
ptr = &ptr->operator[](idx);
break;
}
case detail::value_t::null:
case detail::value_t::string:
case detail::value_t::boolean:
case detail::value_t::number_integer:
case detail::value_t::number_unsigned:
case detail::value_t::number_float:
case detail::value_t::binary:
case detail::value_t::discarded:
default:
{
// we do not expect primitive values if there is still a
// reference token to process
return false;
}
}
}
// no reference token left means we found a primitive value
return true;
}
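// Illustrative (added for clarity, not in the original file): reached via
// basic_json::contains(json_pointer); an index past the end yields false instead of throwing:
//   json j = {{"items", {1, 2}}};
//   j.contains(json::json_pointer("/items/1"));  // true
//   j.contains(json::json_pointer("/items/9"));  // false, no out_of_range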
/*!
@brief split the string input to reference tokens
@note This function is only called by the json_pointer constructor.
All exceptions below are documented there.
@throw parse_error.107 if the nonempty pointer does not begin with '/'
@throw parse_error.108 if character '~' is not followed by '0' or '1'
*/
static std::vector<string_t> split(const string_t& reference_string)
{
std::vector<string_t> result;
// special case: empty reference string -> no reference tokens
if (reference_string.empty())
{
return result;
}
// check if nonempty reference string begins with slash
if (JSON_HEDLEY_UNLIKELY(reference_string[0] != '/'))
{
JSON_THROW(detail::parse_error::create(107, 1, detail::concat("JSON pointer must be empty or begin with '/' - was: '", reference_string, "'"), nullptr));
}
// extract the reference tokens:
// - slash: position of the last read slash (or end of string)
// - start: position after the previous slash
for (
// search for the first slash after the first character
std::size_t slash = reference_string.find_first_of('/', 1),
// set the beginning of the first reference token
start = 1;
// we can stop if start == 0 (if slash == string_t::npos)
start != 0;
// set the beginning of the next reference token
// (will eventually be 0 if slash == string_t::npos)
start = (slash == string_t::npos) ? 0 : slash + 1,
// find next slash
slash = reference_string.find_first_of('/', start))
{
// use the text between the beginning of the reference token
// (start) and the last slash (slash).
auto reference_token = reference_string.substr(start, slash - start);
// check reference tokens are properly escaped
for (std::size_t pos = reference_token.find_first_of('~');
pos != string_t::npos;
pos = reference_token.find_first_of('~', pos + 1))
{
JSON_ASSERT(reference_token[pos] == '~');
// ~ must be followed by 0 or 1
if (JSON_HEDLEY_UNLIKELY(pos == reference_token.size() - 1 ||
(reference_token[pos + 1] != '0' &&
reference_token[pos + 1] != '1')))
{
JSON_THROW(detail::parse_error::create(108, 0, "escape character '~' must be followed with '0' or '1'", nullptr));
}
}
// finally, store the reference token
detail::unescape(reference_token);
result.push_back(reference_token);
}
return result;
}
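// Illustrative tokenization (added for clarity, not in the original file):
//   split("")           -> {}
//   split("/a/b")       -> {"a", "b"}
//   split("/a~1b/~0c")  -> {"a/b", "~c"}   (~1 unescapes to '/', ~0 to '~')
//   split("a/b")        throws parse_error.107 (missing leading '/')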
private:
/*!
@param[in] reference_string the reference string to the current value
@param[in] value the value to consider
@param[in,out] result the result object to insert values to
@note Empty objects or arrays are flattened to `null`.
*/
template<typename BasicJsonType>
static void flatten(const string_t& reference_string,
const BasicJsonType& value,
BasicJsonType& result)
{
switch (value.type())
{
case detail::value_t::array:
{
if (value.m_data.m_value.array->empty())
{
// flatten empty array as null
result[reference_string] = nullptr;
}
else
{
// iterate array and use index as reference string
for (std::size_t i = 0; i < value.m_data.m_value.array->size(); ++i)
{
flatten(detail::concat(reference_string, '/', std::to_string(i)),
value.m_data.m_value.array->operator[](i), result);
}
}
break;
}
case detail::value_t::object:
{
if (value.m_data.m_value.object->empty())
{
// flatten empty object as null
result[reference_string] = nullptr;
}
else
{
// iterate object and use keys as reference string
for (const auto& element : *value.m_data.m_value.object)
{
flatten(detail::concat(reference_string, '/', detail::escape(element.first)), element.second, result);
}
}
break;
}
case detail::value_t::null:
case detail::value_t::string:
case detail::value_t::boolean:
case detail::value_t::number_integer:
case detail::value_t::number_unsigned:
case detail::value_t::number_float:
case detail::value_t::binary:
case detail::value_t::discarded:
default:
{
// add primitive value with its reference string
result[reference_string] = value;
break;
}
}
}
/*!
@param[in] value flattened JSON
@return unflattened JSON
@throw parse_error.109 if array index is not a number
@throw type_error.314 if value is not an object
@throw type_error.315 if object values are not primitive
@throw type_error.313 if value cannot be unflattened
*/
template<typename BasicJsonType>
static BasicJsonType
unflatten(const BasicJsonType& value)
{
if (JSON_HEDLEY_UNLIKELY(!value.is_object()))
{
JSON_THROW(detail::type_error::create(314, "only objects can be unflattened", &value));
}
BasicJsonType result;
// iterate the JSON object values
for (const auto& element : *value.m_data.m_value.object)
{
if (JSON_HEDLEY_UNLIKELY(!element.second.is_primitive()))
{
JSON_THROW(detail::type_error::create(315, "values in object must be primitive", &element.second));
}
// assign value to reference pointed to by JSON pointer; Note that if
// the JSON pointer is "" (i.e., points to the whole value), function
// get_and_create returns a reference to result itself. An assignment
// will then create a primitive value.
json_pointer(element.first).get_and_create(result) = element.second;
}
return result;
}
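// Illustrative round trip (added for clarity, not in the original file):
//   json j = {{"pi", 3.141}, {"list", {1, 2}}};
//   json f = j.flatten();    // {"/list/0": 1, "/list/1": 2, "/pi": 3.141}
//   json r = f.unflatten();  // equal to j again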
// can't use conversion operator because of ambiguity
json_pointer<string_t> convert() const&
{
json_pointer<string_t> result;
result.reference_tokens = reference_tokens;
return result;
}
json_pointer<string_t> convert()&&
{
json_pointer<string_t> result;
result.reference_tokens = std::move(reference_tokens);
return result;
}
public:
#if JSON_HAS_THREE_WAY_COMPARISON
/// @brief compares two JSON pointers for equality
/// @sa https://json.nlohmann.me/api/json_pointer/operator_eq/
template<typename RefStringTypeRhs>
bool operator==(const json_pointer<RefStringTypeRhs>& rhs) const noexcept
{
return reference_tokens == rhs.reference_tokens;
}
/// @brief compares JSON pointer and string for equality
/// @sa https://json.nlohmann.me/api/json_pointer/operator_eq/
JSON_HEDLEY_DEPRECATED_FOR(3.11.2, operator==(json_pointer))
bool operator==(const string_t& rhs) const
{
return *this == json_pointer(rhs);
}
/// @brief 3-way compares two JSON pointers
template<typename RefStringTypeRhs>
std::strong_ordering operator<=>(const json_pointer<RefStringTypeRhs>& rhs) const noexcept // *NOPAD*
{
return reference_tokens <=> rhs.reference_tokens; // *NOPAD*
}
#else
/// @brief compares two JSON pointers for equality
/// @sa https://json.nlohmann.me/api/json_pointer/operator_eq/
template<typename RefStringTypeLhs, typename RefStringTypeRhs>
// NOLINTNEXTLINE(readability-redundant-declaration)
friend bool operator==(const json_pointer<RefStringTypeLhs>& lhs,
const json_pointer<RefStringTypeRhs>& rhs) noexcept;
/// @brief compares JSON pointer and string for equality
/// @sa https://json.nlohmann.me/api/json_pointer/operator_eq/
template<typename RefStringTypeLhs, typename StringType>
// NOLINTNEXTLINE(readability-redundant-declaration)
friend bool operator==(const json_pointer<RefStringTypeLhs>& lhs,
const StringType& rhs);
/// @brief compares string and JSON pointer for equality
/// @sa https://json.nlohmann.me/api/json_pointer/operator_eq/
template<typename RefStringTypeRhs, typename StringType>
// NOLINTNEXTLINE(readability-redundant-declaration)
friend bool operator==(const StringType& lhs,
const json_pointer<RefStringTypeRhs>& rhs);
/// @brief compares two JSON pointers for inequality
/// @sa https://json.nlohmann.me/api/json_pointer/operator_ne/
template<typename RefStringTypeLhs, typename RefStringTypeRhs>
// NOLINTNEXTLINE(readability-redundant-declaration)
friend bool operator!=(const json_pointer<RefStringTypeLhs>& lhs,
const json_pointer<RefStringTypeRhs>& rhs) noexcept;
/// @brief compares JSON pointer and string for inequality
/// @sa https://json.nlohmann.me/api/json_pointer/operator_ne/
template<typename RefStringTypeLhs, typename StringType>
// NOLINTNEXTLINE(readability-redundant-declaration)
friend bool operator!=(const json_pointer<RefStringTypeLhs>& lhs,
const StringType& rhs);
/// @brief compares string and JSON pointer for inequality
/// @sa https://json.nlohmann.me/api/json_pointer/operator_ne/
template<typename RefStringTypeRhs, typename StringType>
// NOLINTNEXTLINE(readability-redundant-declaration)
friend bool operator!=(const StringType& lhs,
const json_pointer<RefStringTypeRhs>& rhs);
/// @brief compares two JSON pointer for less-than
template<typename RefStringTypeLhs, typename RefStringTypeRhs>
// NOLINTNEXTLINE(readability-redundant-declaration)
friend bool operator<(const json_pointer<RefStringTypeLhs>& lhs,
const json_pointer<RefStringTypeRhs>& rhs) noexcept;
#endif
private:
/// the reference tokens
std::vector<string_t> reference_tokens;
};
#if !JSON_HAS_THREE_WAY_COMPARISON
// functions cannot be defined inside class due to ODR violations
template<typename RefStringTypeLhs, typename RefStringTypeRhs>
inline bool operator==(const json_pointer<RefStringTypeLhs>& lhs,
const json_pointer<RefStringTypeRhs>& rhs) noexcept
{
return lhs.reference_tokens == rhs.reference_tokens;
}
template<typename RefStringTypeLhs,
typename StringType = typename json_pointer<RefStringTypeLhs>::string_t>
JSON_HEDLEY_DEPRECATED_FOR(3.11.2, operator==(json_pointer, json_pointer))
inline bool operator==(const json_pointer<RefStringTypeLhs>& lhs,
const StringType& rhs)
{
return lhs == json_pointer<RefStringTypeLhs>(rhs);
}
template<typename RefStringTypeRhs,
typename StringType = typename json_pointer<RefStringTypeRhs>::string_t>
JSON_HEDLEY_DEPRECATED_FOR(3.11.2, operator==(json_pointer, json_pointer))
inline bool operator==(const StringType& lhs,
const json_pointer<RefStringTypeRhs>& rhs)
{
return json_pointer<RefStringTypeRhs>(lhs) == rhs;
}
template<typename RefStringTypeLhs, typename RefStringTypeRhs>
inline bool operator!=(const json_pointer<RefStringTypeLhs>& lhs,
const json_pointer<RefStringTypeRhs>& rhs) noexcept
{
return !(lhs == rhs);
}
template<typename RefStringTypeLhs,
typename StringType = typename json_pointer<RefStringTypeLhs>::string_t>
JSON_HEDLEY_DEPRECATED_FOR(3.11.2, operator!=(json_pointer, json_pointer))
inline bool operator!=(const json_pointer<RefStringTypeLhs>& lhs,
const StringType& rhs)
{
return !(lhs == rhs);
}
template<typename RefStringTypeRhs,
typename StringType = typename json_pointer<RefStringTypeRhs>::string_t>
JSON_HEDLEY_DEPRECATED_FOR(3.11.2, operator!=(json_pointer, json_pointer))
inline bool operator!=(const StringType& lhs,
const json_pointer<RefStringTypeRhs>& rhs)
{
return !(lhs == rhs);
}
template<typename RefStringTypeLhs, typename RefStringTypeRhs>
inline bool operator<(const json_pointer<RefStringTypeLhs>& lhs,
const json_pointer<RefStringTypeRhs>& rhs) noexcept
{
return lhs.reference_tokens < rhs.reference_tokens;
}
#endif
NLOHMANN_JSON_NAMESPACE_END
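Taken together, the class above provides RFC 6901 lookup plus pointer arithmetic. A short usage sketch against this vendored 3.11.3 API (the values are illustrative):
#include <cassert>
#include <nlohmann/json.hpp>
int main()
{
    using json = nlohmann::json;
    json j = {{"name", "demo"}, {"items", {10, 20, 30}}};
    const json::json_pointer p("/items/1");
    assert(j[p] == 20);                              // lookup along the pointer
    const auto q = json::json_pointer("/items") / 2; // operator/ appends a token
    assert(q.to_string() == "/items/2");
}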


@@ -1,78 +0,0 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <initializer_list>
#include <utility>
#include <nlohmann/detail/abi_macros.hpp>
#include <nlohmann/detail/meta/type_traits.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{
template<typename BasicJsonType>
class json_ref
{
public:
using value_type = BasicJsonType;
json_ref(value_type&& value)
: owned_value(std::move(value))
{}
json_ref(const value_type& value)
: value_ref(&value)
{}
json_ref(std::initializer_list<json_ref> init)
: owned_value(init)
{}
template <
class... Args,
enable_if_t<std::is_constructible<value_type, Args...>::value, int> = 0 >
json_ref(Args && ... args)
: owned_value(std::forward<Args>(args)...)
{}
// class should be movable only
json_ref(json_ref&&) noexcept = default;
json_ref(const json_ref&) = delete;
json_ref& operator=(const json_ref&) = delete;
json_ref& operator=(json_ref&&) = delete;
~json_ref() = default;
value_type moved_or_copied() const
{
if (value_ref == nullptr)
{
return std::move(owned_value);
}
return *value_ref;
}
value_type const& operator*() const
{
return value_ref ? *value_ref : owned_value;
}
value_type const* operator->() const
{
return &**this;
}
private:
mutable value_type owned_value = nullptr;
value_type const* value_ref = nullptr;
};
} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
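json_ref is the internal vehicle that lets brace-enclosed initializer lists build basic_json values without redundant copies: rvalues are moved into owned_value, lvalues are merely referenced, and moved_or_copied() materializes whichever is held. Its effect shows up in ordinary nested brace construction (illustrative):
#include <nlohmann/json.hpp>
int main()
{
    // each element below travels through detail::json_ref;
    // the temporaries are moved, not copied
    nlohmann::json j = {1, "two", {{"three", 3}}};
}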


@@ -1,482 +0,0 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <utility> // declval, pair
#include <nlohmann/detail/meta/detected.hpp>
#include <nlohmann/thirdparty/hedley/hedley.hpp>
// This file contains all internal macro definitions (except those affecting ABI)
// You MUST include macro_unscope.hpp at the end of json.hpp to undef all of them
#include <nlohmann/detail/abi_macros.hpp>
// exclude unsupported compilers
#if !defined(JSON_SKIP_UNSUPPORTED_COMPILER_CHECK)
#if defined(__clang__)
#if (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) < 30400
#error "unsupported Clang version - see https://github.com/nlohmann/json#supported-compilers"
#endif
#elif defined(__GNUC__) && !(defined(__ICC) || defined(__INTEL_COMPILER))
#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40800
#error "unsupported GCC version - see https://github.com/nlohmann/json#supported-compilers"
#endif
#endif
#endif
// C++ language standard detection
// this check is skipped if the user manually specified the C++ version
#if !defined(JSON_HAS_CPP_20) && !defined(JSON_HAS_CPP_17) && !defined(JSON_HAS_CPP_14) && !defined(JSON_HAS_CPP_11)
#if (defined(__cplusplus) && __cplusplus >= 202002L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L)
#define JSON_HAS_CPP_20
#define JSON_HAS_CPP_17
#define JSON_HAS_CPP_14
#elif (defined(__cplusplus) && __cplusplus >= 201703L) || (defined(_HAS_CXX17) && _HAS_CXX17 == 1) // fix for issue #464
#define JSON_HAS_CPP_17
#define JSON_HAS_CPP_14
#elif (defined(__cplusplus) && __cplusplus >= 201402L) || (defined(_HAS_CXX14) && _HAS_CXX14 == 1)
#define JSON_HAS_CPP_14
#endif
// the C++11 flag is always set because it is the minimum required version
#define JSON_HAS_CPP_11
#endif
#ifdef __has_include
#if __has_include(<version>)
#include <version>
#endif
#endif
#if !defined(JSON_HAS_FILESYSTEM) && !defined(JSON_HAS_EXPERIMENTAL_FILESYSTEM)
#ifdef JSON_HAS_CPP_17
#if defined(__cpp_lib_filesystem)
#define JSON_HAS_FILESYSTEM 1
#elif defined(__cpp_lib_experimental_filesystem)
#define JSON_HAS_EXPERIMENTAL_FILESYSTEM 1
#elif !defined(__has_include)
#define JSON_HAS_EXPERIMENTAL_FILESYSTEM 1
#elif __has_include(<filesystem>)
#define JSON_HAS_FILESYSTEM 1
#elif __has_include(<experimental/filesystem>)
#define JSON_HAS_EXPERIMENTAL_FILESYSTEM 1
#endif
// std::filesystem does not work on MinGW GCC 8: https://sourceforge.net/p/mingw-w64/bugs/737/
#if defined(__MINGW32__) && defined(__GNUC__) && __GNUC__ == 8
#undef JSON_HAS_FILESYSTEM
#undef JSON_HAS_EXPERIMENTAL_FILESYSTEM
#endif
// no filesystem support before GCC 8: https://en.cppreference.com/w/cpp/compiler_support
#if defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 8
#undef JSON_HAS_FILESYSTEM
#undef JSON_HAS_EXPERIMENTAL_FILESYSTEM
#endif
// no filesystem support before Clang 7: https://en.cppreference.com/w/cpp/compiler_support
#if defined(__clang_major__) && __clang_major__ < 7
#undef JSON_HAS_FILESYSTEM
#undef JSON_HAS_EXPERIMENTAL_FILESYSTEM
#endif
// no filesystem support before MSVC 19.14: https://en.cppreference.com/w/cpp/compiler_support
#if defined(_MSC_VER) && _MSC_VER < 1914
#undef JSON_HAS_FILESYSTEM
#undef JSON_HAS_EXPERIMENTAL_FILESYSTEM
#endif
// no filesystem support before iOS 13
#if defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED < 130000
#undef JSON_HAS_FILESYSTEM
#undef JSON_HAS_EXPERIMENTAL_FILESYSTEM
#endif
// no filesystem support before macOS Catalina
#if defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED < 101500
#undef JSON_HAS_FILESYSTEM
#undef JSON_HAS_EXPERIMENTAL_FILESYSTEM
#endif
#endif
#endif
#ifndef JSON_HAS_EXPERIMENTAL_FILESYSTEM
#define JSON_HAS_EXPERIMENTAL_FILESYSTEM 0
#endif
#ifndef JSON_HAS_FILESYSTEM
#define JSON_HAS_FILESYSTEM 0
#endif
#ifndef JSON_HAS_THREE_WAY_COMPARISON
#if defined(__cpp_impl_three_way_comparison) && __cpp_impl_three_way_comparison >= 201907L \
&& defined(__cpp_lib_three_way_comparison) && __cpp_lib_three_way_comparison >= 201907L
#define JSON_HAS_THREE_WAY_COMPARISON 1
#else
#define JSON_HAS_THREE_WAY_COMPARISON 0
#endif
#endif
#ifndef JSON_HAS_RANGES
// the ranges header shipped in GCC 11.1.0 (released 2021-04-27) has a syntax error
#if defined(__GLIBCXX__) && __GLIBCXX__ == 20210427
#define JSON_HAS_RANGES 0
#elif defined(__cpp_lib_ranges)
#define JSON_HAS_RANGES 1
#else
#define JSON_HAS_RANGES 0
#endif
#endif
#ifndef JSON_HAS_STATIC_RTTI
#if !defined(_HAS_STATIC_RTTI) || _HAS_STATIC_RTTI != 0
#define JSON_HAS_STATIC_RTTI 1
#else
#define JSON_HAS_STATIC_RTTI 0
#endif
#endif
#ifdef JSON_HAS_CPP_17
#define JSON_INLINE_VARIABLE inline
#else
#define JSON_INLINE_VARIABLE
#endif
#if JSON_HEDLEY_HAS_ATTRIBUTE(no_unique_address)
#define JSON_NO_UNIQUE_ADDRESS [[no_unique_address]]
#else
#define JSON_NO_UNIQUE_ADDRESS
#endif
// disable documentation warnings on clang
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdocumentation"
#pragma clang diagnostic ignored "-Wdocumentation-unknown-command"
#endif
// allow disabling exceptions
#if (defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)) && !defined(JSON_NOEXCEPTION)
#define JSON_THROW(exception) throw exception
#define JSON_TRY try
#define JSON_CATCH(exception) catch(exception)
#define JSON_INTERNAL_CATCH(exception) catch(exception)
#else
#include <cstdlib>
#define JSON_THROW(exception) std::abort()
#define JSON_TRY if(true)
#define JSON_CATCH(exception) if(false)
#define JSON_INTERNAL_CATCH(exception) if(false)
#endif
// override exception macros
#if defined(JSON_THROW_USER)
#undef JSON_THROW
#define JSON_THROW JSON_THROW_USER
#endif
#if defined(JSON_TRY_USER)
#undef JSON_TRY
#define JSON_TRY JSON_TRY_USER
#endif
#if defined(JSON_CATCH_USER)
#undef JSON_CATCH
#define JSON_CATCH JSON_CATCH_USER
#undef JSON_INTERNAL_CATCH
#define JSON_INTERNAL_CATCH JSON_CATCH_USER
#endif
#if defined(JSON_INTERNAL_CATCH_USER)
#undef JSON_INTERNAL_CATCH
#define JSON_INTERNAL_CATCH JSON_INTERNAL_CATCH_USER
#endif
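// Illustrative override (user code placed before including json.hpp; not part of this file):
//   #define JSON_THROW_USER(exception) std::abort()
//   #include <nlohmann/json.hpp>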
// allow overriding assert
#if !defined(JSON_ASSERT)
#include <cassert> // assert
#define JSON_ASSERT(x) assert(x)
#endif
// allow access to some private functions (needed by the test suite)
#if defined(JSON_TESTS_PRIVATE)
#define JSON_PRIVATE_UNLESS_TESTED public
#else
#define JSON_PRIVATE_UNLESS_TESTED private
#endif
/*!
@brief macro to briefly define a mapping between an enum and JSON
@def NLOHMANN_JSON_SERIALIZE_ENUM
@since version 3.4.0
*/
#define NLOHMANN_JSON_SERIALIZE_ENUM(ENUM_TYPE, ...) \
template<typename BasicJsonType> \
inline void to_json(BasicJsonType& j, const ENUM_TYPE& e) \
{ \
static_assert(std::is_enum<ENUM_TYPE>::value, #ENUM_TYPE " must be an enum!"); \
static const std::pair<ENUM_TYPE, BasicJsonType> m[] = __VA_ARGS__; \
auto it = std::find_if(std::begin(m), std::end(m), \
[e](const std::pair<ENUM_TYPE, BasicJsonType>& ej_pair) -> bool \
{ \
return ej_pair.first == e; \
}); \
j = ((it != std::end(m)) ? it : std::begin(m))->second; \
} \
template<typename BasicJsonType> \
inline void from_json(const BasicJsonType& j, ENUM_TYPE& e) \
{ \
static_assert(std::is_enum<ENUM_TYPE>::value, #ENUM_TYPE " must be an enum!"); \
static const std::pair<ENUM_TYPE, BasicJsonType> m[] = __VA_ARGS__; \
auto it = std::find_if(std::begin(m), std::end(m), \
[&j](const std::pair<ENUM_TYPE, BasicJsonType>& ej_pair) -> bool \
{ \
return ej_pair.second == j; \
}); \
e = ((it != std::end(m)) ? it : std::begin(m))->first; \
}
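// Illustrative usage (added for clarity, not part of this file):
//   enum class Color { red, green, blue };
//   NLOHMANN_JSON_SERIALIZE_ENUM(Color, {
//       {Color::red, "red"}, {Color::green, "green"}, {Color::blue, "blue"}
//   })
//   json j = Color::green;    // serializes to "green"
//   auto c = j.get<Color>();  // Color::green; unmapped values fall back to the first pair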
// Ugly macros to avoid uglier copy-paste when specializing basic_json. They
// may be removed in the future once the class is split.
#define NLOHMANN_BASIC_JSON_TPL_DECLARATION \
template<template<typename, typename, typename...> class ObjectType, \
template<typename, typename...> class ArrayType, \
class StringType, class BooleanType, class NumberIntegerType, \
class NumberUnsignedType, class NumberFloatType, \
template<typename> class AllocatorType, \
template<typename, typename = void> class JSONSerializer, \
class BinaryType, \
class CustomBaseClass>
#define NLOHMANN_BASIC_JSON_TPL \
basic_json<ObjectType, ArrayType, StringType, BooleanType, \
NumberIntegerType, NumberUnsignedType, NumberFloatType, \
AllocatorType, JSONSerializer, BinaryType, CustomBaseClass>
// Macros to simplify conversion from/to types
#define NLOHMANN_JSON_EXPAND( x ) x
#define NLOHMANN_JSON_GET_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, NAME,...) NAME
#define NLOHMANN_JSON_PASTE(...) NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_GET_MACRO(__VA_ARGS__, \
NLOHMANN_JSON_PASTE64, \
NLOHMANN_JSON_PASTE63, \
NLOHMANN_JSON_PASTE62, \
NLOHMANN_JSON_PASTE61, \
NLOHMANN_JSON_PASTE60, \
NLOHMANN_JSON_PASTE59, \
NLOHMANN_JSON_PASTE58, \
NLOHMANN_JSON_PASTE57, \
NLOHMANN_JSON_PASTE56, \
NLOHMANN_JSON_PASTE55, \
NLOHMANN_JSON_PASTE54, \
NLOHMANN_JSON_PASTE53, \
NLOHMANN_JSON_PASTE52, \
NLOHMANN_JSON_PASTE51, \
NLOHMANN_JSON_PASTE50, \
NLOHMANN_JSON_PASTE49, \
NLOHMANN_JSON_PASTE48, \
NLOHMANN_JSON_PASTE47, \
NLOHMANN_JSON_PASTE46, \
NLOHMANN_JSON_PASTE45, \
NLOHMANN_JSON_PASTE44, \
NLOHMANN_JSON_PASTE43, \
NLOHMANN_JSON_PASTE42, \
NLOHMANN_JSON_PASTE41, \
NLOHMANN_JSON_PASTE40, \
NLOHMANN_JSON_PASTE39, \
NLOHMANN_JSON_PASTE38, \
NLOHMANN_JSON_PASTE37, \
NLOHMANN_JSON_PASTE36, \
NLOHMANN_JSON_PASTE35, \
NLOHMANN_JSON_PASTE34, \
NLOHMANN_JSON_PASTE33, \
NLOHMANN_JSON_PASTE32, \
NLOHMANN_JSON_PASTE31, \
NLOHMANN_JSON_PASTE30, \
NLOHMANN_JSON_PASTE29, \
NLOHMANN_JSON_PASTE28, \
NLOHMANN_JSON_PASTE27, \
NLOHMANN_JSON_PASTE26, \
NLOHMANN_JSON_PASTE25, \
NLOHMANN_JSON_PASTE24, \
NLOHMANN_JSON_PASTE23, \
NLOHMANN_JSON_PASTE22, \
NLOHMANN_JSON_PASTE21, \
NLOHMANN_JSON_PASTE20, \
NLOHMANN_JSON_PASTE19, \
NLOHMANN_JSON_PASTE18, \
NLOHMANN_JSON_PASTE17, \
NLOHMANN_JSON_PASTE16, \
NLOHMANN_JSON_PASTE15, \
NLOHMANN_JSON_PASTE14, \
NLOHMANN_JSON_PASTE13, \
NLOHMANN_JSON_PASTE12, \
NLOHMANN_JSON_PASTE11, \
NLOHMANN_JSON_PASTE10, \
NLOHMANN_JSON_PASTE9, \
NLOHMANN_JSON_PASTE8, \
NLOHMANN_JSON_PASTE7, \
NLOHMANN_JSON_PASTE6, \
NLOHMANN_JSON_PASTE5, \
NLOHMANN_JSON_PASTE4, \
NLOHMANN_JSON_PASTE3, \
NLOHMANN_JSON_PASTE2, \
NLOHMANN_JSON_PASTE1)(__VA_ARGS__))
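// Illustrative expansion (added for clarity, not part of this file):
//   NLOHMANN_JSON_PASTE(FN, a, b, c) expands to FN(a) FN(b) FN(c);
//   NLOHMANN_JSON_GET_MACRO counts the arguments and dispatches to the matching PASTEn below.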
#define NLOHMANN_JSON_PASTE2(func, v1) func(v1)
#define NLOHMANN_JSON_PASTE3(func, v1, v2) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE2(func, v2)
#define NLOHMANN_JSON_PASTE4(func, v1, v2, v3) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE3(func, v2, v3)
#define NLOHMANN_JSON_PASTE5(func, v1, v2, v3, v4) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE4(func, v2, v3, v4)
#define NLOHMANN_JSON_PASTE6(func, v1, v2, v3, v4, v5) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE5(func, v2, v3, v4, v5)
#define NLOHMANN_JSON_PASTE7(func, v1, v2, v3, v4, v5, v6) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE6(func, v2, v3, v4, v5, v6)
#define NLOHMANN_JSON_PASTE8(func, v1, v2, v3, v4, v5, v6, v7) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE7(func, v2, v3, v4, v5, v6, v7)
#define NLOHMANN_JSON_PASTE9(func, v1, v2, v3, v4, v5, v6, v7, v8) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE8(func, v2, v3, v4, v5, v6, v7, v8)
#define NLOHMANN_JSON_PASTE10(func, v1, v2, v3, v4, v5, v6, v7, v8, v9) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE9(func, v2, v3, v4, v5, v6, v7, v8, v9)
#define NLOHMANN_JSON_PASTE11(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE10(func, v2, v3, v4, v5, v6, v7, v8, v9, v10)
#define NLOHMANN_JSON_PASTE12(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE11(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11)
#define NLOHMANN_JSON_PASTE13(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE12(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12)
#define NLOHMANN_JSON_PASTE14(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE13(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13)
#define NLOHMANN_JSON_PASTE15(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE14(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14)
#define NLOHMANN_JSON_PASTE16(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE15(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15)
#define NLOHMANN_JSON_PASTE17(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE16(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16)
#define NLOHMANN_JSON_PASTE18(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE17(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17)
#define NLOHMANN_JSON_PASTE19(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE18(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18)
#define NLOHMANN_JSON_PASTE20(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE19(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19)
#define NLOHMANN_JSON_PASTE21(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE20(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20)
#define NLOHMANN_JSON_PASTE22(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE21(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21)
#define NLOHMANN_JSON_PASTE23(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE22(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22)
#define NLOHMANN_JSON_PASTE24(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE23(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23)
#define NLOHMANN_JSON_PASTE25(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE24(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24)
#define NLOHMANN_JSON_PASTE26(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE25(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25)
#define NLOHMANN_JSON_PASTE27(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE26(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26)
#define NLOHMANN_JSON_PASTE28(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE27(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27)
#define NLOHMANN_JSON_PASTE29(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE28(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28)
#define NLOHMANN_JSON_PASTE30(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE29(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29)
#define NLOHMANN_JSON_PASTE31(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE30(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30)
#define NLOHMANN_JSON_PASTE32(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE31(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31)
#define NLOHMANN_JSON_PASTE33(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE32(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32)
#define NLOHMANN_JSON_PASTE34(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE33(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33)
#define NLOHMANN_JSON_PASTE35(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE34(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34)
#define NLOHMANN_JSON_PASTE36(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE35(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35)
#define NLOHMANN_JSON_PASTE37(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE36(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36)
#define NLOHMANN_JSON_PASTE38(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE37(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37)
#define NLOHMANN_JSON_PASTE39(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE38(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38)
#define NLOHMANN_JSON_PASTE40(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE39(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39)
#define NLOHMANN_JSON_PASTE41(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE40(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40)
#define NLOHMANN_JSON_PASTE42(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE41(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41)
#define NLOHMANN_JSON_PASTE43(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE42(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42)
#define NLOHMANN_JSON_PASTE44(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE43(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43)
#define NLOHMANN_JSON_PASTE45(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE44(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44)
#define NLOHMANN_JSON_PASTE46(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE45(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45)
#define NLOHMANN_JSON_PASTE47(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE46(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46)
#define NLOHMANN_JSON_PASTE48(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE47(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47)
#define NLOHMANN_JSON_PASTE49(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE48(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48)
#define NLOHMANN_JSON_PASTE50(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE49(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49)
#define NLOHMANN_JSON_PASTE51(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE50(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50)
#define NLOHMANN_JSON_PASTE52(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE51(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51)
#define NLOHMANN_JSON_PASTE53(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE52(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52)
#define NLOHMANN_JSON_PASTE54(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE53(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53)
#define NLOHMANN_JSON_PASTE55(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE54(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54)
#define NLOHMANN_JSON_PASTE56(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE55(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55)
#define NLOHMANN_JSON_PASTE57(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE56(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56)
#define NLOHMANN_JSON_PASTE58(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE57(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57)
#define NLOHMANN_JSON_PASTE59(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE58(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58)
#define NLOHMANN_JSON_PASTE60(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE59(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59)
#define NLOHMANN_JSON_PASTE61(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE60(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60)
#define NLOHMANN_JSON_PASTE62(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE61(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61)
#define NLOHMANN_JSON_PASTE63(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE62(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62)
#define NLOHMANN_JSON_PASTE64(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62, v63) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE63(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62, v63)
#define NLOHMANN_JSON_TO(v1) nlohmann_json_j[#v1] = nlohmann_json_t.v1;
#define NLOHMANN_JSON_FROM(v1) nlohmann_json_j.at(#v1).get_to(nlohmann_json_t.v1);
#define NLOHMANN_JSON_FROM_WITH_DEFAULT(v1) nlohmann_json_t.v1 = nlohmann_json_j.value(#v1, nlohmann_json_default_obj.v1);
/*!
@brief macro to define to_json/from_json as friends inside a class
@def NLOHMANN_DEFINE_TYPE_INTRUSIVE
@since version 3.9.0
*/
#define NLOHMANN_DEFINE_TYPE_INTRUSIVE(Type, ...) \
friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \
friend void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) }
#define NLOHMANN_DEFINE_TYPE_INTRUSIVE_WITH_DEFAULT(Type, ...) \
friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \
friend void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) }
#define NLOHMANN_DEFINE_TYPE_INTRUSIVE_ONLY_SERIALIZE(Type, ...) \
friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) }
/*!
@brief macro to define to_json/from_json outside a class (public members only)
@def NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE
@since version 3.9.0
*/
#define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(Type, ...) \
inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \
inline void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) }
#define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_ONLY_SERIALIZE(Type, ...) \
inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) }
#define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_WITH_DEFAULT(Type, ...) \
inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \
inline void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) }
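NLOHMANN_JSON_PASTE applies the given functor macro (NLOHMANN_JSON_TO or NLOHMANN_JSON_FROM) to each listed member in turn, which is what makes these one-line definitions work. A minimal usage sketch follows (editor's example; the person struct and its fields are invented for illustration):

// --- illustrative usage (editor's example, not part of the library) ---
#include <nlohmann/json.hpp>
#include <string>

namespace ns
{
struct person
{
    std::string name;
    int age = -1;
    // generates to_json/from_json as friends; with the WITH_DEFAULT variant,
    // keys missing from the JSON fall back to the values of a
    // value-initialized person (here: age == -1)
    NLOHMANN_DEFINE_TYPE_INTRUSIVE_WITH_DEFAULT(person, name, age)
};
} // namespace ns

int main()
{
    nlohmann::json j = ns::person{"Ada", 36};                    // to_json
    auto p = j.get<ns::person>();                                // from_json
    auto q = nlohmann::json{{"name", "Bob"}}.get<ns::person>();  // age -> -1
    return (p.age == 36 && q.age == -1) ? 0 : 1;
}
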
// inspired by https://stackoverflow.com/a/26745591
// allows calling any std function as if one had written (e.g. with begin):
// using std::begin; begin(x);
//
// it also allows using the detection idiom to retrieve the return type
// of such an expression
#define NLOHMANN_CAN_CALL_STD_FUNC_IMPL(std_name) \
namespace detail { \
using std::std_name; \
\
template<typename... T> \
using result_of_##std_name = decltype(std_name(std::declval<T>()...)); \
} \
\
namespace detail2 { \
struct std_name##_tag \
{ \
}; \
\
template<typename... T> \
std_name##_tag std_name(T&&...); \
\
template<typename... T> \
using result_of_##std_name = decltype(std_name(std::declval<T>()...)); \
\
template<typename... T> \
struct would_call_std_##std_name \
{ \
static constexpr auto const value = ::nlohmann::detail:: \
is_detected_exact<std_name##_tag, result_of_##std_name, T...>::value; \
}; \
} /* namespace detail2 */ \
\
template<typename... T> \
struct would_call_std_##std_name : detail2::would_call_std_##std_name<T...> \
{ \
}
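In user code, the two-step call this machinery models looks like the following self-contained sketch (editor's example, independent of the library):

// --- illustrative sketch of the "using std::begin; begin(x);" idiom ---
#include <iterator>
#include <vector>

template<typename Range>
auto first_element(Range& r) -> decltype(*std::begin(r))
{
    using std::begin;  // fallback for plain containers and built-in arrays
    return *begin(r);  // unqualified call: ADL may pick a user-provided begin
}

int main()
{
    std::vector<int> v{1, 2, 3};
    return first_element(v) == 1 ? 0 : 1;
}
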
#ifndef JSON_USE_IMPLICIT_CONVERSIONS
#define JSON_USE_IMPLICIT_CONVERSIONS 1
#endif
#if JSON_USE_IMPLICIT_CONVERSIONS
#define JSON_EXPLICIT
#else
#define JSON_EXPLICIT explicit
#endif
#ifndef JSON_DISABLE_ENUM_SERIALIZATION
#define JSON_DISABLE_ENUM_SERIALIZATION 0
#endif
#ifndef JSON_USE_GLOBAL_UDLS
#define JSON_USE_GLOBAL_UDLS 1
#endif
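These switches are meant to be defined before the header is included. For example, disabling implicit conversions turns accidental json-to-value conversions into compile errors (a usage sketch):

// --- illustrative configuration (editor's example) ---
#define JSON_USE_IMPLICIT_CONVERSIONS 0
#include <nlohmann/json.hpp>
#include <string>

int main()
{
    nlohmann::json j = "hello";
    // std::string s = j;            // no longer compiles with the define above
    auto s = j.get<std::string>();   // explicit conversion still works
    return s == "hello" ? 0 : 1;
}
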


@@ -1,45 +0,0 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
// restore clang diagnostic settings
#if defined(__clang__)
#pragma clang diagnostic pop
#endif
// clean up
#undef JSON_ASSERT
#undef JSON_INTERNAL_CATCH
#undef JSON_THROW
#undef JSON_PRIVATE_UNLESS_TESTED
#undef NLOHMANN_BASIC_JSON_TPL_DECLARATION
#undef NLOHMANN_BASIC_JSON_TPL
#undef JSON_EXPLICIT
#undef NLOHMANN_CAN_CALL_STD_FUNC_IMPL
#undef JSON_INLINE_VARIABLE
#undef JSON_NO_UNIQUE_ADDRESS
#undef JSON_DISABLE_ENUM_SERIALIZATION
#undef JSON_USE_GLOBAL_UDLS
#ifndef JSON_TEST_KEEP_MACROS
#undef JSON_CATCH
#undef JSON_TRY
#undef JSON_HAS_CPP_11
#undef JSON_HAS_CPP_14
#undef JSON_HAS_CPP_17
#undef JSON_HAS_CPP_20
#undef JSON_HAS_FILESYSTEM
#undef JSON_HAS_EXPERIMENTAL_FILESYSTEM
#undef JSON_HAS_THREE_WAY_COMPARISON
#undef JSON_HAS_RANGES
#undef JSON_HAS_STATIC_RTTI
#undef JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON
#endif
#include <nlohmann/thirdparty/hedley/hedley_undef.hpp>


@@ -1,17 +0,0 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <nlohmann/detail/macro_scope.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
NLOHMANN_CAN_CALL_STD_FUNC_IMPL(begin);
NLOHMANN_JSON_NAMESPACE_END


@@ -1,17 +0,0 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <nlohmann/detail/macro_scope.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
NLOHMANN_CAN_CALL_STD_FUNC_IMPL(end);
NLOHMANN_JSON_NAMESPACE_END


@@ -1,171 +0,0 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-FileCopyrightText: 2018 The Abseil Authors
// SPDX-License-Identifier: MIT
#pragma once
#include <array> // array
#include <cstddef> // size_t
#include <type_traits> // conditional, enable_if, false_type, integral_constant, is_constructible, is_integral, is_same, remove_cv, remove_reference, true_type
#include <utility> // index_sequence, make_index_sequence, index_sequence_for
#include <nlohmann/detail/macro_scope.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{
template<typename T>
using uncvref_t = typename std::remove_cv<typename std::remove_reference<T>::type>::type;
#ifdef JSON_HAS_CPP_14
// the following utilities are natively available in C++14
using std::enable_if_t;
using std::index_sequence;
using std::make_index_sequence;
using std::index_sequence_for;
#else
// alias templates to reduce boilerplate
template<bool B, typename T = void>
using enable_if_t = typename std::enable_if<B, T>::type;
// The following code is taken from https://github.com/abseil/abseil-cpp/blob/10cb35e459f5ecca5b2ff107635da0bfa41011b4/absl/utility/utility.h
// which is part of Google Abseil (https://github.com/abseil/abseil-cpp), licensed under the Apache License 2.0.
//// START OF CODE FROM GOOGLE ABSEIL
// integer_sequence
//
// Class template representing a compile-time integer sequence. An instantiation
// of `integer_sequence<T, Ints...>` has a sequence of integers encoded in its
// type through its template arguments (which is a common need when
// working with C++11 variadic templates). `absl::integer_sequence` is designed
// to be a drop-in replacement for C++14's `std::integer_sequence`.
//
// Example:
//
// template< class T, T... Ints >
// void user_function(integer_sequence<T, Ints...>);
//
// int main()
// {
// // user_function's `T` will be deduced to `int` and `Ints...`
// // will be deduced to `0, 1, 2, 3, 4`.
// user_function(make_integer_sequence<int, 5>());
// }
template <typename T, T... Ints>
struct integer_sequence
{
using value_type = T;
static constexpr std::size_t size() noexcept
{
return sizeof...(Ints);
}
};
// index_sequence
//
// A helper template for an `integer_sequence` of `size_t`,
// `absl::index_sequence` is designed to be a drop-in replacement for C++14's
// `std::index_sequence`.
template <size_t... Ints>
using index_sequence = integer_sequence<size_t, Ints...>;
namespace utility_internal
{
template <typename Seq, size_t SeqSize, size_t Rem>
struct Extend;
// Note that SeqSize == sizeof...(Ints). It's passed explicitly for efficiency.
template <typename T, T... Ints, size_t SeqSize>
struct Extend<integer_sequence<T, Ints...>, SeqSize, 0>
{
using type = integer_sequence < T, Ints..., (Ints + SeqSize)... >;
};
template <typename T, T... Ints, size_t SeqSize>
struct Extend<integer_sequence<T, Ints...>, SeqSize, 1>
{
using type = integer_sequence < T, Ints..., (Ints + SeqSize)..., 2 * SeqSize >;
};
// Recursion helper for 'make_integer_sequence<T, N>'.
// 'Gen<T, N>::type' is an alias for 'integer_sequence<T, 0, 1, ... N-1>'.
template <typename T, size_t N>
struct Gen
{
using type =
typename Extend < typename Gen < T, N / 2 >::type, N / 2, N % 2 >::type;
};
template <typename T>
struct Gen<T, 0>
{
using type = integer_sequence<T>;
};
} // namespace utility_internal
// Compile-time sequences of integers
// make_integer_sequence
//
// This template alias is equivalent to
// `integer_sequence<int, 0, 1, ..., N-1>`, and is designed to be a drop-in
// replacement for C++14's `std::make_integer_sequence`.
template <typename T, T N>
using make_integer_sequence = typename utility_internal::Gen<T, N>::type;
// make_index_sequence
//
// This template alias is equivalent to `index_sequence<0, 1, ..., N-1>`,
// and is designed to be a drop-in replacement for C++14's
// `std::make_index_sequence`.
template <size_t N>
using make_index_sequence = make_integer_sequence<size_t, N>;
// index_sequence_for
//
// Converts a typename pack into an index sequence of the same length, and
// is designed to be a drop-in replacement for C++14's
// `std::index_sequence_for()`
template <typename... Ts>
using index_sequence_for = make_index_sequence<sizeof...(Ts)>;
//// END OF CODE FROM GOOGLE ABSEIL
#endif
// dispatch utility (taken from ranges-v3)
template<unsigned N> struct priority_tag : priority_tag < N - 1 > {};
template<> struct priority_tag<0> {};
// taken from ranges-v3
template<typename T>
struct static_const
{
static JSON_INLINE_VARIABLE constexpr T value{};
};
#ifndef JSON_HAS_CPP_17
template<typename T>
constexpr T static_const<T>::value;
#endif
template<typename T, typename... Args>
inline constexpr std::array<T, sizeof...(Args)> make_array(Args&& ... args)
{
return std::array<T, sizeof...(Args)> {{static_cast<T>(std::forward<Args>(args))...}};
}
} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
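As a quick illustration of what these sequences are for, the self-contained sketch below uses the standard C++14 names that the backport above mirrors: an index_sequence turns positional tuple access into a pack expansion (function names are invented for the example).

// --- illustrative use of index_sequence (editor's example) ---
#include <array>
#include <cstddef>
#include <tuple>
#include <utility>

template<typename Tuple, std::size_t... Is>
std::array<int, sizeof...(Is)> to_array_impl(const Tuple& t, std::index_sequence<Is...>)
{
    return {{std::get<Is>(t)...}};  // expands to get<0>(t), get<1>(t), ...
}

template<typename... Ts>
std::array<int, sizeof...(Ts)> to_array(const std::tuple<Ts...>& t)
{
    return to_array_impl(t, std::index_sequence_for<Ts...>{});
}

int main()
{
    auto a = to_array(std::make_tuple(1, 2, 3));
    return a[2] == 3 ? 0 : 1;
}
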


@@ -1,70 +0,0 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <type_traits>
#include <nlohmann/detail/meta/void_t.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{
// https://en.cppreference.com/w/cpp/experimental/is_detected
struct nonesuch
{
nonesuch() = delete;
~nonesuch() = delete;
nonesuch(nonesuch const&) = delete;
nonesuch(nonesuch const&&) = delete;
void operator=(nonesuch const&) = delete;
void operator=(nonesuch&&) = delete;
};
template<class Default,
class AlwaysVoid,
template<class...> class Op,
class... Args>
struct detector
{
using value_t = std::false_type;
using type = Default;
};
template<class Default, template<class...> class Op, class... Args>
struct detector<Default, void_t<Op<Args...>>, Op, Args...>
{
using value_t = std::true_type;
using type = Op<Args...>;
};
template<template<class...> class Op, class... Args>
using is_detected = typename detector<nonesuch, void, Op, Args...>::value_t;
template<template<class...> class Op, class... Args>
struct is_detected_lazy : is_detected<Op, Args...> { };
template<template<class...> class Op, class... Args>
using detected_t = typename detector<nonesuch, void, Op, Args...>::type;
template<class Default, template<class...> class Op, class... Args>
using detected_or = detector<Default, void, Op, Args...>;
template<class Default, template<class...> class Op, class... Args>
using detected_or_t = typename detected_or<Default, Op, Args...>::type;
template<class Expected, template<class...> class Op, class... Args>
using is_detected_exact = std::is_same<Expected, detected_t<Op, Args...>>;
template<class To, template<class...> class Op, class... Args>
using is_detected_convertible =
std::is_convertible<detected_t<Op, Args...>, To>;
} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
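The same detection idiom in miniature, outside the library (editor's sketch; has_size is a made-up trait): the partial specialization is selected only when the decltype expression is well-formed, so the query never produces a hard error.

// --- illustrative detection-idiom trait (editor's example) ---
#include <string>
#include <type_traits>
#include <utility>

template<typename...>
using void_t = void;

template<typename T, typename = void>
struct has_size : std::false_type {};

template<typename T>
struct has_size<T, void_t<decltype(std::declval<T&>().size())>>
    : std::true_type {};

static_assert(has_size<std::string>::value, "std::string has size()");
static_assert(!has_size<int>::value, "int has no size()");

int main() { return 0; }
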


@@ -1,21 +0,0 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <nlohmann/detail/abi_macros.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{
// dispatching helper struct
template <class T> struct identity_tag {};
} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
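A minimal sketch of the tag-dispatch pattern this helper enables (editor's example; the make overloads are invented): the overload is chosen by the type carried in the tag, without ever constructing a T.

// --- illustrative tag dispatch (editor's example) ---
#include <string>

template<class T> struct identity_tag {};

int make(identity_tag<int>) { return 42; }
std::string make(identity_tag<std::string>) { return "forty-two"; }

int main()
{
    auto i = make(identity_tag<int>{});
    auto s = make(identity_tag<std::string>{});
    return (i == 42 && s == "forty-two") ? 0 : 1;
}
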


@@ -1,159 +0,0 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <cstddef> // size_t
#include <utility> // declval
#include <string> // string
#include <nlohmann/detail/abi_macros.hpp>
#include <nlohmann/detail/meta/detected.hpp>
#include <nlohmann/detail/meta/type_traits.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{
template<typename T>
using null_function_t = decltype(std::declval<T&>().null());
template<typename T>
using boolean_function_t =
decltype(std::declval<T&>().boolean(std::declval<bool>()));
template<typename T, typename Integer>
using number_integer_function_t =
decltype(std::declval<T&>().number_integer(std::declval<Integer>()));
template<typename T, typename Unsigned>
using number_unsigned_function_t =
decltype(std::declval<T&>().number_unsigned(std::declval<Unsigned>()));
template<typename T, typename Float, typename String>
using number_float_function_t = decltype(std::declval<T&>().number_float(
std::declval<Float>(), std::declval<const String&>()));
template<typename T, typename String>
using string_function_t =
decltype(std::declval<T&>().string(std::declval<String&>()));
template<typename T, typename Binary>
using binary_function_t =
decltype(std::declval<T&>().binary(std::declval<Binary&>()));
template<typename T>
using start_object_function_t =
decltype(std::declval<T&>().start_object(std::declval<std::size_t>()));
template<typename T, typename String>
using key_function_t =
decltype(std::declval<T&>().key(std::declval<String&>()));
template<typename T>
using end_object_function_t = decltype(std::declval<T&>().end_object());
template<typename T>
using start_array_function_t =
decltype(std::declval<T&>().start_array(std::declval<std::size_t>()));
template<typename T>
using end_array_function_t = decltype(std::declval<T&>().end_array());
template<typename T, typename Exception>
using parse_error_function_t = decltype(std::declval<T&>().parse_error(
std::declval<std::size_t>(), std::declval<const std::string&>(),
std::declval<const Exception&>()));
template<typename SAX, typename BasicJsonType>
struct is_sax
{
private:
static_assert(is_basic_json<BasicJsonType>::value,
"BasicJsonType must be of type basic_json<...>");
using number_integer_t = typename BasicJsonType::number_integer_t;
using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
using number_float_t = typename BasicJsonType::number_float_t;
using string_t = typename BasicJsonType::string_t;
using binary_t = typename BasicJsonType::binary_t;
using exception_t = typename BasicJsonType::exception;
public:
static constexpr bool value =
is_detected_exact<bool, null_function_t, SAX>::value &&
is_detected_exact<bool, boolean_function_t, SAX>::value &&
is_detected_exact<bool, number_integer_function_t, SAX, number_integer_t>::value &&
is_detected_exact<bool, number_unsigned_function_t, SAX, number_unsigned_t>::value &&
is_detected_exact<bool, number_float_function_t, SAX, number_float_t, string_t>::value &&
is_detected_exact<bool, string_function_t, SAX, string_t>::value &&
is_detected_exact<bool, binary_function_t, SAX, binary_t>::value &&
is_detected_exact<bool, start_object_function_t, SAX>::value &&
is_detected_exact<bool, key_function_t, SAX, string_t>::value &&
is_detected_exact<bool, end_object_function_t, SAX>::value &&
is_detected_exact<bool, start_array_function_t, SAX>::value &&
is_detected_exact<bool, end_array_function_t, SAX>::value &&
is_detected_exact<bool, parse_error_function_t, SAX, exception_t>::value;
};
template<typename SAX, typename BasicJsonType>
struct is_sax_static_asserts
{
private:
static_assert(is_basic_json<BasicJsonType>::value,
"BasicJsonType must be of type basic_json<...>");
using number_integer_t = typename BasicJsonType::number_integer_t;
using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
using number_float_t = typename BasicJsonType::number_float_t;
using string_t = typename BasicJsonType::string_t;
using binary_t = typename BasicJsonType::binary_t;
using exception_t = typename BasicJsonType::exception;
public:
static_assert(is_detected_exact<bool, null_function_t, SAX>::value,
"Missing/invalid function: bool null()");
static_assert(is_detected_exact<bool, boolean_function_t, SAX>::value,
"Missing/invalid function: bool boolean(bool)");
static_assert(
is_detected_exact<bool, number_integer_function_t, SAX,
number_integer_t>::value,
"Missing/invalid function: bool number_integer(number_integer_t)");
static_assert(
is_detected_exact<bool, number_unsigned_function_t, SAX,
number_unsigned_t>::value,
"Missing/invalid function: bool number_unsigned(number_unsigned_t)");
static_assert(is_detected_exact<bool, number_float_function_t, SAX,
number_float_t, string_t>::value,
"Missing/invalid function: bool number_float(number_float_t, const string_t&)");
static_assert(
is_detected_exact<bool, string_function_t, SAX, string_t>::value,
"Missing/invalid function: bool string(string_t&)");
static_assert(
is_detected_exact<bool, binary_function_t, SAX, binary_t>::value,
"Missing/invalid function: bool binary(binary_t&)");
static_assert(is_detected_exact<bool, start_object_function_t, SAX>::value,
"Missing/invalid function: bool start_object(std::size_t)");
static_assert(is_detected_exact<bool, key_function_t, SAX, string_t>::value,
"Missing/invalid function: bool key(string_t&)");
static_assert(is_detected_exact<bool, end_object_function_t, SAX>::value,
"Missing/invalid function: bool end_object()");
static_assert(is_detected_exact<bool, start_array_function_t, SAX>::value,
"Missing/invalid function: bool start_array(std::size_t)");
static_assert(is_detected_exact<bool, end_array_function_t, SAX>::value,
"Missing/invalid function: bool end_array()");
static_assert(
is_detected_exact<bool, parse_error_function_t, SAX, exception_t>::value,
"Missing/invalid function: bool parse_error(std::size_t, const "
"std::string&, const exception&)");
};
} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
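For reference, a handler satisfying this interface can be as small as the sketch below (editor's example; key_counter is a made-up name). Every callback returns bool, true meaning "continue parsing", matching the signatures the static_asserts spell out.

// --- illustrative SAX handler (editor's example) ---
#include <nlohmann/json.hpp>
#include <cstddef>
#include <string>

using json = nlohmann::json;

struct key_counter
{
    std::size_t keys = 0;

    bool null() { return true; }
    bool boolean(bool) { return true; }
    bool number_integer(json::number_integer_t) { return true; }
    bool number_unsigned(json::number_unsigned_t) { return true; }
    bool number_float(json::number_float_t, const std::string&) { return true; }
    bool string(std::string&) { return true; }
    bool binary(json::binary_t&) { return true; }
    bool start_object(std::size_t) { return true; }
    bool key(std::string&) { ++keys; return true; }  // count object keys
    bool end_object() { return true; }
    bool start_array(std::size_t) { return true; }
    bool end_array() { return true; }
    bool parse_error(std::size_t, const std::string&, const json::exception&)
    {
        return false;  // abort on errors
    }
};

int main()
{
    key_counter handler;
    json::sax_parse(R"({"a": 1, "b": [true, null]})", &handler);
    return handler.keys == 2 ? 0 : 1;
}
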


@@ -1,29 +0,0 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <nlohmann/detail/macro_scope.hpp>
#if JSON_HAS_EXPERIMENTAL_FILESYSTEM
#include <experimental/filesystem>
NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{
namespace std_fs = std::experimental::filesystem;
} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
#elif JSON_HAS_FILESYSTEM
#include <filesystem>
NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{
namespace std_fs = std::filesystem;
} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
#endif
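User code can apply the same fallback with a feature test (editor's sketch; pre-C++17 toolchains may additionally need to link against stdc++fs):

// --- illustrative filesystem fallback (editor's example) ---
#if defined(__has_include) && __has_include(<filesystem>)
    #include <filesystem>
    namespace fs = std::filesystem;
#else
    #include <experimental/filesystem>
    namespace fs = std::experimental::filesystem;
#endif

int main()
{
    return fs::path("a/b.json").extension() == ".json" ? 0 : 1;
}
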


@@ -1,795 +0,0 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#pragma once
#include <limits> // numeric_limits
#include <type_traits> // false_type, is_constructible, is_integral, is_same, true_type
#include <utility> // declval
#include <tuple> // tuple
#include <string> // char_traits
#include <nlohmann/detail/iterators/iterator_traits.hpp>
#include <nlohmann/detail/macro_scope.hpp>
#include <nlohmann/detail/meta/call_std/begin.hpp>
#include <nlohmann/detail/meta/call_std/end.hpp>
#include <nlohmann/detail/meta/cpp_future.hpp>
#include <nlohmann/detail/meta/detected.hpp>
#include <nlohmann/json_fwd.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
/*!
@brief detail namespace with internal helper functions
This namespace collects functions that should not be exposed,
implementations of some @ref basic_json methods, and meta-programming helpers.
@since version 2.1.0
*/
namespace detail
{
/////////////
// helpers //
/////////////
// Note to maintainers:
//
// Every trait in this file expects a non-CV-qualified type.
// The only exceptions are in the 'aliases for detected' section
// (i.e. those of the form: decltype(T::member_function(std::declval<T>())))
//
// In this case, T has to be properly CV-qualified to constrain the function arguments
// (e.g. to_json(BasicJsonType&, const T&))
template<typename> struct is_basic_json : std::false_type {};
NLOHMANN_BASIC_JSON_TPL_DECLARATION
struct is_basic_json<NLOHMANN_BASIC_JSON_TPL> : std::true_type {};
// used by exceptions create() member functions
// true_type for pointer to possibly cv-qualified basic_json or std::nullptr_t
// false_type otherwise
template<typename BasicJsonContext>
struct is_basic_json_context :
std::integral_constant < bool,
is_basic_json<typename std::remove_cv<typename std::remove_pointer<BasicJsonContext>::type>::type>::value
|| std::is_same<BasicJsonContext, std::nullptr_t>::value >
{};
//////////////////////
// json_ref helpers //
//////////////////////
template<typename>
class json_ref;
template<typename>
struct is_json_ref : std::false_type {};
template<typename T>
struct is_json_ref<json_ref<T>> : std::true_type {};
//////////////////////////
// aliases for detected //
//////////////////////////
template<typename T>
using mapped_type_t = typename T::mapped_type;
template<typename T>
using key_type_t = typename T::key_type;
template<typename T>
using value_type_t = typename T::value_type;
template<typename T>
using difference_type_t = typename T::difference_type;
template<typename T>
using pointer_t = typename T::pointer;
template<typename T>
using reference_t = typename T::reference;
template<typename T>
using iterator_category_t = typename T::iterator_category;
template<typename T, typename... Args>
using to_json_function = decltype(T::to_json(std::declval<Args>()...));
template<typename T, typename... Args>
using from_json_function = decltype(T::from_json(std::declval<Args>()...));
template<typename T, typename U>
using get_template_function = decltype(std::declval<T>().template get<U>());
// trait checking if JSONSerializer<T>::from_json(json const&, udt&) exists
template<typename BasicJsonType, typename T, typename = void>
struct has_from_json : std::false_type {};
// trait checking if j.get<T> is valid
// use this trait instead of std::is_constructible or std::is_convertible,
// both rely on, or make use of implicit conversions, and thus fail when T
// has several constructors/operator= (see https://github.com/nlohmann/json/issues/958)
template <typename BasicJsonType, typename T>
struct is_getable
{
static constexpr bool value = is_detected<get_template_function, const BasicJsonType&, T>::value;
};
template<typename BasicJsonType, typename T>
struct has_from_json < BasicJsonType, T, enable_if_t < !is_basic_json<T>::value >>
{
using serializer = typename BasicJsonType::template json_serializer<T, void>;
static constexpr bool value =
is_detected_exact<void, from_json_function, serializer,
const BasicJsonType&, T&>::value;
};
// This trait checks if JSONSerializer<T>::from_json(json const&) exists
// this overload is used for non-default-constructible user-defined-types
template<typename BasicJsonType, typename T, typename = void>
struct has_non_default_from_json : std::false_type {};
template<typename BasicJsonType, typename T>
struct has_non_default_from_json < BasicJsonType, T, enable_if_t < !is_basic_json<T>::value >>
{
using serializer = typename BasicJsonType::template json_serializer<T, void>;
static constexpr bool value =
is_detected_exact<T, from_json_function, serializer,
const BasicJsonType&>::value;
};
// This trait checks if BasicJsonType::json_serializer<T>::to_json exists
// Do not evaluate the trait when T is a basic_json type, to avoid template instantiation infinite recursion.
template<typename BasicJsonType, typename T, typename = void>
struct has_to_json : std::false_type {};
template<typename BasicJsonType, typename T>
struct has_to_json < BasicJsonType, T, enable_if_t < !is_basic_json<T>::value >>
{
using serializer = typename BasicJsonType::template json_serializer<T, void>;
static constexpr bool value =
is_detected_exact<void, to_json_function, serializer, BasicJsonType&,
T>::value;
};
template<typename T>
using detect_key_compare = typename T::key_compare;
template<typename T>
struct has_key_compare : std::integral_constant<bool, is_detected<detect_key_compare, T>::value> {};
// obtains the actual object key comparator
template<typename BasicJsonType>
struct actual_object_comparator
{
using object_t = typename BasicJsonType::object_t;
using object_comparator_t = typename BasicJsonType::default_object_comparator_t;
using type = typename std::conditional < has_key_compare<object_t>::value,
typename object_t::key_compare, object_comparator_t>::type;
};
template<typename BasicJsonType>
using actual_object_comparator_t = typename actual_object_comparator<BasicJsonType>::type;
/////////////////
// char_traits //
/////////////////
// Primary template of char_traits calls std char_traits
template<typename T>
struct char_traits : std::char_traits<T>
{};
// Explicitly define char traits for unsigned char since it is not standard
template<>
struct char_traits<unsigned char> : std::char_traits<char>
{
using char_type = unsigned char;
using int_type = uint64_t;
// Redefine to_int_type function
static int_type to_int_type(char_type c) noexcept
{
return static_cast<int_type>(c);
}
static char_type to_char_type(int_type i) noexcept
{
return static_cast<char_type>(i);
}
static constexpr int_type eof() noexcept
{
return static_cast<int_type>(EOF);
}
};
// Explicitly define char traits for signed char since it is not standard
template<>
struct char_traits<signed char> : std::char_traits<char>
{
using char_type = signed char;
using int_type = uint64_t;
// Redefine to_int_type function
static int_type to_int_type(char_type c) noexcept
{
return static_cast<int_type>(c);
}
static char_type to_char_type(int_type i) noexcept
{
return static_cast<char_type>(i);
}
static constexpr int_type eof() noexcept
{
return static_cast<int_type>(EOF);
}
};
///////////////////
// is_ functions //
///////////////////
// https://en.cppreference.com/w/cpp/types/conjunction
template<class...> struct conjunction : std::true_type { };
template<class B> struct conjunction<B> : B { };
template<class B, class... Bn>
struct conjunction<B, Bn...>
: std::conditional<static_cast<bool>(B::value), conjunction<Bn...>, B>::type {};
// https://en.cppreference.com/w/cpp/types/negation
template<class B> struct negation : std::integral_constant < bool, !B::value > { };
// Reimplementation of is_constructible and is_default_constructible, due to them being broken for
// std::pair and std::tuple until LWG 2367 fix (see https://cplusplus.github.io/LWG/lwg-defects.html#2367).
// This causes compile errors in e.g. clang 3.5 or gcc 4.9.
template <typename T>
struct is_default_constructible : std::is_default_constructible<T> {};
template <typename T1, typename T2>
struct is_default_constructible<std::pair<T1, T2>>
: conjunction<is_default_constructible<T1>, is_default_constructible<T2>> {};
template <typename T1, typename T2>
struct is_default_constructible<const std::pair<T1, T2>>
: conjunction<is_default_constructible<T1>, is_default_constructible<T2>> {};
template <typename... Ts>
struct is_default_constructible<std::tuple<Ts...>>
: conjunction<is_default_constructible<Ts>...> {};
template <typename... Ts>
struct is_default_constructible<const std::tuple<Ts...>>
: conjunction<is_default_constructible<Ts>...> {};
template <typename T, typename... Args>
struct is_constructible : std::is_constructible<T, Args...> {};
template <typename T1, typename T2>
struct is_constructible<std::pair<T1, T2>> : is_default_constructible<std::pair<T1, T2>> {};
template <typename T1, typename T2>
struct is_constructible<const std::pair<T1, T2>> : is_default_constructible<const std::pair<T1, T2>> {};
template <typename... Ts>
struct is_constructible<std::tuple<Ts...>> : is_default_constructible<std::tuple<Ts...>> {};
template <typename... Ts>
struct is_constructible<const std::tuple<Ts...>> : is_default_constructible<const std::tuple<Ts...>> {};
template<typename T, typename = void>
struct is_iterator_traits : std::false_type {};
template<typename T>
struct is_iterator_traits<iterator_traits<T>>
{
private:
using traits = iterator_traits<T>;
public:
static constexpr auto value =
is_detected<value_type_t, traits>::value &&
is_detected<difference_type_t, traits>::value &&
is_detected<pointer_t, traits>::value &&
is_detected<iterator_category_t, traits>::value &&
is_detected<reference_t, traits>::value;
};
template<typename T>
struct is_range
{
private:
using t_ref = typename std::add_lvalue_reference<T>::type;
using iterator = detected_t<result_of_begin, t_ref>;
using sentinel = detected_t<result_of_end, t_ref>;
// to be 100% correct, it should use https://en.cppreference.com/w/cpp/iterator/input_or_output_iterator
// and https://en.cppreference.com/w/cpp/iterator/sentinel_for
// but reimplementing these would be too much work, as a lot of other concepts are used underneath
static constexpr auto is_iterator_begin =
is_iterator_traits<iterator_traits<iterator>>::value;
public:
static constexpr bool value = !std::is_same<iterator, nonesuch>::value && !std::is_same<sentinel, nonesuch>::value && is_iterator_begin;
};
template<typename R>
using iterator_t = enable_if_t<is_range<R>::value, result_of_begin<decltype(std::declval<R&>())>>;
template<typename T>
using range_value_t = value_type_t<iterator_traits<iterator_t<T>>>;
// The following implementation of is_complete_type is taken from
// https://blogs.msdn.microsoft.com/vcblog/2015/12/02/partial-support-for-expression-sfinae-in-vs-2015-update-1/
// and is written by Xiang Fan who agreed to using it in this library.
template<typename T, typename = void>
struct is_complete_type : std::false_type {};
template<typename T>
struct is_complete_type<T, decltype(void(sizeof(T)))> : std::true_type {};
template<typename BasicJsonType, typename CompatibleObjectType,
typename = void>
struct is_compatible_object_type_impl : std::false_type {};
template<typename BasicJsonType, typename CompatibleObjectType>
struct is_compatible_object_type_impl <
BasicJsonType, CompatibleObjectType,
enable_if_t < is_detected<mapped_type_t, CompatibleObjectType>::value&&
is_detected<key_type_t, CompatibleObjectType>::value >>
{
using object_t = typename BasicJsonType::object_t;
// macOS's is_constructible does not play well with nonesuch...
static constexpr bool value =
is_constructible<typename object_t::key_type,
typename CompatibleObjectType::key_type>::value &&
is_constructible<typename object_t::mapped_type,
typename CompatibleObjectType::mapped_type>::value;
};
template<typename BasicJsonType, typename CompatibleObjectType>
struct is_compatible_object_type
: is_compatible_object_type_impl<BasicJsonType, CompatibleObjectType> {};
template<typename BasicJsonType, typename ConstructibleObjectType,
typename = void>
struct is_constructible_object_type_impl : std::false_type {};
template<typename BasicJsonType, typename ConstructibleObjectType>
struct is_constructible_object_type_impl <
BasicJsonType, ConstructibleObjectType,
enable_if_t < is_detected<mapped_type_t, ConstructibleObjectType>::value&&
is_detected<key_type_t, ConstructibleObjectType>::value >>
{
using object_t = typename BasicJsonType::object_t;
static constexpr bool value =
(is_default_constructible<ConstructibleObjectType>::value &&
(std::is_move_assignable<ConstructibleObjectType>::value ||
std::is_copy_assignable<ConstructibleObjectType>::value) &&
(is_constructible<typename ConstructibleObjectType::key_type,
typename object_t::key_type>::value &&
std::is_same <
typename object_t::mapped_type,
typename ConstructibleObjectType::mapped_type >::value)) ||
(has_from_json<BasicJsonType,
typename ConstructibleObjectType::mapped_type>::value ||
has_non_default_from_json <
BasicJsonType,
typename ConstructibleObjectType::mapped_type >::value);
};
template<typename BasicJsonType, typename ConstructibleObjectType>
struct is_constructible_object_type
: is_constructible_object_type_impl<BasicJsonType,
ConstructibleObjectType> {};
template<typename BasicJsonType, typename CompatibleStringType>
struct is_compatible_string_type
{
static constexpr auto value =
is_constructible<typename BasicJsonType::string_t, CompatibleStringType>::value;
};
template<typename BasicJsonType, typename ConstructibleStringType>
struct is_constructible_string_type
{
// launder type through decltype() to fix compilation failure on ICPC
#ifdef __INTEL_COMPILER
using laundered_type = decltype(std::declval<ConstructibleStringType>());
#else
using laundered_type = ConstructibleStringType;
#endif
static constexpr auto value =
conjunction <
is_constructible<laundered_type, typename BasicJsonType::string_t>,
is_detected_exact<typename BasicJsonType::string_t::value_type,
value_type_t, laundered_type >>::value;
};
template<typename BasicJsonType, typename CompatibleArrayType, typename = void>
struct is_compatible_array_type_impl : std::false_type {};
template<typename BasicJsonType, typename CompatibleArrayType>
struct is_compatible_array_type_impl <
BasicJsonType, CompatibleArrayType,
enable_if_t <
is_detected<iterator_t, CompatibleArrayType>::value&&
is_iterator_traits<iterator_traits<detected_t<iterator_t, CompatibleArrayType>>>::value&&
// special case for types like std::filesystem::path whose iterator's value_type is the type itself
// c.f. https://github.com/nlohmann/json/pull/3073
!std::is_same<CompatibleArrayType, detected_t<range_value_t, CompatibleArrayType>>::value >>
{
static constexpr bool value =
is_constructible<BasicJsonType,
range_value_t<CompatibleArrayType>>::value;
};
template<typename BasicJsonType, typename CompatibleArrayType>
struct is_compatible_array_type
: is_compatible_array_type_impl<BasicJsonType, CompatibleArrayType> {};
template<typename BasicJsonType, typename ConstructibleArrayType, typename = void>
struct is_constructible_array_type_impl : std::false_type {};
template<typename BasicJsonType, typename ConstructibleArrayType>
struct is_constructible_array_type_impl <
BasicJsonType, ConstructibleArrayType,
enable_if_t<std::is_same<ConstructibleArrayType,
typename BasicJsonType::value_type>::value >>
: std::true_type {};
template<typename BasicJsonType, typename ConstructibleArrayType>
struct is_constructible_array_type_impl <
BasicJsonType, ConstructibleArrayType,
enable_if_t < !std::is_same<ConstructibleArrayType,
typename BasicJsonType::value_type>::value&&
!is_compatible_string_type<BasicJsonType, ConstructibleArrayType>::value&&
is_default_constructible<ConstructibleArrayType>::value&&
(std::is_move_assignable<ConstructibleArrayType>::value ||
std::is_copy_assignable<ConstructibleArrayType>::value)&&
is_detected<iterator_t, ConstructibleArrayType>::value&&
is_iterator_traits<iterator_traits<detected_t<iterator_t, ConstructibleArrayType>>>::value&&
is_detected<range_value_t, ConstructibleArrayType>::value&&
// special case for types like std::filesystem::path whose iterator's value_type is the type itself
// c.f. https://github.com/nlohmann/json/pull/3073
!std::is_same<ConstructibleArrayType, detected_t<range_value_t, ConstructibleArrayType>>::value&&
is_complete_type <
detected_t<range_value_t, ConstructibleArrayType >>::value >>
{
using value_type = range_value_t<ConstructibleArrayType>;
static constexpr bool value =
std::is_same<value_type,
typename BasicJsonType::array_t::value_type>::value ||
has_from_json<BasicJsonType,
value_type>::value ||
has_non_default_from_json <
BasicJsonType,
value_type >::value;
};
template<typename BasicJsonType, typename ConstructibleArrayType>
struct is_constructible_array_type
: is_constructible_array_type_impl<BasicJsonType, ConstructibleArrayType> {};
template<typename RealIntegerType, typename CompatibleNumberIntegerType,
typename = void>
struct is_compatible_integer_type_impl : std::false_type {};
template<typename RealIntegerType, typename CompatibleNumberIntegerType>
struct is_compatible_integer_type_impl <
RealIntegerType, CompatibleNumberIntegerType,
enable_if_t < std::is_integral<RealIntegerType>::value&&
std::is_integral<CompatibleNumberIntegerType>::value&&
!std::is_same<bool, CompatibleNumberIntegerType>::value >>
{
// is there an assert somewhere on overflows?
using RealLimits = std::numeric_limits<RealIntegerType>;
using CompatibleLimits = std::numeric_limits<CompatibleNumberIntegerType>;
static constexpr auto value =
is_constructible<RealIntegerType,
CompatibleNumberIntegerType>::value &&
CompatibleLimits::is_integer &&
RealLimits::is_signed == CompatibleLimits::is_signed;
};
template<typename RealIntegerType, typename CompatibleNumberIntegerType>
struct is_compatible_integer_type
: is_compatible_integer_type_impl<RealIntegerType,
CompatibleNumberIntegerType> {};
template<typename BasicJsonType, typename CompatibleType, typename = void>
struct is_compatible_type_impl: std::false_type {};
template<typename BasicJsonType, typename CompatibleType>
struct is_compatible_type_impl <
BasicJsonType, CompatibleType,
enable_if_t<is_complete_type<CompatibleType>::value >>
{
static constexpr bool value =
has_to_json<BasicJsonType, CompatibleType>::value;
};
template<typename BasicJsonType, typename CompatibleType>
struct is_compatible_type
: is_compatible_type_impl<BasicJsonType, CompatibleType> {};
template<typename T1, typename T2>
struct is_constructible_tuple : std::false_type {};
template<typename T1, typename... Args>
struct is_constructible_tuple<T1, std::tuple<Args...>> : conjunction<is_constructible<T1, Args>...> {};
template<typename BasicJsonType, typename T>
struct is_json_iterator_of : std::false_type {};
template<typename BasicJsonType>
struct is_json_iterator_of<BasicJsonType, typename BasicJsonType::iterator> : std::true_type {};
template<typename BasicJsonType>
struct is_json_iterator_of<BasicJsonType, typename BasicJsonType::const_iterator> : std::true_type
{};
// checks if a given type T is a template specialization of Primary
template<template <typename...> class Primary, typename T>
struct is_specialization_of : std::false_type {};
template<template <typename...> class Primary, typename... Args>
struct is_specialization_of<Primary, Primary<Args...>> : std::true_type {};
template<typename T>
using is_json_pointer = is_specialization_of<::nlohmann::json_pointer, uncvref_t<T>>;
// checks if A and B are comparable using Compare functor
template<typename Compare, typename A, typename B, typename = void>
struct is_comparable : std::false_type {};
template<typename Compare, typename A, typename B>
struct is_comparable<Compare, A, B, void_t<
decltype(std::declval<Compare>()(std::declval<A>(), std::declval<B>())),
decltype(std::declval<Compare>()(std::declval<B>(), std::declval<A>()))
>> : std::true_type {};
template<typename T>
using detect_is_transparent = typename T::is_transparent;
// type trait to check if KeyType can be used as object key (without a BasicJsonType)
// see is_usable_as_basic_json_key_type below
template<typename Comparator, typename ObjectKeyType, typename KeyTypeCVRef, bool RequireTransparentComparator = true,
bool ExcludeObjectKeyType = RequireTransparentComparator, typename KeyType = uncvref_t<KeyTypeCVRef>>
using is_usable_as_key_type = typename std::conditional <
is_comparable<Comparator, ObjectKeyType, KeyTypeCVRef>::value
&& !(ExcludeObjectKeyType && std::is_same<KeyType,
ObjectKeyType>::value)
&& (!RequireTransparentComparator
|| is_detected <detect_is_transparent, Comparator>::value)
&& !is_json_pointer<KeyType>::value,
std::true_type,
std::false_type >::type;
// type trait to check if KeyType can be used as object key
// true if:
// - KeyType is comparable with BasicJsonType::object_t::key_type
// - if ExcludeObjectKeyType is true, KeyType is not BasicJsonType::object_t::key_type
// - the comparator is transparent or RequireTransparentComparator is false
// - KeyType is not a JSON iterator or json_pointer
template<typename BasicJsonType, typename KeyTypeCVRef, bool RequireTransparentComparator = true,
bool ExcludeObjectKeyType = RequireTransparentComparator, typename KeyType = uncvref_t<KeyTypeCVRef>>
using is_usable_as_basic_json_key_type = typename std::conditional <
is_usable_as_key_type<typename BasicJsonType::object_comparator_t,
typename BasicJsonType::object_t::key_type, KeyTypeCVRef,
RequireTransparentComparator, ExcludeObjectKeyType>::value
&& !is_json_iterator_of<BasicJsonType, KeyType>::value,
std::true_type,
std::false_type >::type;
template<typename ObjectType, typename KeyType>
using detect_erase_with_key_type = decltype(std::declval<ObjectType&>().erase(std::declval<KeyType>()));
// type trait to check if object_t has an erase() member functions accepting KeyType
template<typename BasicJsonType, typename KeyType>
using has_erase_with_key_type = typename std::conditional <
is_detected <
detect_erase_with_key_type,
typename BasicJsonType::object_t, KeyType >::value,
std::true_type,
std::false_type >::type;
// a naive helper to check if a type is an ordered_map (exploits the fact that
// ordered_map inherits capacity() from std::vector)
template <typename T>
struct is_ordered_map
{
using one = char;
struct two
{
char x[2]; // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays)
};
template <typename C> static one test( decltype(&C::capacity) ) ;
template <typename C> static two test(...);
enum { value = sizeof(test<T>(nullptr)) == sizeof(char) }; // NOLINT(cppcoreguidelines-pro-type-vararg,hicpp-vararg)
};
// to avoid useless casts (see https://github.com/nlohmann/json/issues/2893#issuecomment-889152324)
template < typename T, typename U, enable_if_t < !std::is_same<T, U>::value, int > = 0 >
T conditional_static_cast(U value)
{
return static_cast<T>(value);
}
template<typename T, typename U, enable_if_t<std::is_same<T, U>::value, int> = 0>
T conditional_static_cast(U value)
{
return value;
}
template<typename... Types>
using all_integral = conjunction<std::is_integral<Types>...>;
template<typename... Types>
using all_signed = conjunction<std::is_signed<Types>...>;
template<typename... Types>
using all_unsigned = conjunction<std::is_unsigned<Types>...>;
// there's a disjunction trait in another PR; replace when merged
template<typename... Types>
using same_sign = std::integral_constant < bool,
all_signed<Types...>::value || all_unsigned<Types...>::value >;
template<typename OfType, typename T>
using never_out_of_range = std::integral_constant < bool,
(std::is_signed<OfType>::value && (sizeof(T) < sizeof(OfType)))
|| (same_sign<OfType, T>::value && sizeof(OfType) == sizeof(T)) >;
template<typename OfType, typename T,
bool OfTypeSigned = std::is_signed<OfType>::value,
bool TSigned = std::is_signed<T>::value>
struct value_in_range_of_impl2;
template<typename OfType, typename T>
struct value_in_range_of_impl2<OfType, T, false, false>
{
static constexpr bool test(T val)
{
using CommonType = typename std::common_type<OfType, T>::type;
return static_cast<CommonType>(val) <= static_cast<CommonType>((std::numeric_limits<OfType>::max)());
}
};
template<typename OfType, typename T>
struct value_in_range_of_impl2<OfType, T, true, false>
{
static constexpr bool test(T val)
{
using CommonType = typename std::common_type<OfType, T>::type;
return static_cast<CommonType>(val) <= static_cast<CommonType>((std::numeric_limits<OfType>::max)());
}
};
template<typename OfType, typename T>
struct value_in_range_of_impl2<OfType, T, false, true>
{
static constexpr bool test(T val)
{
using CommonType = typename std::common_type<OfType, T>::type;
return val >= 0 && static_cast<CommonType>(val) <= static_cast<CommonType>((std::numeric_limits<OfType>::max)());
}
};
template<typename OfType, typename T>
struct value_in_range_of_impl2<OfType, T, true, true>
{
static constexpr bool test(T val)
{
using CommonType = typename std::common_type<OfType, T>::type;
return static_cast<CommonType>(val) >= static_cast<CommonType>((std::numeric_limits<OfType>::min)())
&& static_cast<CommonType>(val) <= static_cast<CommonType>((std::numeric_limits<OfType>::max)());
}
};
template<typename OfType, typename T,
bool NeverOutOfRange = never_out_of_range<OfType, T>::value,
typename = detail::enable_if_t<all_integral<OfType, T>::value>>
struct value_in_range_of_impl1;
template<typename OfType, typename T>
struct value_in_range_of_impl1<OfType, T, false>
{
static constexpr bool test(T val)
{
return value_in_range_of_impl2<OfType, T>::test(val);
}
};
template<typename OfType, typename T>
struct value_in_range_of_impl1<OfType, T, true>
{
static constexpr bool test(T /*val*/)
{
return true;
}
};
template<typename OfType, typename T>
inline constexpr bool value_in_range_of(T val)
{
return value_in_range_of_impl1<OfType, T>::test(val);
}
template<bool Value>
using bool_constant = std::integral_constant<bool, Value>;
///////////////////////////////////////////////////////////////////////////////
// is_c_string
///////////////////////////////////////////////////////////////////////////////
namespace impl
{
template<typename T>
inline constexpr bool is_c_string()
{
using TUnExt = typename std::remove_extent<T>::type;
using TUnCVExt = typename std::remove_cv<TUnExt>::type;
using TUnPtr = typename std::remove_pointer<T>::type;
using TUnCVPtr = typename std::remove_cv<TUnPtr>::type;
return
(std::is_array<T>::value && std::is_same<TUnCVExt, char>::value)
|| (std::is_pointer<T>::value && std::is_same<TUnCVPtr, char>::value);
}
} // namespace impl
// checks whether T is a [cv] char */[cv] char[] C string
template<typename T>
struct is_c_string : bool_constant<impl::is_c_string<T>()> {};
template<typename T>
using is_c_string_uncvref = is_c_string<uncvref_t<T>>;
///////////////////////////////////////////////////////////////////////////////
// is_transparent
///////////////////////////////////////////////////////////////////////////////
namespace impl
{
template<typename T>
inline constexpr bool is_transparent()
{
return is_detected<detect_is_transparent, T>::value;
}
} // namespace impl
// checks whether T has a member named is_transparent
template<typename T>
struct is_transparent : bool_constant<impl::is_transparent<T>()> {};
///////////////////////////////////////////////////////////////////////////////
} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
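To make the value_in_range_of machinery concrete, here is a compressed standalone equivalent of its four specializations (editor's sketch; fits_in is a made-up name): compare through the common type so that mixed signedness cannot trigger implicit-conversion surprises, and reject negative inputs for unsigned targets up front.

// --- illustrative range check (editor's example) ---
#include <cstdint>
#include <limits>
#include <type_traits>

template<typename OfType, typename T>
constexpr bool fits_in(T val)
{
    using Common = typename std::common_type<OfType, T>::type;
    return (std::is_signed<T>::value && val < 0)
           ? (std::is_signed<OfType>::value
              && static_cast<Common>(val) >= static_cast<Common>((std::numeric_limits<OfType>::min)()))
           : static_cast<Common>(val) <= static_cast<Common>((std::numeric_limits<OfType>::max)());
}

static_assert(fits_in<std::uint8_t>(255), "255 fits into uint8_t");
static_assert(!fits_in<std::uint8_t>(256), "256 does not fit");
static_assert(!fits_in<std::uint8_t>(-1), "negative values never fit an unsigned type");

int main() { return 0; }
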

Some files were not shown because too many files have changed in this diff.