Compare commits
294 commits: optimize_m ... v1.0.4
.clang-tidy

@@ -5,11 +5,12 @@ Checks: '-*,
cppcoreguidelines-*,
modernize-*,
performance-*,
-modernize-use-nodiscard,
-cppcoreguidelines-pro-type-vararg,
-modernize-use-trailing-return-type,
-bugprone-exception-escape'

HeaderFilterRegex: 'src/*'
HeaderFilterRegex: 'bayesnet/*'
AnalyzeTemporaryDtors: false
WarningsAsErrors: ''
FormatStyle: file
7  .gitignore  vendored

@@ -31,7 +31,12 @@
*.exe
*.out
*.app
build/
build/**
build_*/**
*.dSYM/**
cmake-build*/**
.idea
puml/**
.vscode/settings.json
sample/build
14  .gitmodules  vendored

@@ -1,12 +1,20 @@
[submodule "lib/mdlp"]
    path = lib/mdlp
    url = https://github.com/rmontanana/mdlp
    main = main
    update = merge
[submodule "lib/catch2"]
    path = lib/catch2
    main = v2.x
    update = merge
    url = https://github.com/catchorg/Catch2.git
[submodule "lib/argparse"]
    path = lib/argparse
    url = https://github.com/p-ranav/argparse
[submodule "lib/json"]
    path = lib/json
    url = https://github.com/nlohmann/json.git
    master = master
    update = merge
[submodule "lib/folding"]
    path = lib/folding
    url = https://github.com/rmontanana/folding
    main = main
    update = merge
18  .vscode/c_cpp_properties.json  vendored  Normal file

@@ -0,0 +1,18 @@
{
    "configurations": [
        {
            "name": "Mac",
            "includePath": [
                "${workspaceFolder}/**"
            ],
            "defines": [],
            "macFrameworkPath": [
                "/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks"
            ],
            "cStandard": "c17",
            "cppStandard": "c++17",
            "compileCommands": "${workspaceFolder}/cmake-build-release/compile_commands.json"
        }
    ],
    "version": 4
}
40  .vscode/launch.json  vendored

@@ -5,47 +5,21 @@
            "type": "lldb",
            "request": "launch",
            "name": "sample",
            "program": "${workspaceFolder}/build/sample/BayesNetSample",
            "program": "${workspaceFolder}/build_release/sample/bayesnet_sample",
            "args": [
                "-d",
                "iris",
                "-m",
                "KDB",
                "-s",
                "271",
                "-p",
                "/Users/rmontanana/Code/discretizbench/datasets/",
                "${workspaceFolder}/tests/data/glass.arff"
            ],
            //"cwd": "${workspaceFolder}/build/sample/",
        },
        {
            "type": "lldb",
            "request": "launch",
            "name": "experiment",
            "program": "${workspaceFolder}/build/src/Platform/main",
            "name": "test",
            "program": "${workspaceFolder}/build_debug/tests/unit_tests_bayesnet",
            "args": [
                "-m",
                "SPODELd",
                "-p",
                "/Users/rmontanana/Code/discretizbench/datasets",
                "--stratified",
                "-d",
                "iris"
                //"-c=\"Metrics Test\"",
                // "-s",
            ],
            "cwd": "/Users/rmontanana/Code/discretizbench",
        },
        {
            "name": "Build & debug active file",
            "type": "cppdbg",
            "request": "launch",
            "program": "${workspaceFolder}/build/bayesnet",
            "args": [],
            "stopAtEntry": false,
            "cwd": "${workspaceFolder}",
            "environment": [],
            "externalConsole": false,
            "MIMode": "lldb",
            "preLaunchTask": "CMake: build"
            "cwd": "${workspaceFolder}/build_debug/tests",
        }
    ]
}
109  .vscode/settings.json  vendored

@@ -1,109 +0,0 @@
{
    "files.associations": {
        "*.rmd": "markdown",
        "*.py": "python",
        "vector": "cpp",
        "__bit_reference": "cpp",
        "__bits": "cpp",
        "__config": "cpp",
        "__debug": "cpp",
        "__errc": "cpp",
        "__hash_table": "cpp",
        "__locale": "cpp",
        "__mutex_base": "cpp",
        "__node_handle": "cpp",
        "__nullptr": "cpp",
        "__split_buffer": "cpp",
        "__string": "cpp",
        "__threading_support": "cpp",
        "__tuple": "cpp",
        "array": "cpp",
        "atomic": "cpp",
        "bitset": "cpp",
        "cctype": "cpp",
        "chrono": "cpp",
        "clocale": "cpp",
        "cmath": "cpp",
        "compare": "cpp",
        "complex": "cpp",
        "concepts": "cpp",
        "cstdarg": "cpp",
        "cstddef": "cpp",
        "cstdint": "cpp",
        "cstdio": "cpp",
        "cstdlib": "cpp",
        "cstring": "cpp",
        "ctime": "cpp",
        "cwchar": "cpp",
        "cwctype": "cpp",
        "exception": "cpp",
        "initializer_list": "cpp",
        "ios": "cpp",
        "iosfwd": "cpp",
        "istream": "cpp",
        "limits": "cpp",
        "locale": "cpp",
        "memory": "cpp",
        "mutex": "cpp",
        "new": "cpp",
        "optional": "cpp",
        "ostream": "cpp",
        "ratio": "cpp",
        "sstream": "cpp",
        "stdexcept": "cpp",
        "streambuf": "cpp",
        "string": "cpp",
        "string_view": "cpp",
        "system_error": "cpp",
        "tuple": "cpp",
        "type_traits": "cpp",
        "typeinfo": "cpp",
        "unordered_map": "cpp",
        "variant": "cpp",
        "algorithm": "cpp",
        "iostream": "cpp",
        "iomanip": "cpp",
        "numeric": "cpp",
        "set": "cpp",
        "__tree": "cpp",
        "deque": "cpp",
        "list": "cpp",
        "map": "cpp",
        "unordered_set": "cpp",
        "any": "cpp",
        "condition_variable": "cpp",
        "forward_list": "cpp",
        "fstream": "cpp",
        "stack": "cpp",
        "thread": "cpp",
        "__memory": "cpp",
        "filesystem": "cpp",
        "*.toml": "toml",
        "utility": "cpp",
        "__verbose_abort": "cpp",
        "bit": "cpp",
        "random": "cpp",
        "*.tcc": "cpp",
        "functional": "cpp",
        "iterator": "cpp",
        "memory_resource": "cpp",
        "format": "cpp",
        "valarray": "cpp",
        "regex": "cpp",
        "span": "cpp",
        "cfenv": "cpp",
        "cinttypes": "cpp",
        "csetjmp": "cpp",
        "future": "cpp",
        "queue": "cpp",
        "typeindex": "cpp",
        "shared_mutex": "cpp",
        "*.ipp": "cpp",
        "cassert": "cpp",
        "charconv": "cpp",
        "source_location": "cpp",
        "ranges": "cpp"
    },
    "cmake.configureOnOpen": false,
    "C_Cpp.default.configurationProvider": "ms-vscode.cmake-tools"
}
23  .vscode/tasks.json  vendored

@@ -32,6 +32,29 @@
            ],
            "group": "build",
            "detail": "Task generated by Debugger."
        },
        {
            "type": "cppbuild",
            "label": "C/C++: g++ build active file",
            "command": "/usr/bin/g++",
            "args": [
                "-fdiagnostics-color=always",
                "-g",
                "${file}",
                "-o",
                "${fileDirname}/${fileBasenameNoExtension}"
            ],
            "options": {
                "cwd": "${fileDirname}"
            },
            "problemMatcher": [
                "$gcc"
            ],
            "group": {
                "kind": "build",
                "isDefault": true
            },
            "detail": "Task generated by Debugger."
        }
    ]
}
64  CHANGELOG.md  Normal file

@@ -0,0 +1,64 @@
# Changelog

All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [unreleased]

### Added

- Install command and instructions in README.md

### Changed

- The sample app is now a separate target in the Makefile and shows how to use the library with a sample dataset

## [1.0.4] - 2024-03-06

### Added

- Changed the _ascending_ hyperparameter to _order_ with possible values _{"asc", "desc", "rand"}_. Default is _"desc"_.
- Added the _predict_single_ hyperparameter to control whether only the last model created is used to predict in boost training, or the whole ensemble (all the models built so far). Default is _true_.
- Sample app to show how to use the library (make sample)

### Changed

- Changed the library structure, adding folders for each group of classes (classifiers, ensembles, etc.).
- The significances of the models generated under the feature selection algorithm are now computed after all the models have been generated, and an α<sub>t</sub> value is computed and assigned to each model.

## [1.0.3] - 2024-02-25

### Added

- Voting / probability aggregation in Ensemble classes
- predict_proba method in Classifier
- predict_proba method in BoostAODE
- predict_voting parameter in BoostAODE constructor to use voting or probability to predict (default is voting)
- predict_voting hyperparameter in AODE, AODELd and BoostAODE (Ensemble child classes)
- Tests to check predict & predict_proba coherence

## [1.0.2] - 2024-02-20

### Fixed

- Fix bug in BoostAODE: do not include the model if ε<sub>t</sub> is greater than 0.5
- Fix bug in BoostAODE: compare accuracy with the previous accuracy instead of the first of the ensemble when convergence is true

## [1.0.1] - 2024-02-12

### Added

- Notes in Classifier class
- BoostAODE: Add note with used features in initialization with feature selection
- BoostAODE: Add note with the number of models
- BoostAODE: Add note with the number of features used to create models if not all features are used
- Test version number in TestBayesModels
- Add tests with feature_select and notes on BoostAODE

### Fixed

- Network predict test
- Network predict_proba test
- Network score test
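As a quick illustration of the 1.0.4 entries above, a minimal sketch of how the new hyperparameters might be set through the JSON interface declared later in this diff (BaseClassifier::setHyperparameters); the BoostAODE header path and its default constructor are assumptions, since neither appears in this diff:

```cpp
// Hypothetical sketch, not part of the diff: header location and default
// BoostAODE constructor are assumptions.
#include <nlohmann/json.hpp>
#include "bayesnet/ensembles/BoostAODE.h" // assumed path under the new layout

int main()
{
    auto clf = bayesnet::BoostAODE();
    nlohmann::json hyperparameters;
    hyperparameters["order"] = "desc";        // "asc", "desc" or "rand"
    hyperparameters["predict_single"] = true; // last model vs. whole ensemble
    clf.setHyperparameters(hyperparameters);
    // ... fit / predict as with any other classifier
}
```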
CMakeLists.txt

@@ -1,7 +1,7 @@
cmake_minimum_required(VERSION 3.20)

project(BayesNet
    VERSION 0.1.0
    VERSION 1.0.4
    DESCRIPTION "Bayesian Network and basic classifiers Library."
    HOMEPAGE_URL "https://github.com/rmontanana/bayesnet"
    LANGUAGES CXX

@@ -24,25 +24,32 @@ set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")

SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread")
# Options
# -------
option(ENABLE_CLANG_TIDY "Enable to add clang tidy." OFF)
option(ENABLE_TESTING "Unit testing build" OFF)
option(CODE_COVERAGE "Collect coverage from test library" OFF)
option(INSTALL_GTEST "Enable installation of googletest." OFF)

# CMakes modules
# --------------
set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules ${CMAKE_MODULE_PATH})

include(AddGitSubmodule)

if (CMAKE_BUILD_TYPE STREQUAL "Debug")
    MESSAGE("Debug mode")
    set(ENABLE_TESTING ON)
    set(CODE_COVERAGE ON)
endif (CMAKE_BUILD_TYPE STREQUAL "Debug")

if (CODE_COVERAGE)
    enable_testing()
    include(CodeCoverage)
    MESSAGE("Code coverage enabled")
    set(CMAKE_C_FLAGS " ${CMAKE_C_FLAGS} -fprofile-arcs -ftest-coverage")
    set(CMAKE_CXX_FLAGS " ${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage")
    SET(GCC_COVERAGE_LINK_FLAGS " ${GCC_COVERAGE_LINK_FLAGS} -lgcov --coverage")
    enable_testing()
    include(CodeCoverage)
    MESSAGE("Code coverage enabled")
    set(CMAKE_CXX_FLAGS " ${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage -O0 -g")
    SET(GCC_COVERAGE_LINK_FLAGS " ${GCC_COVERAGE_LINK_FLAGS} -lgcov --coverage")
endif (CODE_COVERAGE)

if (ENABLE_CLANG_TIDY)
@@ -53,28 +60,28 @@ endif (ENABLE_CLANG_TIDY)
# ---------------------------------------------
# include(FetchContent)
add_git_submodule("lib/mdlp")
add_git_submodule("lib/argparse")
add_git_submodule("lib/json")

# Subdirectories
# --------------
add_subdirectory(config)
add_subdirectory(lib/Files)
add_subdirectory(src/BayesNet)
add_subdirectory(src/Platform)
add_subdirectory(sample)

file(GLOB BayesNet_HEADERS CONFIGURE_DEPENDS ${BayesNet_SOURCE_DIR}/src/BayesNet/*.h ${BayesNet_SOURCE_DIR}/BayesNet/*.hpp)
file(GLOB BayesNet_SOURCES CONFIGURE_DEPENDS ${BayesNet_SOURCE_DIR}/src/BayesNet/*.cc ${BayesNet_SOURCE_DIR}/src/BayesNet/*.cpp)
file(GLOB Platform_SOURCES CONFIGURE_DEPENDS ${BayesNet_SOURCE_DIR}/src/Platform/*.cc ${BayesNet_SOURCE_DIR}/src/Platform/*.cpp)
add_subdirectory(bayesnet)

# Testing
# -------

if (ENABLE_TESTING)
    MESSAGE("Testing enabled")
    add_git_submodule("lib/catch2")

    add_git_submodule("lib/catch2")
    include(CTest)
    add_subdirectory(tests)
endif (ENABLE_TESTING)

# Installation
# ------------
install(TARGETS BayesNet
    ARCHIVE DESTINATION lib
    LIBRARY DESTINATION lib
    CONFIGURATIONS Release)
install(DIRECTORY bayesnet/ DESTINATION include/bayesnet FILES_MATCHING CONFIGURATIONS Release PATTERN "*.h")
install(FILES ${CMAKE_BINARY_DIR}/configured_files/include/bayesnet/config.h DESTINATION include/bayesnet CONFIGURATIONS Release)
106  Makefile

@@ -1,6 +1,26 @@
SHELL := /bin/bash
.DEFAULT_GOAL := help
.PHONY: coverage setup help build test
.PHONY: coverage setup help buildr buildd test clean debug release sample

f_release = build_release
f_debug = build_debug
app_targets = BayesNet
test_targets = unit_tests_bayesnet
n_procs = -j 16

define ClearTests
	@for t in $(test_targets); do \
		if [ -f $(f_debug)/tests/$$t ]; then \
			echo ">>> Cleaning $$t..." ; \
			rm -f $(f_debug)/tests/$$t ; \
		fi ; \
	done
	@nfiles="$(find . -name "*.gcda" -print0)" ; \
	if test "${nfiles}" != "" ; then \
		find . -name "*.gcda" -print0 | xargs -0 rm 2>/dev/null ;\
	fi ;
endef


setup: ## Install dependencies for tests and coverage
	@if [ "$(shell uname)" = "Darwin" ]; then \
@@ -12,48 +32,72 @@ setup: ## Install dependencies for tests and coverage
	fi

dependency: ## Create a dependency graph diagram of the project (build/dependency.png)
	cd build && cmake .. --graphviz=dependency.dot && dot -Tpng dependency.dot -o dependency.png
	@echo ">>> Creating dependency graph diagram of the project...";
	$(MAKE) debug
	cd $(f_debug) && cmake .. --graphviz=dependency.dot && dot -Tpng dependency.dot -o dependency.png

build: ## Build the main and BayesNetSample
	cmake --build build -t main -t BayesNetSample -j 32
buildd: ## Build the debug targets
	cmake --build $(f_debug) -t $(app_targets) $(n_procs)

clean: ## Clean the debug info
	@echo ">>> Cleaning Debug BayesNet ...";
	find . -name "*.gcda" -print0 | xargs -0 rm
buildr: ## Build the release targets
	cmake --build $(f_release) -t $(app_targets) $(n_procs)

clean: ## Clean the tests info
	@echo ">>> Cleaning Debug BayesNet tests...";
	$(call ClearTests)
	@echo ">>> Done";

uninstall: ## Uninstall library
	@echo ">>> Uninstalling BayesNet...";
	xargs rm < $(f_release)/install_manifest.txt
	@echo ">>> Done";

install: ## Install library
	@echo ">>> Installing BayesNet...";
	@cmake --install $(f_release)
	@echo ">>> Done";

debug: ## Build a debug version of the project
	@echo ">>> Building Debug BayesNet ...";
	@if [ -d ./build ]; then rm -rf ./build; fi
	@mkdir build;
	cmake -S . -B build -D CMAKE_BUILD_TYPE=Debug -D ENABLE_TESTING=ON -D CODE_COVERAGE=ON; \
	cmake --build build -j 32;
	@echo ">>> Building Debug BayesNet...";
	@if [ -d ./$(f_debug) ]; then rm -rf ./$(f_debug); fi
	@mkdir $(f_debug);
	@cmake -S . -B $(f_debug) -D CMAKE_BUILD_TYPE=Debug -D ENABLE_TESTING=ON -D CODE_COVERAGE=ON
	@echo ">>> Done";

release: ## Build a Release version of the project
	@echo ">>> Building Release BayesNet ...";
	@if [ -d ./build ]; then rm -rf ./build; fi
	@mkdir build;
	cmake -S . -B build -D CMAKE_BUILD_TYPE=Release; \
	cmake --build build -t main -t BayesNetSample -j 32;
	@echo ">>> Building Release BayesNet...";
	@if [ -d ./$(f_release) ]; then rm -rf ./$(f_release); fi
	@mkdir $(f_release);
	@cmake -S . -B $(f_release) -D CMAKE_BUILD_TYPE=Release
	@echo ">>> Done";

fname = "tests/data/iris.arff"
sample: ## Build sample
	@echo ">>> Building Sample...";
	@if [ -d ./sample/build ]; then rm -rf ./sample/build; fi
	@cd sample && cmake -B build -S . && cmake --build build -t bayesnet_sample
	sample/build/bayesnet_sample $(fname)
	@echo ">>> Done";

test: ## Run tests
	@echo "* Running tests...";
	find . -name "*.gcda" -print0 | xargs -0 rm
	@cd build; \
	cmake --build . --target unit_tests ;
	@cd build/tests; \
	./unit_tests;
opt = ""
test: ## Run tests (opt="-s") to verbose output the tests, (opt="-c='Test Maximum Spanning Tree'") to run only that section
	@echo ">>> Running BayesNet & Platform tests...";
	@$(MAKE) clean
	@cmake --build $(f_debug) -t $(test_targets) $(n_procs)
	@for t in $(test_targets); do \
		if [ -f $(f_debug)/tests/$$t ]; then \
			cd $(f_debug)/tests ; \
			./$$t $(opt) ; \
		fi ; \
	done
	@echo ">>> Done";

coverage: ## Run tests and generate coverage report (build/index.html)
	@echo "*Building tests...";
	find . -name "*.gcda" -print0 | xargs -0 rm
	@cd build; \
	cmake --build . --target unit_tests ;
	@cd build/tests; \
	./unit_tests;
	gcovr ;
	@echo ">>> Building tests with coverage..."
	@$(MAKE) test
	@gcovr $(f_debug)/tests
	@echo ">>> Done";


help: ## Show help message
	@IFS=$$'\n' ; \
35  README.md

@@ -1,5 +1,36 @@
# BayesNet

Bayesian Network Classifier with libtorch from scratch
[](https://opensource.org/licenses/MIT)

## 1. Introduction
Bayesian Network Classifiers using libtorch from scratch

## Installation

### Release

```bash
make release
make buildr
sudo make install
```

### Debug & Tests

```bash
make debug
make test
make coverage
```

### Sample app

After building and installing the release version, you can run the sample app with the following commands:

```bash
make sample
make sample fname=tests/data/glass.arff
```

## Models

### [BoostAODE](docs/BoostAODE.md)
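A minimal sketch of what such a sample program can look like, using the interface declared in bayesnet/BaseClassifier.h further down this diff (the data here is hypothetical, and the include path assumes the installed header layout):

```cpp
#include <map>
#include <string>
#include <vector>
#include "bayesnet/classifiers/TAN.h" // assumed header location

int main()
{
    // Hypothetical toy dataset: X is n_features x n_samples (see BaseClassifier.h).
    std::vector<std::vector<int>> X = { { 0, 1, 1, 0 }, { 1, 1, 0, 0 } };
    std::vector<int> y = { 1, 1, 0, 0 };
    std::vector<std::string> features = { "f1", "f2" };
    std::string className = "class";
    std::map<std::string, std::vector<int>> states = {
        { "f1", { 0, 1 } }, { "f2", { 0, 1 } }, { "class", { 0, 1 } }
    };
    auto clf = bayesnet::TAN();
    clf.fit(X, y, features, className, states);
    auto y_pred = clf.predict(X); // std::vector<int> of predicted classes
}
```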
12  TAN_iris.dot

@@ -1,12 +0,0 @@
digraph BayesNet {
label=<BayesNet >
fontsize=30
fontcolor=blue
labelloc=t
layout=circo
class [shape=circle, fontcolor=red, fillcolor=lightblue, style=filled ]
class -> sepallength class -> sepalwidth class -> petallength class -> petalwidth petallength [shape=circle]
petallength -> sepallength petalwidth [shape=circle]
sepallength [shape=circle]
sepallength -> sepalwidth sepalwidth [shape=circle]
sepalwidth -> petalwidth }
41  bayesnet/BaseClassifier.h  Normal file

@@ -0,0 +1,41 @@
#ifndef BASE_H
#define BASE_H
#include <vector>
#include <torch/torch.h>
#include <nlohmann/json.hpp>
namespace bayesnet {
    enum status_t { NORMAL, WARNING, ERROR };
    class BaseClassifier {
    public:
        // X is nxm std::vector, y is nx1 std::vector
        virtual BaseClassifier& fit(std::vector<std::vector<int>>& X, std::vector<int>& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states) = 0;
        // X is nxm tensor, y is nx1 tensor
        virtual BaseClassifier& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states) = 0;
        virtual BaseClassifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states) = 0;
        virtual BaseClassifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights) = 0;
        virtual ~BaseClassifier() = default;
        torch::Tensor virtual predict(torch::Tensor& X) = 0;
        std::vector<int> virtual predict(std::vector<std::vector<int >>& X) = 0;
        torch::Tensor virtual predict_proba(torch::Tensor& X) = 0;
        std::vector<std::vector<double>> virtual predict_proba(std::vector<std::vector<int >>& X) = 0;
        status_t virtual getStatus() const = 0;
        float virtual score(std::vector<std::vector<int>>& X, std::vector<int>& y) = 0;
        float virtual score(torch::Tensor& X, torch::Tensor& y) = 0;
        int virtual getNumberOfNodes()const = 0;
        int virtual getNumberOfEdges()const = 0;
        int virtual getNumberOfStates() const = 0;
        int virtual getClassNumStates() const = 0;
        std::vector<std::string> virtual show() const = 0;
        std::vector<std::string> virtual graph(const std::string& title = "") const = 0;
        virtual std::string getVersion() = 0;
        std::vector<std::string> virtual topological_order() = 0;
        std::vector<std::string> virtual getNotes() const = 0;
        void virtual dump_cpt()const = 0;
        virtual void setHyperparameters(const nlohmann::json& hyperparameters) = 0;
        std::vector<std::string>& getValidHyperparameters() { return validHyperparameters; }
    protected:
        virtual void trainModel(const torch::Tensor& weights) = 0;
        std::vector<std::string> validHyperparameters;
    };
}
#endif
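Since every method above is pure virtual, concrete models are consumed through this interface; a small hedged sketch (the report() helper is hypothetical, the calls are the declarations above):

```cpp
#include <iostream>
#include "bayesnet/BaseClassifier.h"

// Hypothetical helper: works with any fitted model through the interface above.
void report(bayesnet::BaseClassifier& clf)
{
    std::cout << "version: " << clf.getVersion() << "\n";
    std::cout << "nodes:   " << clf.getNumberOfNodes() << "\n";
    std::cout << "edges:   " << clf.getNumberOfEdges() << "\n";
    if (clf.getStatus() != bayesnet::NORMAL) {
        // Notes record messages produced during the fit process.
        for (const auto& note : clf.getNotes()) {
            std::cout << "note: " << note << "\n";
        }
    }
}
```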
13  bayesnet/CMakeLists.txt  Normal file

@@ -0,0 +1,13 @@
include_directories(
    ${BayesNet_SOURCE_DIR}/lib/mdlp
    ${BayesNet_SOURCE_DIR}/lib/Files
    ${BayesNet_SOURCE_DIR}/lib/folding
    ${BayesNet_SOURCE_DIR}/lib/json/include
    ${BayesNet_SOURCE_DIR}
    ${CMAKE_BINARY_DIR}/configured_files/include
)

file(GLOB_RECURSE Sources "*.cc")

add_library(BayesNet ${Sources})
target_link_libraries(BayesNet mdlp "${TORCH_LIBRARIES}")
184  bayesnet/classifiers/Classifier.cc  Normal file

@@ -0,0 +1,184 @@
#include "bayesnet/utils/bayesnetUtils.h"
#include "Classifier.h"

namespace bayesnet {
    Classifier::Classifier(Network model) : model(model), m(0), n(0), metrics(Metrics()), fitted(false) {}
    const std::string CLASSIFIER_NOT_FITTED = "Classifier has not been fitted";
    Classifier& Classifier::build(const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights)
    {
        this->features = features;
        this->className = className;
        this->states = states;
        m = dataset.size(1);
        n = dataset.size(0) - 1;
        checkFitParameters();
        auto n_classes = states.at(className).size();
        metrics = Metrics(dataset, features, className, n_classes);
        model.initialize();
        buildModel(weights);
        trainModel(weights);
        fitted = true;
        return *this;
    }
    void Classifier::buildDataset(torch::Tensor& ytmp)
    {
        try {
            auto yresized = torch::transpose(ytmp.view({ ytmp.size(0), 1 }), 0, 1);
            dataset = torch::cat({ dataset, yresized }, 0);
        }
        catch (const std::exception& e) {
            std::cerr << e.what() << '\n';
            std::cout << "X dimensions: " << dataset.sizes() << "\n";
            std::cout << "y dimensions: " << ytmp.sizes() << "\n";
            exit(1);
        }
    }
    void Classifier::trainModel(const torch::Tensor& weights)
    {
        model.fit(dataset, weights, features, className, states);
    }
    // X is nxm where n is the number of features and m the number of samples
    Classifier& Classifier::fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states)
    {
        dataset = X;
        buildDataset(y);
        const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble);
        return build(features, className, states, weights);
    }
    // X is nxm where n is the number of features and m the number of samples
    Classifier& Classifier::fit(std::vector<std::vector<int>>& X, std::vector<int>& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states)
    {
        dataset = torch::zeros({ static_cast<int>(X.size()), static_cast<int>(X[0].size()) }, torch::kInt32);
        for (int i = 0; i < X.size(); ++i) {
            dataset.index_put_({ i, "..." }, torch::tensor(X[i], torch::kInt32));
        }
        auto ytmp = torch::tensor(y, torch::kInt32);
        buildDataset(ytmp);
        const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble);
        return build(features, className, states, weights);
    }
    Classifier& Classifier::fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states)
    {
        this->dataset = dataset;
        const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble);
        return build(features, className, states, weights);
    }
    Classifier& Classifier::fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights)
    {
        this->dataset = dataset;
        return build(features, className, states, weights);
    }
    void Classifier::checkFitParameters()
    {
        if (torch::is_floating_point(dataset)) {
            throw std::invalid_argument("dataset (X, y) must be of type Integer");
        }
        if (n != features.size()) {
            throw std::invalid_argument("Classifier: X " + std::to_string(n) + " and features " + std::to_string(features.size()) + " must have the same number of features");
        }
        if (states.find(className) == states.end()) {
            throw std::invalid_argument("className not found in states");
        }
        for (auto feature : features) {
            if (states.find(feature) == states.end()) {
                throw std::invalid_argument("feature [" + feature + "] not found in states");
            }
        }
    }
    torch::Tensor Classifier::predict(torch::Tensor& X)
    {
        if (!fitted) {
            throw std::logic_error(CLASSIFIER_NOT_FITTED);
        }
        return model.predict(X);
    }
    std::vector<int> Classifier::predict(std::vector<std::vector<int>>& X)
    {
        if (!fitted) {
            throw std::logic_error(CLASSIFIER_NOT_FITTED);
        }
        auto m_ = X[0].size();
        auto n_ = X.size();
        std::vector<std::vector<int>> Xd(n_, std::vector<int>(m_, 0));
        for (auto i = 0; i < n_; i++) {
            Xd[i] = std::vector<int>(X[i].begin(), X[i].end());
        }
        auto yp = model.predict(Xd);
        return yp;
    }
    torch::Tensor Classifier::predict_proba(torch::Tensor& X)
    {
        if (!fitted) {
            throw std::logic_error(CLASSIFIER_NOT_FITTED);
        }
        return model.predict_proba(X);
    }
    std::vector<std::vector<double>> Classifier::predict_proba(std::vector<std::vector<int>>& X)
    {
        if (!fitted) {
            throw std::logic_error(CLASSIFIER_NOT_FITTED);
        }
        auto m_ = X[0].size();
        auto n_ = X.size();
        std::vector<std::vector<int>> Xd(n_, std::vector<int>(m_, 0));
        // Convert to nxm vector
        for (auto i = 0; i < n_; i++) {
            Xd[i] = std::vector<int>(X[i].begin(), X[i].end());
        }
        auto yp = model.predict_proba(Xd);
        return yp;
    }
    float Classifier::score(torch::Tensor& X, torch::Tensor& y)
    {
        torch::Tensor y_pred = predict(X);
        return (y_pred == y).sum().item<float>() / y.size(0);
    }
    float Classifier::score(std::vector<std::vector<int>>& X, std::vector<int>& y)
    {
        if (!fitted) {
            throw std::logic_error(CLASSIFIER_NOT_FITTED);
        }
        return model.score(X, y);
    }
    std::vector<std::string> Classifier::show() const
    {
        return model.show();
    }
    void Classifier::addNodes()
    {
        // Add all nodes to the network
        for (const auto& feature : features) {
            model.addNode(feature);
        }
        model.addNode(className);
    }
    int Classifier::getNumberOfNodes() const
    {
        // Features does not include class
        return fitted ? model.getFeatures().size() : 0;
    }
    int Classifier::getNumberOfEdges() const
    {
        return fitted ? model.getNumEdges() : 0;
    }
    int Classifier::getNumberOfStates() const
    {
        return fitted ? model.getStates() : 0;
    }
    int Classifier::getClassNumStates() const
    {
        return fitted ? model.getClassNumStates() : 0;
    }
    std::vector<std::string> Classifier::topological_order()
    {
        return model.topological_sort();
    }
    void Classifier::dump_cpt() const
    {
        model.dump_cpt();
    }
    void Classifier::setHyperparameters(const nlohmann::json& hyperparameters)
    {
        //For classifiers that don't have hyperparameters
    }
}
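A hedged usage sketch for the tensor overloads above; shapes follow the comments in Classifier.cc (X is n_features x n_samples, integer-typed because checkFitParameters rejects floating-point data). SPODE and the random data are just placeholders:

```cpp
#include <map>
#include <string>
#include <vector>
#include <torch/torch.h>
#include "bayesnet/classifiers/SPODE.h" // assumed header location

int main()
{
    torch::Tensor X = torch::randint(0, 3, { 4, 150 }, torch::kInt32); // 4 features, 150 samples
    torch::Tensor y = torch::randint(0, 2, { 150 }, torch::kInt32);
    std::vector<std::string> features = { "f1", "f2", "f3", "f4" };
    std::string className = "class";
    std::map<std::string, std::vector<int>> states = {
        { "f1", { 0, 1, 2 } }, { "f2", { 0, 1, 2 } },
        { "f3", { 0, 1, 2 } }, { "f4", { 0, 1, 2 } },
        { "class", { 0, 1 } }
    };
    auto clf = bayesnet::SPODE(0); // root = first feature
    clf.fit(X, y, features, className, states);
    torch::Tensor y_pred = clf.predict(X);      // class indices
    torch::Tensor proba = clf.predict_proba(X); // per-class probabilities
    float acc = clf.score(X, y);                // fraction of correct predictions
}
```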
59  bayesnet/classifiers/Classifier.h  Normal file

@@ -0,0 +1,59 @@
#ifndef CLASSIFIER_H
#define CLASSIFIER_H
#include <torch/torch.h>
#include "bayesnet/utils/BayesMetrics.h"
#include "bayesnet/network/Network.h"
#include "bayesnet/BaseClassifier.h"

namespace bayesnet {
    class Classifier : public BaseClassifier {
    public:
        Classifier(Network model);
        virtual ~Classifier() = default;
        Classifier& fit(std::vector<std::vector<int>>& X, std::vector<int>& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states) override;
        Classifier& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states) override;
        Classifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states) override;
        Classifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights) override;
        void addNodes();
        int getNumberOfNodes() const override;
        int getNumberOfEdges() const override;
        int getNumberOfStates() const override;
        int getClassNumStates() const override;
        torch::Tensor predict(torch::Tensor& X) override;
        std::vector<int> predict(std::vector<std::vector<int>>& X) override;
        torch::Tensor predict_proba(torch::Tensor& X) override;
        std::vector<std::vector<double>> predict_proba(std::vector<std::vector<int>>& X) override;
        status_t getStatus() const override { return status; }
        std::string getVersion() override { return { project_version.begin(), project_version.end() }; };
        float score(torch::Tensor& X, torch::Tensor& y) override;
        float score(std::vector<std::vector<int>>& X, std::vector<int>& y) override;
        std::vector<std::string> show() const override;
        std::vector<std::string> topological_order() override;
        std::vector<std::string> getNotes() const override { return notes; }
        void dump_cpt() const override;
        void setHyperparameters(const nlohmann::json& hyperparameters) override; //For classifiers that don't have hyperparameters
    protected:
        bool fitted;
        unsigned int m, n; // m: number of samples, n: number of features
        Network model;
        Metrics metrics;
        std::vector<std::string> features;
        std::string className;
        std::map<std::string, std::vector<int>> states;
        torch::Tensor dataset; // (n+1)xm tensor
        status_t status = NORMAL;
        std::vector<std::string> notes; // Used to store messages occurred during the fit process
        void checkFitParameters();
        virtual void buildModel(const torch::Tensor& weights) = 0;
        void trainModel(const torch::Tensor& weights) override;
        void buildDataset(torch::Tensor& y);
    private:
        Classifier& build(const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights);
    };
}
#endif
bayesnet/classifiers/KDB.cc

@@ -1,10 +1,21 @@
#include "KDB.h"

namespace bayesnet {
    using namespace torch;
    KDB::KDB(int k, float theta) : Classifier(Network()), k(k), theta(theta)
    {
        validHyperparameters = { "k", "theta" };

    KDB::KDB(int k, float theta) : Classifier(Network()), k(k), theta(theta) {}
    void KDB::buildModel()
    }
    void KDB::setHyperparameters(const nlohmann::json& hyperparameters)
    {
        if (hyperparameters.contains("k")) {
            k = hyperparameters["k"];
        }
        if (hyperparameters.contains("theta")) {
            theta = hyperparameters["theta"];
        }
    }
    void KDB::buildModel(const torch::Tensor& weights)
    {
        /*
        1. For each feature Xi, compute mutual information, I(X;C),
@@ -28,16 +39,16 @@ namespace bayesnet {
        // 1. For each feature Xi, compute mutual information, I(X;C),
        // where C is the class.
        addNodes();
        const Tensor& y = dataset.index({ -1, "..." });
        vector <float> mi;
        const torch::Tensor& y = dataset.index({ -1, "..." });
        std::vector<double> mi;
        for (auto i = 0; i < features.size(); i++) {
            Tensor firstFeature = dataset.index({ i, "..." });
            mi.push_back(metrics.mutualInformation(firstFeature, y));
            torch::Tensor firstFeature = dataset.index({ i, "..." });
            mi.push_back(metrics.mutualInformation(firstFeature, y, weights));
        }
        // 2. Compute class conditional mutual information I(Xi;Xj|C), for each
        auto conditionalEdgeWeights = metrics.conditionalEdge();
        auto conditionalEdgeWeights = metrics.conditionalEdge(weights);
        // 3. Let the used variable list, S, be empty.
        vector<int> S;
        std::vector<int> S;
        // 4. Let the DAG network being constructed, BN, begin with a single
        // class node, C.
        // 5. Repeat until S includes all domain features
@@ -55,9 +66,9 @@ namespace bayesnet {
            S.push_back(idx);
        }
    }
    void KDB::add_m_edges(int idx, vector<int>& S, Tensor& weights)
    void KDB::add_m_edges(int idx, std::vector<int>& S, torch::Tensor& weights)
    {
        auto n_edges = min(k, static_cast<int>(S.size()));
        auto n_edges = std::min(k, static_cast<int>(S.size()));
        auto cond_w = clone(weights);
        bool exit_cond = k == 0;
        int num = 0;
@@ -69,7 +80,7 @@ namespace bayesnet {
                model.addEdge(features[max_minfo], features[idx]);
                num++;
            }
            catch (const invalid_argument& e) {
            catch (const std::invalid_argument& e) {
                // Loops are not allowed
            }
        }
@@ -79,11 +90,11 @@ namespace bayesnet {
            exit_cond = num == n_edges || candidates.size(0) == 0;
        }
    }
    vector<string> KDB::graph(const string& title) const
    std::vector<std::string> KDB::graph(const std::string& title) const
    {
        string header{ title };
        std::string header{ title };
        if (title == "KDB") {
            header += " (k=" + to_string(k) + ", theta=" + to_string(theta) + ")";
            header += " (k=" + std::to_string(k) + ", theta=" + std::to_string(theta) + ")";
        }
        return model.graph(header);
    }
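The hyperparameter plumbing added above can be exercised like this (a sketch; the values are arbitrary, but the keys "k" and "theta" come straight from validHyperparameters):

```cpp
#include <nlohmann/json.hpp>
#include "bayesnet/classifiers/KDB.h"

int main()
{
    auto clf = bayesnet::KDB(2);             // k = 2, theta defaults to 0.03
    nlohmann::json hyperparameters = { { "k", 3 }, { "theta", 0.1 } };
    clf.setHyperparameters(hyperparameters); // overrides both values, as above
}
```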
21  bayesnet/classifiers/KDB.h  Normal file

@@ -0,0 +1,21 @@
#ifndef KDB_H
#define KDB_H
#include <torch/torch.h>
#include "bayesnet/utils/bayesnetUtils.h"
#include "Classifier.h"
namespace bayesnet {
    class KDB : public Classifier {
    private:
        int k;
        float theta;
        void add_m_edges(int idx, std::vector<int>& S, torch::Tensor& weights);
    protected:
        void buildModel(const torch::Tensor& weights) override;
    public:
        explicit KDB(int k, float theta = 0.03);
        virtual ~KDB() = default;
        void setHyperparameters(const nlohmann::json& hyperparameters) override;
        std::vector<std::string> graph(const std::string& name = "KDB") const override;
    };
}
#endif
bayesnet/classifiers/KDBLd.cc

@@ -1,16 +1,15 @@
#include "KDBLd.h"

namespace bayesnet {
    using namespace std;
    KDBLd::KDBLd(int k) : KDB(k), Proposal(dataset, features, className) {}
    KDBLd& KDBLd::fit(torch::Tensor& X_, torch::Tensor& y_, vector<string>& features_, string className_, map<string, vector<int>>& states_)
    KDBLd& KDBLd::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_)
    {
        // This first part should go in a Classifier method called fit_local_discretization or fit_float...
        checkInput(X_, y_);
        features = features_;
        className = className_;
        Xf = X_;
        y = y_;
        // Fills vectors Xv & yv with the data from tensors X_ (discretized) & y
        // Fills std::vectors Xv & yv with the data from tensors X_ (discretized) & y
        states = fit_local_discretization(y);
        // We have discretized the input data
        // 1st we need to fit the model to build the normal KDB structure, KDB::fit initializes the base Bayesian network
@@ -18,12 +17,12 @@ namespace bayesnet {
        states = localDiscretizationProposal(states, model);
        return *this;
    }
    Tensor KDBLd::predict(Tensor& X)
    torch::Tensor KDBLd::predict(torch::Tensor& X)
    {
        auto Xt = prepareX(X);
        return KDB::predict(Xt);
    }
    vector<string> KDBLd::graph(const string& name) const
    std::vector<std::string> KDBLd::graph(const std::string& name) const
    {
        return KDB::graph(name);
    }
18  bayesnet/classifiers/KDBLd.h  Normal file

@@ -0,0 +1,18 @@
#ifndef KDBLD_H
#define KDBLD_H
#include "Proposal.h"
#include "KDB.h"

namespace bayesnet {
    class KDBLd : public KDB, public Proposal {
    private:
    public:
        explicit KDBLd(int k);
        virtual ~KDBLd() = default;
        KDBLd& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states) override;
        std::vector<std::string> graph(const std::string& name = "KDB") const override;
        torch::Tensor predict(torch::Tensor& X) override;
        static inline std::string version() { return "0.0.1"; };
    };
}
#endif // !KDBLD_H
bayesnet/classifiers/Proposal.cc

@@ -1,22 +1,31 @@
#include <ArffFiles.h>
#include "Proposal.h"
#include "ArffFiles.h"

namespace bayesnet {
    Proposal::Proposal(torch::Tensor& dataset_, vector<string>& features_, string& className_) : pDataset(dataset_), pFeatures(features_), pClassName(className_) {}
    Proposal::Proposal(torch::Tensor& dataset_, std::vector<std::string>& features_, std::string& className_) : pDataset(dataset_), pFeatures(features_), pClassName(className_) {}
    Proposal::~Proposal()
    {
        for (auto& [key, value] : discretizers) {
            delete value;
        }
    }
    map<string, vector<int>> Proposal::localDiscretizationProposal(const map<string, vector<int>>& oldStates, Network& model)
    void Proposal::checkInput(const torch::Tensor& X, const torch::Tensor& y)
    {
        if (!torch::is_floating_point(X)) {
            throw std::invalid_argument("X must be a floating point tensor");
        }
        if (torch::is_floating_point(y)) {
            throw std::invalid_argument("y must be an integer tensor");
        }
    }
    map<std::string, std::vector<int>> Proposal::localDiscretizationProposal(const map<std::string, std::vector<int>>& oldStates, Network& model)
    {
        // order of local discretization is important. no good 0, 1, 2...
        // although we rediscretize features after the local discretization of every feature
        auto order = model.topological_sort();
        auto& nodes = model.getNodes();
        map<string, vector<int>> states = oldStates;
        vector<int> indicesToReDiscretize;
        map<std::string, std::vector<int>> states = oldStates;
        std::vector<int> indicesToReDiscretize;
        bool upgrade = false; // Flag to check if we need to upgrade the model
        for (auto feature : order) {
            auto nodeParents = nodes[feature]->getParents();
@@ -24,16 +33,16 @@ namespace bayesnet {
            upgrade = true;
            int index = find(pFeatures.begin(), pFeatures.end(), feature) - pFeatures.begin();
            indicesToReDiscretize.push_back(index); // We need to re-discretize this feature
            vector<string> parents;
            std::vector<std::string> parents;
            transform(nodeParents.begin(), nodeParents.end(), back_inserter(parents), [](const auto& p) { return p->getName(); });
            // Remove class as parent as it will be added later
            parents.erase(remove(parents.begin(), parents.end(), pClassName), parents.end());
            // Get the indices of the parents
            vector<int> indices;
            std::vector<int> indices;
            indices.push_back(-1); // Add class index
            transform(parents.begin(), parents.end(), back_inserter(indices), [&](const auto& p) {return find(pFeatures.begin(), pFeatures.end(), p) - pFeatures.begin(); });
            // Now we fit the discretizer of the feature, conditioned on its parents and the class i.e. discretizer.fit(X[index], X[indices] + y)
            vector<string> yJoinParents(Xf.size(1));
            std::vector<std::string> yJoinParents(Xf.size(1));
            for (auto idx : indices) {
                for (int i = 0; i < Xf.size(1); ++i) {
                    yJoinParents[i] += to_string(pDataset.index({ idx, i }).item<int>());
@@ -42,55 +51,47 @@ namespace bayesnet {
            auto arff = ArffFiles();
            auto yxv = arff.factorize(yJoinParents);
            auto xvf_ptr = Xf.index({ index }).data_ptr<float>();
            auto xvf = vector<mdlp::precision_t>(xvf_ptr, xvf_ptr + Xf.size(1));
            auto xvf = std::vector<mdlp::precision_t>(xvf_ptr, xvf_ptr + Xf.size(1));
            discretizers[feature]->fit(xvf, yxv);
            //
            //
            //
            // auto tmp = discretizers[feature]->transform(xvf);
            // Xv[index] = tmp;
            // auto xStates = vector<int>(discretizers[pFeatures[index]]->getCutPoints().size() + 1);
            // iota(xStates.begin(), xStates.end(), 0);
            // //Update new states of the feature/node
            // states[feature] = xStates;
        }
        if (upgrade) {
            // Discretize again X (only the affected indices) with the new fitted discretizers
            for (auto index : indicesToReDiscretize) {
                auto Xt_ptr = Xf.index({ index }).data_ptr<float>();
                auto Xt = vector<float>(Xt_ptr, Xt_ptr + Xf.size(1));
                auto Xt = std::vector<float>(Xt_ptr, Xt_ptr + Xf.size(1));
                pDataset.index_put_({ index, "..." }, torch::tensor(discretizers[pFeatures[index]]->transform(Xt)));
                auto xStates = vector<int>(discretizers[pFeatures[index]]->getCutPoints().size() + 1);
                auto xStates = std::vector<int>(discretizers[pFeatures[index]]->getCutPoints().size() + 1);
                iota(xStates.begin(), xStates.end(), 0);
                //Update new states of the feature/node
                states[pFeatures[index]] = xStates;
            }
            model.fit(pDataset, pFeatures, pClassName, states);
            const torch::Tensor weights = torch::full({ pDataset.size(1) }, 1.0 / pDataset.size(1), torch::kDouble);
            model.fit(pDataset, weights, pFeatures, pClassName, states);
        }
        return states;
    }
    map<string, vector<int>> Proposal::fit_local_discretization(const torch::Tensor& y)
    map<std::string, std::vector<int>> Proposal::fit_local_discretization(const torch::Tensor& y)
    {
        // Discretize the continuous input data and build pDataset (Classifier::dataset)
        int m = Xf.size(1);
        int n = Xf.size(0);
        map<string, vector<int>> states;
        pDataset = torch::zeros({ n + 1, m }, kInt32);
        auto yv = vector<int>(y.data_ptr<int>(), y.data_ptr<int>() + y.size(0));
        map<std::string, std::vector<int>> states;
        pDataset = torch::zeros({ n + 1, m }, torch::kInt32);
        auto yv = std::vector<int>(y.data_ptr<int>(), y.data_ptr<int>() + y.size(0));
        // discretize input data by feature(row)
        for (auto i = 0; i < pFeatures.size(); ++i) {
            auto* discretizer = new mdlp::CPPFImdlp();
            auto Xt_ptr = Xf.index({ i }).data_ptr<float>();
            auto Xt = vector<float>(Xt_ptr, Xt_ptr + Xf.size(1));
            auto Xt = std::vector<float>(Xt_ptr, Xt_ptr + Xf.size(1));
            discretizer->fit(Xt, yv);
            pDataset.index_put_({ i, "..." }, torch::tensor(discretizer->transform(Xt)));
            auto xStates = vector<int>(discretizer->getCutPoints().size() + 1);
            auto xStates = std::vector<int>(discretizer->getCutPoints().size() + 1);
            iota(xStates.begin(), xStates.end(), 0);
            states[pFeatures[i]] = xStates;
            discretizers[pFeatures[i]] = discretizer;
        }
        int n_classes = torch::max(y).item<int>() + 1;
        auto yStates = vector<int>(n_classes);
        auto yStates = std::vector<int>(n_classes);
        iota(yStates.begin(), yStates.end(), 0);
        states[pClassName] = yStates;
        pDataset.index_put_({ n, "..." }, y);
@@ -100,7 +101,7 @@ namespace bayesnet {
    {
        auto Xtd = torch::zeros_like(X, torch::kInt32);
        for (int i = 0; i < X.size(0); ++i) {
            auto Xt = vector<float>(X[i].data_ptr<float>(), X[i].data_ptr<float>() + X.size(1));
            auto Xt = std::vector<float>(X[i].data_ptr<float>(), X[i].data_ptr<float>() + X.size(1));
            auto Xd = discretizers[pFeatures[i]]->transform(Xt);
            Xtd.index_put_({ i }, torch::tensor(Xd, torch::kInt32));
        }
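For reference, the only mdlp::CPPFImdlp members the code above relies on are fit, transform and getCutPoints; a self-contained sketch with hypothetical data:

```cpp
#include <vector>
#include <CPPFImdlp.h>

int main()
{
    // One continuous feature and its class labels (hypothetical values).
    std::vector<mdlp::precision_t> X = { 4.3f, 5.0f, 6.1f, 6.9f };
    std::vector<int> y = { 0, 0, 1, 1 };
    mdlp::CPPFImdlp discretizer;
    discretizer.fit(X, y);                                // learn MDLP cut points
    auto Xd = discretizer.transform(X);                   // discrete code per sample
    auto nStates = discretizer.getCutPoints().size() + 1; // states, as computed above
}
```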
30  bayesnet/classifiers/Proposal.h  Normal file

@@ -0,0 +1,30 @@
#ifndef PROPOSAL_H
#define PROPOSAL_H
#include <string>
#include <map>
#include <torch/torch.h>
#include <CPPFImdlp.h>
#include "bayesnet/network/Network.h"
#include "Classifier.h"

namespace bayesnet {
    class Proposal {
    public:
        Proposal(torch::Tensor& pDataset, std::vector<std::string>& features_, std::string& className_);
        virtual ~Proposal();
    protected:
        void checkInput(const torch::Tensor& X, const torch::Tensor& y);
        torch::Tensor prepareX(torch::Tensor& X);
        map<std::string, std::vector<int>> localDiscretizationProposal(const map<std::string, std::vector<int>>& states, Network& model);
        map<std::string, std::vector<int>> fit_local_discretization(const torch::Tensor& y);
        torch::Tensor Xf; // X continuous nxm tensor
        torch::Tensor y; // y discrete nx1 tensor
        map<std::string, mdlp::CPPFImdlp*> discretizers;
    private:
        torch::Tensor& pDataset; // (n+1)xm tensor
        std::vector<std::string>& pFeatures;
        std::string& pClassName;
    };
}

#endif
bayesnet/classifiers/SPODE.cc

@@ -4,7 +4,7 @@ namespace bayesnet {

    SPODE::SPODE(int root) : Classifier(Network()), root(root) {}

    void SPODE::buildModel()
    void SPODE::buildModel(const torch::Tensor& weights)
    {
        // 0. Add all nodes to the model
        addNodes();
@@ -17,7 +17,7 @@ namespace bayesnet {
            }
        }
    }
    vector<string> SPODE::graph(const string& name) const
    std::vector<std::string> SPODE::graph(const std::string& name) const
    {
        return model.graph(name);
    }
bayesnet/classifiers/SPODE.h

@@ -7,11 +7,11 @@ namespace bayesnet {
    private:
        int root;
    protected:
        void buildModel() override;
        void buildModel(const torch::Tensor& weights) override;
    public:
        explicit SPODE(int root);
        virtual ~SPODE() {};
        vector<string> graph(const string& name = "SPODE") const override;
        virtual ~SPODE() = default;
        std::vector<std::string> graph(const std::string& name = "SPODE") const override;
    };
}
#endif
bayesnet/classifiers/SPODELd.cc

@@ -1,16 +1,15 @@
#include "SPODELd.h"

namespace bayesnet {
    using namespace std;
    SPODELd::SPODELd(int root) : SPODE(root), Proposal(dataset, features, className) {}
    SPODELd& SPODELd::fit(torch::Tensor& X_, torch::Tensor& y_, vector<string>& features_, string className_, map<string, vector<int>>& states_)
    SPODELd& SPODELd::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_)
    {
        // This first part should go in a Classifier method called fit_local_discretization or fit_float...
        checkInput(X_, y_);
        features = features_;
        className = className_;
        Xf = X_;
        y = y_;
        // Fills vectors Xv & yv with the data from tensors X_ (discretized) & y
        // Fills std::vectors Xv & yv with the data from tensors X_ (discretized) & y
        states = fit_local_discretization(y);
        // We have discretized the input data
        // 1st we need to fit the model to build the normal SPODE structure, SPODE::fit initializes the base Bayesian network
@@ -18,15 +17,16 @@ namespace bayesnet {
        states = localDiscretizationProposal(states, model);
        return *this;
    }
    SPODELd& SPODELd::fit(torch::Tensor& dataset, vector<string>& features_, string className_, map<string, vector<int>>& states_)
    SPODELd& SPODELd::fit(torch::Tensor& dataset, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_)
    {
        if (!torch::is_floating_point(dataset)) {
            throw std::runtime_error("Dataset must be a floating point tensor");
        }
        Xf = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), "..." }).clone();
        cout << "Xf " << Xf.sizes() << " dtype: " << Xf.dtype() << endl;
        y = dataset.index({ -1, "..." }).clone();
        // This first part should go in a Classifier method called fit_local_discretization or fit_float...
        features = features_;
        className = className_;
        // Fills vectors Xv & yv with the data from tensors X_ (discretized) & y
        // Fills std::vectors Xv & yv with the data from tensors X_ (discretized) & y
        states = fit_local_discretization(y);
        // We have discretized the input data
        // 1st we need to fit the model to build the normal SPODE structure, SPODE::fit initializes the base Bayesian network
@@ -35,12 +35,12 @@ namespace bayesnet {
        return *this;
    }

    Tensor SPODELd::predict(Tensor& X)
    torch::Tensor SPODELd::predict(torch::Tensor& X)
    {
        auto Xt = prepareX(X);
        return SPODE::predict(Xt);
    }
    vector<string> SPODELd::graph(const string& name) const
    std::vector<std::string> SPODELd::graph(const std::string& name) const
    {
        return SPODE::graph(name);
    }
18 bayesnet/classifiers/SPODELd.h Normal file
@@ -0,0 +1,18 @@
#ifndef SPODELD_H
#define SPODELD_H
#include "SPODE.h"
#include "Proposal.h"

namespace bayesnet {
    class SPODELd : public SPODE, public Proposal {
    public:
        explicit SPODELd(int root);
        virtual ~SPODELd() = default;
        SPODELd& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states) override;
        SPODELd& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states) override;
        std::vector<std::string> graph(const std::string& name = "SPODE") const override;
        torch::Tensor predict(torch::Tensor& X) override;
        static inline std::string version() { return "0.0.1"; };
    };
}
#endif // !SPODELD_H
@@ -1,29 +1,27 @@
#include "TAN.h"

namespace bayesnet {
-   using namespace torch;

    TAN::TAN() : Classifier(Network()) {}

-   void TAN::buildModel()
+   void TAN::buildModel(const torch::Tensor& weights)
    {
        // 0. Add all nodes to the model
        addNodes();
        // 1. Compute mutual information between each feature and the class and set the root node
        // as the highest mutual information with the class
-       auto mi = vector <pair<int, float >>();
-       Tensor class_dataset = dataset.index({ -1, "..." });
+       auto mi = std::vector<std::pair<int, float>>();
+       torch::Tensor class_dataset = dataset.index({ -1, "..." });
        for (int i = 0; i < static_cast<int>(features.size()); ++i) {
-           Tensor feature_dataset = dataset.index({ i, "..." });
-           auto mi_value = metrics.mutualInformation(class_dataset, feature_dataset);
+           torch::Tensor feature_dataset = dataset.index({ i, "..." });
+           auto mi_value = metrics.mutualInformation(class_dataset, feature_dataset, weights);
            mi.push_back({ i, mi_value });
        }
        sort(mi.begin(), mi.end(), [](const auto& left, const auto& right) {return left.second < right.second;});
        auto root = mi[mi.size() - 1].first;
        // 2. Compute the edge weights matrix between every pair of features (conditioned on the class)
-       auto weights = metrics.conditionalEdge();
+       auto weights_matrix = metrics.conditionalEdge(weights);
        // 3. Compute the maximum spanning tree
-       auto mst = metrics.maximumSpanningTree(features, weights, root);
+       auto mst = metrics.maximumSpanningTree(features, weights_matrix, root);
        // 4. Add edges from the maximum spanning tree to the model
        for (auto i = 0; i < mst.size(); ++i) {
            auto [from, to] = mst[i];
@@ -34,7 +32,7 @@ namespace bayesnet {
            model.addEdge(className, feature);
        }
    }
-   vector<string> TAN::graph(const string& title) const
+   std::vector<std::string> TAN::graph(const std::string& title) const
    {
        return model.graph(title);
    }
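Note: the construction above is the classical Chow-Liu/TAN procedure: the root is the feature with maximal mutual information with the class, and the spanning tree maximises pairwise edge weights, which in the standard TAN formulation are class-conditional mutual information (the weighted variants computed by metrics are assumed analogous). In the usual notation:

    I(X_i; C) = \sum_{x_i, c} p(x_i, c) \log \frac{p(x_i, c)}{p(x_i)\, p(c)}

    I(X_i; X_j \mid C) = \sum_{x_i, x_j, c} p(x_i, x_j, c) \log \frac{p(x_i, x_j \mid c)}{p(x_i \mid c)\, p(x_j \mid c)}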
15 bayesnet/classifiers/TAN.h Normal file
@@ -0,0 +1,15 @@
#ifndef TAN_H
#define TAN_H
#include "Classifier.h"
namespace bayesnet {
    class TAN : public Classifier {
    private:
    protected:
        void buildModel(const torch::Tensor& weights) override;
    public:
        TAN();
        virtual ~TAN() = default;
        std::vector<std::string> graph(const std::string& name = "TAN") const override;
    };
}
#endif
@@ -1,16 +1,15 @@
#include "TANLd.h"

namespace bayesnet {
-   using namespace std;
    TANLd::TANLd() : TAN(), Proposal(dataset, features, className) {}
-   TANLd& TANLd::fit(torch::Tensor& X_, torch::Tensor& y_, vector<string>& features_, string className_, map<string, vector<int>>& states_)
+   TANLd& TANLd::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_)
    {
        // This first part should go in a Classifier method called fit_local_discretization or fit_float...
        checkInput(X_, y_);
        features = features_;
        className = className_;
        Xf = X_;
        y = y_;
-       // Fills vectors Xv & yv with the data from tensors X_ (discretized) & y
+       // Fills std::vectors Xv & yv with the data from tensors X_ (discretized) & y
        states = fit_local_discretization(y);
        // We have discretized the input data
        // 1st we need to fit the model to build the normal TAN structure, TAN::fit initializes the base Bayesian network
@@ -19,12 +18,12 @@ namespace bayesnet {
        return *this;

    }
-   Tensor TANLd::predict(Tensor& X)
+   torch::Tensor TANLd::predict(torch::Tensor& X)
    {
        auto Xt = prepareX(X);
        return TAN::predict(Xt);
    }
-   vector<string> TANLd::graph(const string& name) const
+   std::vector<std::string> TANLd::graph(const std::string& name) const
    {
        return TAN::graph(name);
    }
18 bayesnet/classifiers/TANLd.h Normal file
@@ -0,0 +1,18 @@
#ifndef TANLD_H
#define TANLD_H
#include "TAN.h"
#include "Proposal.h"

namespace bayesnet {
    class TANLd : public TAN, public Proposal {
    private:
    public:
        TANLd();
        virtual ~TANLd() = default;
        TANLd& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states) override;
        std::vector<std::string> graph(const std::string& name = "TAN") const override;
        torch::Tensor predict(torch::Tensor& X) override;
        static inline std::string version() { return "0.0.1"; };
    };
}
#endif // !TANLD_H
34 bayesnet/ensembles/AODE.cc Normal file
@@ -0,0 +1,34 @@
#include "AODE.h"

namespace bayesnet {
    AODE::AODE(bool predict_voting) : Ensemble(predict_voting)
    {
        validHyperparameters = { "predict_voting" };
    }
    void AODE::setHyperparameters(const nlohmann::json& hyperparameters_)
    {
        auto hyperparameters = hyperparameters_;
        if (hyperparameters.contains("predict_voting")) {
            predict_voting = hyperparameters["predict_voting"];
            hyperparameters.erase("predict_voting");
        }
        if (!hyperparameters.empty()) {
            throw std::invalid_argument("Invalid hyperparameters" + hyperparameters.dump());
        }
    }
    void AODE::buildModel(const torch::Tensor& weights)
    {
        models.clear();
        significanceModels.clear();
        for (int i = 0; i < features.size(); ++i) {
            models.push_back(std::make_unique<SPODE>(i));
        }
        n_models = models.size();
        significanceModels = std::vector<double>(n_models, 1.0);
    }
    std::vector<std::string> AODE::graph(const std::string& title) const
    {
        return Ensemble::graph(title);
    }
}
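Note: buildModel instantiates one SPODE per feature, each acting as super-parent, all with significance 1.0. With probability averaging, the ensemble realises the standard AODE estimate (standard formulation from the AODE literature, not spelled out in the diff):

    P(c \mid x) \propto \sum_{r=1}^{n} P(c) \, P(x_r \mid c) \prod_{i \neq r} P(x_i \mid c, x_r)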
16 bayesnet/ensembles/AODE.h Normal file
@@ -0,0 +1,16 @@
#ifndef AODE_H
#define AODE_H
#include "bayesnet/classifiers/SPODE.h"
#include "Ensemble.h"
namespace bayesnet {
    class AODE : public Ensemble {
    public:
        AODE(bool predict_voting = true);
        virtual ~AODE() {};
        void setHyperparameters(const nlohmann::json& hyperparameters) override;
        std::vector<std::string> graph(const std::string& title = "AODE") const override;
    protected:
        void buildModel(const torch::Tensor& weights) override;
    };
}
#endif
54 bayesnet/ensembles/AODELd.cc Normal file
@@ -0,0 +1,54 @@
#include "AODELd.h"

namespace bayesnet {
    AODELd::AODELd(bool predict_voting) : Ensemble(predict_voting), Proposal(dataset, features, className)
    {
        validHyperparameters = { "predict_voting" };
    }
    void AODELd::setHyperparameters(const nlohmann::json& hyperparameters_)
    {
        auto hyperparameters = hyperparameters_;
        if (hyperparameters.contains("predict_voting")) {
            predict_voting = hyperparameters["predict_voting"];
            hyperparameters.erase("predict_voting");
        }
        if (!hyperparameters.empty()) {
            throw std::invalid_argument("Invalid hyperparameters" + hyperparameters.dump());
        }
    }
    AODELd& AODELd::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_)
    {
        checkInput(X_, y_);
        features = features_;
        className = className_;
        Xf = X_;
        y = y_;
        // Fills std::vectors Xv & yv with the data from tensors X_ (discretized) & y
        states = fit_local_discretization(y);
        // We have discretized the input data
        // 1st we need to fit the model to build the underlying structure; Ensemble::fit initializes the base Bayesian networks
        Ensemble::fit(dataset, features, className, states);
        return *this;
    }
    void AODELd::buildModel(const torch::Tensor& weights)
    {
        models.clear();
        for (int i = 0; i < features.size(); ++i) {
            models.push_back(std::make_unique<SPODELd>(i));
        }
        n_models = models.size();
        significanceModels = std::vector<double>(n_models, 1.0);
    }
    void AODELd::trainModel(const torch::Tensor& weights)
    {
        for (const auto& model : models) {
            model->fit(Xf, y, features, className, states);
        }
    }
    std::vector<std::string> AODELd::graph(const std::string& name) const
    {
        return Ensemble::graph(name);
    }
}
20 bayesnet/ensembles/AODELd.h Normal file
@@ -0,0 +1,20 @@
#ifndef AODELD_H
#define AODELD_H
#include "bayesnet/classifiers/Proposal.h"
#include "bayesnet/classifiers/SPODELd.h"
#include "Ensemble.h"

namespace bayesnet {
    class AODELd : public Ensemble, public Proposal {
    public:
        AODELd(bool predict_voting = true);
        virtual ~AODELd() = default;
        AODELd& fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_) override;
        void setHyperparameters(const nlohmann::json& hyperparameters) override;
        std::vector<std::string> graph(const std::string& name = "AODELd") const override;
    protected:
        void trainModel(const torch::Tensor& weights) override;
        void buildModel(const torch::Tensor& weights) override;
    };
}
#endif // !AODELD_H
296 bayesnet/ensembles/BoostAODE.cc Normal file
@@ -0,0 +1,296 @@
#include <set>
#include <functional>
#include <limits.h>
#include <tuple>
#include <folding.hpp>
#include "bayesnet/feature_selection/CFS.h"
#include "bayesnet/feature_selection/FCBF.h"
#include "bayesnet/feature_selection/IWSS.h"
#include "BoostAODE.h"

namespace bayesnet {
    struct {
        std::string CFS = "CFS";
        std::string FCBF = "FCBF";
        std::string IWSS = "IWSS";
    } SelectFeatures;
    struct {
        std::string ASC = "asc";
        std::string DESC = "desc";
        std::string RAND = "rand";
    } Orders;
    BoostAODE::BoostAODE(bool predict_voting) : Ensemble(predict_voting)
    {
        validHyperparameters = {
            "repeatSparent", "maxModels", "order", "convergence", "threshold",
            "select_features", "tolerance", "predict_voting", "predict_single"
        };
    }
    void BoostAODE::buildModel(const torch::Tensor& weights)
    {
        // Models shall be built in trainModel
        models.clear();
        significanceModels.clear();
        n_models = 0;
        // Prepare the validation dataset
        auto y_ = dataset.index({ -1, "..." });
        if (convergence) {
            // Prepare train & validation sets from train data
            auto fold = folding::StratifiedKFold(5, y_, 271);
            dataset_ = torch::clone(dataset);
            // save input dataset
            auto [train, test] = fold.getFold(0);
            auto train_t = torch::tensor(train);
            auto test_t = torch::tensor(test);
            // Get train and validation sets
            X_train = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), train_t });
            y_train = dataset.index({ -1, train_t });
            X_test = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), test_t });
            y_test = dataset.index({ -1, test_t });
            dataset = X_train;
            m = X_train.size(1);
            auto n_classes = states.at(className).size();
            metrics = Metrics(dataset, features, className, n_classes);
            // Build dataset with train data
            buildDataset(y_train);
        } else {
            // Use all data to train
            X_train = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), "..." });
            y_train = y_;
        }
    }
    void BoostAODE::setHyperparameters(const nlohmann::json& hyperparameters_)
    {
        auto hyperparameters = hyperparameters_;
        if (hyperparameters.contains("repeatSparent")) {
            repeatSparent = hyperparameters["repeatSparent"];
            hyperparameters.erase("repeatSparent");
        }
        if (hyperparameters.contains("maxModels")) {
            maxModels = hyperparameters["maxModels"];
            hyperparameters.erase("maxModels");
        }
        if (hyperparameters.contains("order")) {
            std::vector<std::string> algos = { Orders.ASC, Orders.DESC, Orders.RAND };
            order_algorithm = hyperparameters["order"];
            if (std::find(algos.begin(), algos.end(), order_algorithm) == algos.end()) {
                throw std::invalid_argument("Invalid order algorithm, valid values [" + Orders.ASC + ", " + Orders.DESC + ", " + Orders.RAND + "]");
            }
            hyperparameters.erase("order");
        }
        if (hyperparameters.contains("convergence")) {
            convergence = hyperparameters["convergence"];
            hyperparameters.erase("convergence");
        }
        if (hyperparameters.contains("predict_single")) {
            predict_single = hyperparameters["predict_single"];
            hyperparameters.erase("predict_single");
        }
        if (hyperparameters.contains("threshold")) {
            threshold = hyperparameters["threshold"];
            hyperparameters.erase("threshold");
        }
        if (hyperparameters.contains("tolerance")) {
            tolerance = hyperparameters["tolerance"];
            hyperparameters.erase("tolerance");
        }
        if (hyperparameters.contains("predict_voting")) {
            predict_voting = hyperparameters["predict_voting"];
            hyperparameters.erase("predict_voting");
        }
        if (hyperparameters.contains("select_features")) {
            auto selectedAlgorithm = hyperparameters["select_features"];
            std::vector<std::string> algos = { SelectFeatures.IWSS, SelectFeatures.CFS, SelectFeatures.FCBF };
            selectFeatures = true;
            select_features_algorithm = selectedAlgorithm;
            if (std::find(algos.begin(), algos.end(), selectedAlgorithm) == algos.end()) {
                throw std::invalid_argument("Invalid selectFeatures value, valid values [" + SelectFeatures.IWSS + ", " + SelectFeatures.CFS + ", " + SelectFeatures.FCBF + "]");
            }
            hyperparameters.erase("select_features");
        }
        if (!hyperparameters.empty()) {
            throw std::invalid_argument("Invalid hyperparameters" + hyperparameters.dump());
        }
    }
    std::tuple<torch::Tensor&, double, bool> update_weights(torch::Tensor& ytrain, torch::Tensor& ypred, torch::Tensor& weights)
    {
        bool terminate = false;
        double alpha_t = 0;
        auto mask_wrong = ypred != ytrain;
        auto mask_right = ypred == ytrain;
        auto masked_weights = weights * mask_wrong.to(weights.dtype());
        double epsilon_t = masked_weights.sum().item<double>();
        if (epsilon_t > 0.5) {
            // Inverse the weights policy (plot ln(wt))
            // "In each round of AdaBoost, there is a sanity check to ensure that the current base
            // learner is better than random guess" (Zhi-Hua Zhou, 2012)
            terminate = true;
        } else {
            double wt = (1 - epsilon_t) / epsilon_t;
            alpha_t = epsilon_t == 0 ? 1 : 0.5 * log(wt);
            // Step 3.2: Update weights for next classifier
            // Step 3.2.1: Update weights of wrong samples
            weights += mask_wrong.to(weights.dtype()) * exp(alpha_t) * weights;
            // Step 3.2.2: Update weights of right samples
            weights += mask_right.to(weights.dtype()) * exp(-alpha_t) * weights;
            // Step 3.3: Normalise the weights
            double totalWeights = torch::sum(weights).item<double>();
            weights = weights / totalWeights;
        }
        return { weights, alpha_t, terminate };
    }
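Note: update_weights is the AdaBoost round described in the comments. In the textbook notation the comments cite (weights w_i normalised to sum to 1):

    \epsilon_t = \sum_{i:\, \hat{y}_i \neq y_i} w_i, \qquad \alpha_t = \tfrac{1}{2} \ln \frac{1 - \epsilon_t}{\epsilon_t}

Misclassified samples are up-weighted and correct ones down-weighted before renormalising. Observe that the code applies the additive variant w_i <- w_i (1 + e^{\pm\alpha_t}) rather than the textbook w_i <- w_i e^{\pm\alpha_t}; both preserve the same ordering of weights. When \epsilon_t > 0.5 the base learner is no better than random guessing and the round signals termination.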
    std::unordered_set<int> BoostAODE::initializeModels()
    {
        std::unordered_set<int> featuresUsed;
        torch::Tensor weights_ = torch::full({ m }, 1.0 / m, torch::kFloat64);
        int maxFeatures = 0;
        if (select_features_algorithm == SelectFeatures.CFS) {
            featureSelector = new CFS(dataset, features, className, maxFeatures, states.at(className).size(), weights_);
        } else if (select_features_algorithm == SelectFeatures.IWSS) {
            if (threshold < 0 || threshold > 0.5) {
                throw std::invalid_argument("Invalid threshold value for " + SelectFeatures.IWSS + " [0, 0.5]");
            }
            featureSelector = new IWSS(dataset, features, className, maxFeatures, states.at(className).size(), weights_, threshold);
        } else if (select_features_algorithm == SelectFeatures.FCBF) {
            if (threshold < 1e-7 || threshold > 1) {
                throw std::invalid_argument("Invalid threshold value for " + SelectFeatures.FCBF + " [1e-7, 1]");
            }
            featureSelector = new FCBF(dataset, features, className, maxFeatures, states.at(className).size(), weights_, threshold);
        }
        featureSelector->fit();
        auto cfsFeatures = featureSelector->getFeatures();
        for (const int& feature : cfsFeatures) {
            featuresUsed.insert(feature);
            std::unique_ptr<Classifier> model = std::make_unique<SPODE>(feature);
            model->fit(dataset, features, className, states, weights_);
            models.push_back(std::move(model));
            significanceModels.push_back(1.0);
            n_models++;
        }
        notes.push_back("Used features in initialization: " + std::to_string(featuresUsed.size()) + " of " + std::to_string(features.size()) + " with " + select_features_algorithm);
        delete featureSelector;
        return featuresUsed;
    }
    torch::Tensor BoostAODE::ensemble_predict(torch::Tensor& X, SPODE* model)
    {
        if (initialize_prob_table) {
            initialize_prob_table = false;
            prob_table = model->predict_proba(X) * 1.0;
        } else {
            prob_table += model->predict_proba(X) * 1.0;
        }
        // prob_table doesn't store probabilities but the sum of them;
        // to have them we need to divide by the sum of the "weights" used to
        // consider the results obtained in the model's predict_proba.
        return prob_table.argmax(1);
    }
    void BoostAODE::trainModel(const torch::Tensor& weights)
    {
        // Algorithm based on the AdaBoost algorithm for classification
        // as explained in Ensemble Methods (Zhi-Hua Zhou, 2012)
        initialize_prob_table = true;
        fitted = true;
        double alpha_t = 0;
        torch::Tensor weights_ = torch::full({ m }, 1.0 / m, torch::kFloat64);
        bool exitCondition = false;
        std::unordered_set<int> featuresUsed;
        if (selectFeatures) {
            featuresUsed = initializeModels();
            auto ypred = predict(X_train);
            std::tie(weights_, alpha_t, exitCondition) = update_weights(y_train, ypred, weights_);
            // Update significance of the models
            for (int i = 0; i < n_models; ++i) {
                significanceModels[i] = alpha_t;
            }
            if (exitCondition) {
                return;
            }
        }
        bool resetMaxModels = false;
        if (maxModels == 0) {
            maxModels = .1 * n > 10 ? .1 * n : n;
            resetMaxModels = true; // Flag to unset maxModels
        }
        // Variables to control the accuracy finish condition
        double priorAccuracy = 0.0;
        double delta = 1.0;
        double convergence_threshold = 1e-4;
        int count = 0; // number of times the accuracy is lower than the convergence_threshold
        // Step 0: Set the finish conditions:
        // if not repeatSparent, running out of features is a finish condition
        // n_models == maxModels
        // epsilon sub t > 0.5 => inverse the weights policy
        // validation error is not decreasing
        bool ascending = order_algorithm == Orders.ASC;
        std::mt19937 g{ 173 };
        while (!exitCondition) {
            // Step 1: Build ranking with mutual information
            auto featureSelection = metrics.SelectKBestWeighted(weights_, ascending, n); // Get all the features sorted
            if (order_algorithm == Orders.RAND) {
                std::shuffle(featureSelection.begin(), featureSelection.end(), g);
            }
            auto feature = featureSelection[0];
            if (!repeatSparent || featuresUsed.size() < featureSelection.size()) {
                bool used = true;
                for (const auto& feat : featureSelection) {
                    if (std::find(featuresUsed.begin(), featuresUsed.end(), feat) != featuresUsed.end()) {
                        continue;
                    }
                    used = false;
                    feature = feat;
                    break;
                }
                if (used) {
                    exitCondition = true;
                    continue;
                }
            }
            std::unique_ptr<Classifier> model;
            model = std::make_unique<SPODE>(feature);
            model->fit(dataset, features, className, states, weights_);
            torch::Tensor ypred;
            if (predict_single) {
                ypred = model->predict(X_train);
            } else {
                ypred = ensemble_predict(X_train, dynamic_cast<SPODE*>(model.get()));
            }
            // Step 3.1: Compute the classifier amount of say
            std::tie(weights_, alpha_t, exitCondition) = update_weights(y_train, ypred, weights_);
            if (exitCondition) {
                break;
            }
            // Step 3.4: Store classifier and its accuracy to weigh its future vote
            featuresUsed.insert(feature);
            models.push_back(std::move(model));
            significanceModels.push_back(alpha_t);
            n_models++;
            if (convergence) {
                auto y_val_predict = predict(X_test);
                double accuracy = (y_val_predict == y_test).sum().item<double>() / (double)y_test.size(0);
                if (priorAccuracy == 0) {
                    priorAccuracy = accuracy;
                } else {
                    delta = accuracy - priorAccuracy;
                }
                if (delta < convergence_threshold) {
                    count++;
                }
                priorAccuracy = accuracy;
            }
            exitCondition = n_models >= maxModels && repeatSparent || count > tolerance;
        }
        if (featuresUsed.size() != features.size()) {
            notes.push_back("Used features in train: " + std::to_string(featuresUsed.size()) + " of " + std::to_string(features.size()));
            status = WARNING;
        }
        notes.push_back("Number of models: " + std::to_string(n_models));
        if (resetMaxModels) {
            maxModels = 0;
        }
    }
    std::vector<std::string> BoostAODE::graph(const std::string& title) const
    {
        return Ensemble::graph(title);
    }
}
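Note: a minimal usage sketch of the hyperparameter interface above (illustrative only; the JSON keys are the ones listed in validHyperparameters, while dataset preparation and the fit call are elided):

    #include <nlohmann/json.hpp>
    #include "bayesnet/ensembles/BoostAODE.h"

    int main() {
        auto clf = bayesnet::BoostAODE(false);      // average probabilities instead of voting
        clf.setHyperparameters(nlohmann::json{
            { "order", "desc" },                    // process ranked features in descending order
            { "convergence", true },                // hold out a validation fold, stop on stagnation
            { "select_features", "CFS" },           // warm-start the ensemble with CFS-selected SPODEs
            { "tolerance", 3 }                      // allowed non-improving validation rounds
        });
        // clf.fit(X, y, features, className, states);  // tensors and metadata prepared elsewhere
        return 0;
    }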
37 bayesnet/ensembles/BoostAODE.h Normal file
@@ -0,0 +1,37 @@
#ifndef BOOSTAODE_H
#define BOOSTAODE_H
#include <map>
#include "bayesnet/classifiers/SPODE.h"
#include "bayesnet/feature_selection/FeatureSelect.h"
#include "Ensemble.h"
namespace bayesnet {
    class BoostAODE : public Ensemble {
    public:
        BoostAODE(bool predict_voting = true);
        virtual ~BoostAODE() = default;
        std::vector<std::string> graph(const std::string& title = "BoostAODE") const override;
        void setHyperparameters(const nlohmann::json& hyperparameters) override;
    protected:
        void buildModel(const torch::Tensor& weights) override;
        void trainModel(const torch::Tensor& weights) override;
    private:
        std::unordered_set<int> initializeModels();
        torch::Tensor ensemble_predict(torch::Tensor& X, SPODE* model);
        torch::Tensor dataset_;
        torch::Tensor X_train, y_train, X_test, y_test;
        // Hyperparameters
        bool repeatSparent = false; // if true, a feature can be selected more than once
        int maxModels = 0;
        int tolerance = 0;
        bool predict_single = true; // whether the last model or the whole ensemble is used to predict in training
        std::string order_algorithm; // order to process the KBest features: asc, desc, rand
        bool convergence = false; // if true, stop when the model does not improve
        bool selectFeatures = false; // if true, use feature selection
        std::string select_features_algorithm = "desc"; // selected feature selection algorithm
        bool initialize_prob_table; // if true, initialize the prob_table with the first model (used in train)
        torch::Tensor prob_table; // table of probabilities for ensemble predicting if predict_single is false
        FeatureSelect* featureSelector = nullptr;
        double threshold = -1;
    };
}
#endif
216 bayesnet/ensembles/Ensemble.cc Normal file
@@ -0,0 +1,216 @@
#include "Ensemble.h"

namespace bayesnet {

    Ensemble::Ensemble(bool predict_voting) : Classifier(Network()), n_models(0), predict_voting(predict_voting)
    {
    };
    const std::string ENSEMBLE_NOT_FITTED = "Ensemble has not been fitted";
    void Ensemble::trainModel(const torch::Tensor& weights)
    {
        n_models = models.size();
        for (auto i = 0; i < n_models; ++i) {
            // fit with std::vectors
            models[i]->fit(dataset, features, className, states);
        }
    }
    std::vector<int> Ensemble::compute_arg_max(std::vector<std::vector<double>>& X)
    {
        std::vector<int> y_pred;
        for (auto i = 0; i < X.size(); ++i) {
            auto max = std::max_element(X[i].begin(), X[i].end());
            y_pred.push_back(std::distance(X[i].begin(), max));
        }
        return y_pred;
    }
    torch::Tensor Ensemble::compute_arg_max(torch::Tensor& X)
    {
        auto y_pred = torch::argmax(X, 1);
        return y_pred;
    }
    torch::Tensor Ensemble::voting(torch::Tensor& votes)
    {
        // Convert m x n_models tensor to a m x n_class_states tensor with voting probabilities
        auto y_pred_ = votes.accessor<int, 2>();
        std::vector<int> y_pred_final;
        int numClasses = states.at(className).size();
        // votes is m x n_models with the prediction of every model for each sample
        auto result = torch::zeros({ votes.size(0), numClasses }, torch::kFloat32);
        auto sum = std::reduce(significanceModels.begin(), significanceModels.end());
        for (int i = 0; i < votes.size(0); ++i) {
            // n_votes accumulates, at each index (class value), the significance contributed by every model
            // i.e. n_votes[0] holds how much weight class value 0 has gathered from the models' predictions
            std::vector<double> n_votes(numClasses, 0.0);
            for (int j = 0; j < n_models; ++j) {
                n_votes[y_pred_[i][j]] += significanceModels.at(j);
            }
            result[i] = torch::tensor(n_votes);
        }
        // To only do one division and gain precision
        result /= sum;
        return result;
    }
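Note: a small worked example of the weighted vote (illustrative numbers): with three models of significances 0.5, 0.3 and 0.2 predicting classes 0, 0 and 1 for one sample, n_votes = [0.5 + 0.3, 0.2] = [0.8, 0.2]; after dividing by the significance sum (1.0) the row is already the voting probability distribution, and compute_arg_max picks class 0.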
    std::vector<std::vector<double>> Ensemble::predict_proba(std::vector<std::vector<int>>& X)
    {
        if (!fitted) {
            throw std::logic_error(ENSEMBLE_NOT_FITTED);
        }
        return predict_voting ? predict_average_voting(X) : predict_average_proba(X);
    }
    torch::Tensor Ensemble::predict_proba(torch::Tensor& X)
    {
        if (!fitted) {
            throw std::logic_error(ENSEMBLE_NOT_FITTED);
        }
        return predict_voting ? predict_average_voting(X) : predict_average_proba(X);
    }
    std::vector<int> Ensemble::predict(std::vector<std::vector<int>>& X)
    {
        auto res = predict_proba(X);
        return compute_arg_max(res);
    }
    torch::Tensor Ensemble::predict(torch::Tensor& X)
    {
        auto res = predict_proba(X);
        return compute_arg_max(res);
    }
    torch::Tensor Ensemble::predict_average_proba(torch::Tensor& X)
    {
        auto n_states = models[0]->getClassNumStates();
        torch::Tensor y_pred = torch::zeros({ X.size(1), n_states }, torch::kFloat32);
        auto threads{ std::vector<std::thread>() };
        std::mutex mtx;
        for (auto i = 0; i < n_models; ++i) {
            threads.push_back(std::thread([&, i]() {
                auto ypredict = models[i]->predict_proba(X);
                std::lock_guard<std::mutex> lock(mtx);
                y_pred += ypredict * significanceModels[i];
            }));
        }
        for (auto& thread : threads) {
            thread.join();
        }
        auto sum = std::reduce(significanceModels.begin(), significanceModels.end());
        y_pred /= sum;
        return y_pred;
    }
    std::vector<std::vector<double>> Ensemble::predict_average_proba(std::vector<std::vector<int>>& X)
    {
        auto n_states = models[0]->getClassNumStates();
        std::vector<std::vector<double>> y_pred(X[0].size(), std::vector<double>(n_states, 0.0));
        auto threads{ std::vector<std::thread>() };
        std::mutex mtx;
        for (auto i = 0; i < n_models; ++i) {
            threads.push_back(std::thread([&, i]() {
                auto ypredict = models[i]->predict_proba(X);
                assert(ypredict.size() == y_pred.size());
                assert(ypredict[0].size() == y_pred[0].size());
                std::lock_guard<std::mutex> lock(mtx);
                // Multiply each prediction by the significance of the model and then add it to the final prediction
                for (auto j = 0; j < ypredict.size(); ++j) {
                    std::transform(y_pred[j].begin(), y_pred[j].end(), ypredict[j].begin(), y_pred[j].begin(),
                        [significanceModels = significanceModels[i]](double x, double y) { return x + y * significanceModels; });
                }
            }));
        }
        for (auto& thread : threads) {
            thread.join();
        }
        auto sum = std::reduce(significanceModels.begin(), significanceModels.end());
        // Divide each element of the prediction by the sum of the significances
        for (auto j = 0; j < y_pred.size(); ++j) {
            std::transform(y_pred[j].begin(), y_pred[j].end(), y_pred[j].begin(), [sum](double x) { return x / sum; });
        }
        return y_pred;
    }
    std::vector<std::vector<double>> Ensemble::predict_average_voting(std::vector<std::vector<int>>& X)
    {
        torch::Tensor Xt = bayesnet::vectorToTensor(X, false);
        auto y_pred = predict_average_voting(Xt);
        std::vector<std::vector<double>> result = tensorToVectorDouble(y_pred);
        return result;
    }
    torch::Tensor Ensemble::predict_average_voting(torch::Tensor& X)
    {
        // Build a m x n_models tensor with the predictions of each model
        torch::Tensor y_pred = torch::zeros({ X.size(1), n_models }, torch::kInt32);
        auto threads{ std::vector<std::thread>() };
        std::mutex mtx;
        for (auto i = 0; i < n_models; ++i) {
            threads.push_back(std::thread([&, i]() {
                auto ypredict = models[i]->predict(X);
                std::lock_guard<std::mutex> lock(mtx);
                y_pred.index_put_({ "...", i }, ypredict);
            }));
        }
        for (auto& thread : threads) {
            thread.join();
        }
        return voting(y_pred);
    }
    float Ensemble::score(torch::Tensor& X, torch::Tensor& y)
    {
        auto y_pred = predict(X);
        int correct = 0;
        for (int i = 0; i < y_pred.size(0); ++i) {
            if (y_pred[i].item<int>() == y[i].item<int>()) {
                correct++;
            }
        }
        return (double)correct / y_pred.size(0);
    }
    float Ensemble::score(std::vector<std::vector<int>>& X, std::vector<int>& y)
    {
        auto y_pred = predict(X);
        int correct = 0;
        for (int i = 0; i < y_pred.size(); ++i) {
            if (y_pred[i] == y[i]) {
                correct++;
            }
        }
        return (double)correct / y_pred.size();
    }
    std::vector<std::string> Ensemble::show() const
    {
        auto result = std::vector<std::string>();
        for (auto i = 0; i < n_models; ++i) {
            auto res = models[i]->show();
            result.insert(result.end(), res.begin(), res.end());
        }
        return result;
    }
    std::vector<std::string> Ensemble::graph(const std::string& title) const
    {
        auto result = std::vector<std::string>();
        for (auto i = 0; i < n_models; ++i) {
            auto res = models[i]->graph(title + "_" + std::to_string(i));
            result.insert(result.end(), res.begin(), res.end());
        }
        return result;
    }
    int Ensemble::getNumberOfNodes() const
    {
        int nodes = 0;
        for (auto i = 0; i < n_models; ++i) {
            nodes += models[i]->getNumberOfNodes();
        }
        return nodes;
    }
    int Ensemble::getNumberOfEdges() const
    {
        int edges = 0;
        for (auto i = 0; i < n_models; ++i) {
            edges += models[i]->getNumberOfEdges();
        }
        return edges;
    }
    int Ensemble::getNumberOfStates() const
    {
        int nstates = 0;
        for (auto i = 0; i < n_models; ++i) {
            nstates += models[i]->getNumberOfStates();
        }
        return nstates;
    }
}
46 bayesnet/ensembles/Ensemble.h Normal file
@@ -0,0 +1,46 @@
#ifndef ENSEMBLE_H
#define ENSEMBLE_H
#include <torch/torch.h>
#include "bayesnet/utils/BayesMetrics.h"
#include "bayesnet/utils/bayesnetUtils.h"
#include "bayesnet/classifiers/Classifier.h"

namespace bayesnet {
    class Ensemble : public Classifier {
    public:
        Ensemble(bool predict_voting = true);
        virtual ~Ensemble() = default;
        torch::Tensor predict(torch::Tensor& X) override;
        std::vector<int> predict(std::vector<std::vector<int>>& X) override;
        torch::Tensor predict_proba(torch::Tensor& X) override;
        std::vector<std::vector<double>> predict_proba(std::vector<std::vector<int>>& X) override;
        float score(torch::Tensor& X, torch::Tensor& y) override;
        float score(std::vector<std::vector<int>>& X, std::vector<int>& y) override;
        int getNumberOfNodes() const override;
        int getNumberOfEdges() const override;
        int getNumberOfStates() const override;
        std::vector<std::string> show() const override;
        std::vector<std::string> graph(const std::string& title) const override;
        std::vector<std::string> topological_order() override
        {
            return std::vector<std::string>();
        }
        void dump_cpt() const override
        {
        }
    protected:
        torch::Tensor predict_average_voting(torch::Tensor& X);
        std::vector<std::vector<double>> predict_average_voting(std::vector<std::vector<int>>& X);
        torch::Tensor predict_average_proba(torch::Tensor& X);
        std::vector<std::vector<double>> predict_average_proba(std::vector<std::vector<int>>& X);
        torch::Tensor compute_arg_max(torch::Tensor& X);
        std::vector<int> compute_arg_max(std::vector<std::vector<double>>& X);
        torch::Tensor voting(torch::Tensor& votes);
        unsigned n_models;
        std::vector<std::unique_ptr<Classifier>> models;
        std::vector<double> significanceModels;
        void trainModel(const torch::Tensor& weights) override;
        bool predict_voting;
    };
}
#endif
72 bayesnet/feature_selection/CFS.cc Normal file
@@ -0,0 +1,72 @@
#include <limits>
#include "bayesnet/utils/bayesnetUtils.h"
#include "CFS.h"
namespace bayesnet {
    void CFS::fit()
    {
        initialize();
        computeSuLabels();
        auto featureOrder = argsort(suLabels); // sort in descending order
        auto continueCondition = true;
        auto feature = featureOrder[0];
        selectedFeatures.push_back(feature);
        selectedScores.push_back(suLabels[feature]);
        selectedFeatures.erase(selectedFeatures.begin());
        while (continueCondition) {
            double merit = std::numeric_limits<double>::lowest();
            int bestFeature = -1;
            for (auto feature : featureOrder) {
                selectedFeatures.push_back(feature);
                // Compute merit with selectedFeatures
                auto meritNew = computeMeritCFS();
                if (meritNew > merit) {
                    merit = meritNew;
                    bestFeature = feature;
                }
                selectedFeatures.pop_back();
            }
            if (bestFeature == -1) {
                // meritNew has to be NaN due to constant features
                break;
            }
            selectedFeatures.push_back(bestFeature);
            selectedScores.push_back(merit);
            featureOrder.erase(remove(featureOrder.begin(), featureOrder.end(), bestFeature), featureOrder.end());
            continueCondition = computeContinueCondition(featureOrder);
        }
        fitted = true;
    }
    bool CFS::computeContinueCondition(const std::vector<int>& featureOrder)
    {
        if (selectedFeatures.size() == maxFeatures || featureOrder.size() == 0) {
            return false;
        }
        if (selectedScores.size() >= 5) {
            /*
            "To prevent the best first search from exploring the entire
            feature subset search space, a stopping criterion is imposed.
            The search will terminate if five consecutive fully expanded
            subsets show no improvement over the current best subset."
            as stated in Mark A. Hall's thesis
            */
            double item_ant = std::numeric_limits<double>::lowest();
            int num = 0;
            std::vector<double> lastFive(selectedScores.end() - 5, selectedScores.end());
            for (auto item : lastFive) {
                if (item_ant == std::numeric_limits<double>::lowest()) {
                    item_ant = item;
                }
                if (item > item_ant) {
                    break;
                } else {
                    num++;
                    item_ant = item;
                }
            }
            if (num == 5) {
                return false;
            }
        }
        return true;
    }
}
20 bayesnet/feature_selection/CFS.h Normal file
@@ -0,0 +1,20 @@
#ifndef CFS_H
#define CFS_H
#include <torch/torch.h>
#include <vector>
#include "bayesnet/feature_selection/FeatureSelect.h"
namespace bayesnet {
    class CFS : public FeatureSelect {
    public:
        // dataset is a (n+1)xm tensor of integers where dataset[-1] is the y std::vector
        CFS(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights) :
            FeatureSelect(samples, features, className, maxFeatures, classNumStates, weights)
        {
        }
        virtual ~CFS() {};
        void fit() override;
    private:
        bool computeContinueCondition(const std::vector<int>& featureOrder);
    };
}
#endif
44 bayesnet/feature_selection/FCBF.cc Normal file
@@ -0,0 +1,44 @@
#include "bayesnet/utils/bayesnetUtils.h"
#include "FCBF.h"
namespace bayesnet {

    FCBF::FCBF(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights, const double threshold) :
        FeatureSelect(samples, features, className, maxFeatures, classNumStates, weights), threshold(threshold)
    {
        if (threshold < 1e-7) {
            throw std::invalid_argument("Threshold cannot be less than 1e-7");
        }
    }
    void FCBF::fit()
    {
        initialize();
        computeSuLabels();
        auto featureOrder = argsort(suLabels); // sort in descending order
        auto featureOrderCopy = featureOrder;
        for (const auto& feature : featureOrder) {
            // Don't compare the feature with itself
            featureOrderCopy.erase(featureOrderCopy.begin());
            if (suLabels.at(feature) == 0.0) {
                // The feature has been removed from the list
                continue;
            }
            if (suLabels.at(feature) < threshold) {
                break;
            }
            // Remove redundant features
            for (const auto& featureCopy : featureOrderCopy) {
                double value = computeSuFeatures(feature, featureCopy);
                if (value >= suLabels.at(featureCopy)) {
                    // Remove feature from the list
                    suLabels[featureCopy] = 0.0;
                }
            }
            selectedFeatures.push_back(feature);
            selectedScores.push_back(suLabels[feature]);
            if (selectedFeatures.size() == maxFeatures) {
                break;
            }
        }
        fitted = true;
    }
}
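Note: the redundancy rule above is the standard FCBF criterion. A candidate feature f_j is dropped when some already-accepted predominant feature f_i satisfies (notation from the FCBF literature, not in the diff)

    SU(f_i, f_j) \geq SU(f_j, C)

i.e. f_j is more correlated with f_i than with the class C itself; zeroing its entry in suLabels marks the removal.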
17 bayesnet/feature_selection/FCBF.h Normal file
@@ -0,0 +1,17 @@
#ifndef FCBF_H
#define FCBF_H
#include <torch/torch.h>
#include <vector>
#include "bayesnet/feature_selection/FeatureSelect.h"
namespace bayesnet {
    class FCBF : public FeatureSelect {
    public:
        // dataset is a (n+1)xm tensor of integers where dataset[-1] is the y std::vector
        FCBF(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights, const double threshold);
        virtual ~FCBF() {};
        void fit() override;
    private:
        double threshold = -1;
    };
}
#endif
78 bayesnet/feature_selection/FeatureSelect.cc Normal file
@@ -0,0 +1,78 @@
#include <limits>
#include "bayesnet/utils/bayesnetUtils.h"
#include "FeatureSelect.h"
namespace bayesnet {
    FeatureSelect::FeatureSelect(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights) :
        Metrics(samples, features, className, classNumStates), maxFeatures(maxFeatures == 0 ? samples.size(0) - 1 : maxFeatures), weights(weights)
    {
    }
    void FeatureSelect::initialize()
    {
        selectedFeatures.clear();
        selectedScores.clear();
    }
    double FeatureSelect::symmetricalUncertainty(int a, int b)
    {
        /*
        Compute symmetrical uncertainty. Normalizes the information gain (mutual
        information) with the entropies of the two features to compensate for the
        bias toward high-cardinality features. Range: [0, 1].
        (https://www.sciencedirect.com/science/article/pii/S0020025519303603)
        */
        auto x = samples.index({ a, "..." });
        auto y = samples.index({ b, "..." });
        auto mu = mutualInformation(x, y, weights);
        auto hx = entropy(x, weights);
        auto hy = entropy(y, weights);
        return 2.0 * mu / (hx + hy);
    }
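Note: in symbols (standard definition; the weighted entropy/MI variants computed by Metrics are assumed to mirror it):

    SU(X, Y) = \frac{2\, I(X;Y)}{H(X) + H(Y)}, \qquad I(X;Y) = H(X) + H(Y) - H(X,Y)

SU is 0 for independent variables and 1 when each variable fully determines the other.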
    void FeatureSelect::computeSuLabels()
    {
        // Compute Symmetrical Uncertainty between features and labels
        // https://en.wikipedia.org/wiki/Symmetric_uncertainty
        for (int i = 0; i < features.size(); ++i) {
            suLabels.push_back(symmetricalUncertainty(i, -1));
        }
    }
    double FeatureSelect::computeSuFeatures(const int firstFeature, const int secondFeature)
    {
        // Compute Symmetrical Uncertainty between a pair of features, with memoization
        // https://en.wikipedia.org/wiki/Symmetric_uncertainty
        try {
            return suFeatures.at({ firstFeature, secondFeature });
        }
        catch (const std::out_of_range& e) {
            double result = symmetricalUncertainty(firstFeature, secondFeature);
            suFeatures[{firstFeature, secondFeature}] = result;
            return result;
        }
    }
    double FeatureSelect::computeMeritCFS()
    {
        double rcf = 0;
        for (auto feature : selectedFeatures) {
            rcf += suLabels[feature];
        }
        double rff = 0;
        int n = selectedFeatures.size();
        for (const auto& item : doCombinations(selectedFeatures)) {
            rff += computeSuFeatures(item.first, item.second);
        }
        return rcf / sqrt(n + (n * n - n) * rff);
    }
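Note: for comparison, Hall's CFS merit for a subset S of k features is usually written with averages (Hall, 1999; the code above works with raw sums of SU values, so its constants are arranged differently):

    Merit_S = \frac{k\, \overline{r}_{cf}}{\sqrt{k + k(k-1)\, \overline{r}_{ff}}}

where \overline{r}_{cf} is the mean feature-class SU and \overline{r}_{ff} the mean feature-feature SU: merit grows with relevance to the class and shrinks with redundancy among the selected features.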
    std::vector<int> FeatureSelect::getFeatures() const
    {
        if (!fitted) {
            throw std::runtime_error("FeatureSelect not fitted");
        }
        return selectedFeatures;
    }
    std::vector<double> FeatureSelect::getScores() const
    {
        if (!fitted) {
            throw std::runtime_error("FeatureSelect not fitted");
        }
        return selectedScores;
    }
}
30 bayesnet/feature_selection/FeatureSelect.h Normal file
@@ -0,0 +1,30 @@
#ifndef FEATURE_SELECT_H
#define FEATURE_SELECT_H
#include <torch/torch.h>
#include <vector>
#include "bayesnet/utils/BayesMetrics.h"
namespace bayesnet {
    class FeatureSelect : public Metrics {
    public:
        // dataset is a (n+1)xm tensor of integers where dataset[-1] is the y std::vector
        FeatureSelect(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights);
        virtual ~FeatureSelect() {};
        virtual void fit() = 0;
        std::vector<int> getFeatures() const;
        std::vector<double> getScores() const;
    protected:
        void initialize();
        void computeSuLabels();
        double computeSuFeatures(const int a, const int b);
        double symmetricalUncertainty(int a, int b);
        double computeMeritCFS();
        const torch::Tensor& weights;
        int maxFeatures;
        std::vector<int> selectedFeatures;
        std::vector<double> selectedScores;
        std::vector<double> suLabels;
        std::map<std::pair<int, int>, double> suFeatures;
        bool fitted = false;
    };
}
#endif
47 bayesnet/feature_selection/IWSS.cc Normal file
@@ -0,0 +1,47 @@
#include <limits>
#include "bayesnet/utils/bayesnetUtils.h"
#include "IWSS.h"
namespace bayesnet {
    IWSS::IWSS(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights, const double threshold) :
        FeatureSelect(samples, features, className, maxFeatures, classNumStates, weights), threshold(threshold)
    {
        if (threshold < 0 || threshold > .5) {
            throw std::invalid_argument("Threshold has to be in [0, 0.5]");
        }
    }
    void IWSS::fit()
    {
        initialize();
        computeSuLabels();
        auto featureOrder = argsort(suLabels); // sort in descending order
        auto featureOrderCopy = featureOrder;
        // Add the first and second features to the result
        // First with its own score
        auto first_feature = pop_first(featureOrderCopy);
        selectedFeatures.push_back(first_feature);
        selectedScores.push_back(suLabels.at(first_feature));
        // Second with the score of the candidates
        selectedFeatures.push_back(pop_first(featureOrderCopy));
        auto merit = computeMeritCFS();
        selectedScores.push_back(merit);
        for (const auto feature : featureOrderCopy) {
            selectedFeatures.push_back(feature);
            // Compute merit with selectedFeatures
            auto meritNew = computeMeritCFS();
            double delta = merit != 0.0 ? std::abs(merit - meritNew) / merit : 0.0;
            if (meritNew > merit || delta < threshold) {
                if (meritNew > merit) {
                    merit = meritNew;
                }
                selectedScores.push_back(meritNew);
            } else {
                selectedFeatures.pop_back();
                break;
            }
            if (selectedFeatures.size() == maxFeatures) {
                break;
            }
        }
        fitted = true;
    }
}
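Note: the acceptance rule in the loop is the IWSS relative-improvement criterion: a candidate is kept if the merit improves, or if the relative change stays within the threshold θ (θ ∈ [0, 0.5], enforced by the constructor):

    accept f \iff Merit_{S \cup \{f\}} > Merit_S \ \lor\ \frac{|Merit_S - Merit_{S \cup \{f\}}|}{Merit_S} < \theta

otherwise the search stops at the first rejected candidate.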
17 bayesnet/feature_selection/IWSS.h Normal file
@@ -0,0 +1,17 @@
#ifndef IWSS_H
#define IWSS_H
#include <vector>
#include <torch/torch.h>
#include "FeatureSelect.h"
namespace bayesnet {
    class IWSS : public FeatureSelect {
    public:
        // dataset is a (n+1)xm tensor of integers where dataset[-1] is the y std::vector
        IWSS(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights, const double threshold);
        virtual ~IWSS() {};
        void fit() override;
    private:
        double threshold = -1;
    };
}
#endif
@@ -1,21 +1,20 @@
|
||||
#include <thread>
|
||||
#include <mutex>
|
||||
#include "Network.h"
|
||||
#include "bayesnetUtils.h"
|
||||
#include "bayesnet/utils/bayesnetUtils.h"
|
||||
namespace bayesnet {
|
||||
Network::Network() : features(vector<string>()), className(""), classNumStates(0), fitted(false) {}
|
||||
Network::Network(float maxT) : features(vector<string>()), className(""), classNumStates(0), maxThreads(maxT), fitted(false) {}
|
||||
Network::Network(float maxT, int smoothing) : laplaceSmoothing(smoothing), features(vector<string>()), className(""), classNumStates(0), maxThreads(maxT), fitted(false) {}
|
||||
Network::Network() : features(std::vector<std::string>()), className(""), classNumStates(0), fitted(false), laplaceSmoothing(0) {}
|
||||
Network::Network(float maxT) : features(std::vector<std::string>()), className(""), classNumStates(0), maxThreads(maxT), fitted(false), laplaceSmoothing(0) {}
|
||||
Network::Network(Network& other) : laplaceSmoothing(other.laplaceSmoothing), features(other.features), className(other.className), classNumStates(other.getClassNumStates()), maxThreads(other.
|
||||
getmaxThreads()), fitted(other.fitted)
|
||||
{
|
||||
for (const auto& pair : other.nodes) {
|
||||
nodes[pair.first] = std::make_unique<Node>(*pair.second);
|
||||
for (const auto& node : other.nodes) {
|
||||
nodes[node.first] = std::make_unique<Node>(*node.second);
|
||||
}
|
||||
}
|
||||
void Network::initialize()
|
||||
{
|
||||
features = vector<string>();
|
||||
features = std::vector<std::string>();
|
||||
className = "";
|
||||
classNumStates = 0;
|
||||
fitted = false;
|
||||
@@ -30,10 +29,10 @@ namespace bayesnet {
|
||||
{
|
||||
return samples;
|
||||
}
|
||||
void Network::addNode(const string& name)
|
||||
void Network::addNode(const std::string& name)
|
||||
{
|
||||
if (name == "") {
|
||||
throw invalid_argument("Node name cannot be empty");
|
||||
throw std::invalid_argument("Node name cannot be empty");
|
||||
}
|
||||
if (nodes.find(name) != nodes.end()) {
|
||||
return;
|
||||
@@ -43,7 +42,7 @@ namespace bayesnet {
|
||||
}
|
||||
nodes[name] = std::make_unique<Node>(name);
|
||||
}
|
||||
vector<string> Network::getFeatures() const
|
||||
std::vector<std::string> Network::getFeatures() const
|
||||
{
|
||||
return features;
|
||||
}
|
||||
@@ -59,11 +58,11 @@ namespace bayesnet {
|
||||
}
|
||||
return result;
|
||||
}
|
||||
string Network::getClassName() const
|
||||
std::string Network::getClassName() const
|
||||
{
|
||||
return className;
|
||||
}
|
||||
bool Network::isCyclic(const string& nodeId, unordered_set<string>& visited, unordered_set<string>& recStack)
|
||||
bool Network::isCyclic(const std::string& nodeId, std::unordered_set<std::string>& visited, std::unordered_set<std::string>& recStack)
|
||||
{
|
||||
if (visited.find(nodeId) == visited.end()) // if node hasn't been visited yet
|
||||
{
|
||||
@@ -72,92 +71,96 @@ namespace bayesnet {
|
||||
for (Node* child : nodes[nodeId]->getChildren()) {
|
||||
if (visited.find(child->getName()) == visited.end() && isCyclic(child->getName(), visited, recStack))
|
||||
return true;
|
||||
else if (recStack.find(child->getName()) != recStack.end())
|
||||
if (recStack.find(child->getName()) != recStack.end())
|
||||
return true;
|
||||
}
|
||||
}
|
||||
recStack.erase(nodeId); // remove node from recursion stack before function ends
|
||||
return false;
|
||||
}
|
||||
void Network::addEdge(const string& parent, const string& child)
|
||||
void Network::addEdge(const std::string& parent, const std::string& child)
|
||||
{
|
||||
if (nodes.find(parent) == nodes.end()) {
|
||||
throw invalid_argument("Parent node " + parent + " does not exist");
|
||||
throw std::invalid_argument("Parent node " + parent + " does not exist");
|
||||
}
|
||||
if (nodes.find(child) == nodes.end()) {
|
||||
throw invalid_argument("Child node " + child + " does not exist");
|
||||
throw std::invalid_argument("Child node " + child + " does not exist");
|
||||
}
|
||||
// Temporarily add edge to check for cycles
|
||||
nodes[parent]->addChild(nodes[child].get());
|
||||
nodes[child]->addParent(nodes[parent].get());
|
||||
unordered_set<string> visited;
|
||||
unordered_set<string> recStack;
|
||||
std::unordered_set<std::string> visited;
|
||||
std::unordered_set<std::string> recStack;
|
||||
if (isCyclic(nodes[child]->getName(), visited, recStack)) // if adding this edge forms a cycle
|
||||
{
|
||||
// remove problematic edge
|
||||
nodes[parent]->removeChild(nodes[child].get());
|
||||
nodes[child]->removeParent(nodes[parent].get());
|
||||
throw invalid_argument("Adding this edge forms a cycle in the graph.");
|
||||
throw std::invalid_argument("Adding this edge forms a cycle in the graph.");
|
||||
}
|
||||
}
|
||||
map<string, std::unique_ptr<Node>>& Network::getNodes()
std::map<std::string, std::unique_ptr<Node>>& Network::getNodes()
{
return nodes;
}
void Network::checkFitData(int n_samples, int n_features, int n_samples_y, const vector<string>& featureNames, const string& className, const map<string, vector<int>>& states)
void Network::checkFitData(int n_samples, int n_features, int n_samples_y, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights)
{
if (weights.size(0) != n_samples) {
throw std::invalid_argument("Weights (" + std::to_string(weights.size(0)) + ") must have the same number of elements as samples (" + std::to_string(n_samples) + ") in Network::fit");
}
if (n_samples != n_samples_y) {
throw invalid_argument("X and y must have the same number of samples in Network::fit (" + to_string(n_samples) + " != " + to_string(n_samples_y) + ")");
throw std::invalid_argument("X and y must have the same number of samples in Network::fit (" + std::to_string(n_samples) + " != " + std::to_string(n_samples_y) + ")");
}
if (n_features != featureNames.size()) {
throw invalid_argument("X and features must have the same number of features in Network::fit (" + to_string(n_features) + " != " + to_string(featureNames.size()) + ")");
throw std::invalid_argument("X and features must have the same number of features in Network::fit (" + std::to_string(n_features) + " != " + std::to_string(featureNames.size()) + ")");
}
if (n_features != features.size() - 1) {
throw invalid_argument("X and local features must have the same number of features in Network::fit (" + to_string(n_features) + " != " + to_string(features.size() - 1) + ")");
throw std::invalid_argument("X and local features must have the same number of features in Network::fit (" + std::to_string(n_features) + " != " + std::to_string(features.size() - 1) + ")");
}
if (find(features.begin(), features.end(), className) == features.end()) {
throw invalid_argument("className not found in Network::features");
throw std::invalid_argument("className not found in Network::features");
}
for (auto& feature : featureNames) {
if (find(features.begin(), features.end(), feature) == features.end()) {
throw invalid_argument("Feature " + feature + " not found in Network::features");
throw std::invalid_argument("Feature " + feature + " not found in Network::features");
}
if (states.find(feature) == states.end()) {
throw invalid_argument("Feature " + feature + " not found in states");
throw std::invalid_argument("Feature " + feature + " not found in states");
}
}
}
void Network::setStates(const map<string, vector<int>>& states)
void Network::setStates(const std::map<std::string, std::vector<int>>& states)
{
// Set states to every Node in the network
for (int i = 0; i < features.size(); ++i) {
nodes[features[i]]->setNumStates(states.at(features[i]).size());
}
classNumStates = nodes[className]->getNumStates();
for_each(features.begin(), features.end(), [this, &states](const std::string& feature) {
nodes.at(feature)->setNumStates(states.at(feature).size());
});
classNumStates = nodes.at(className)->getNumStates();
}
// X comes in nxm, where n is the number of features and m the number of samples
void Network::fit(const torch::Tensor& X, const torch::Tensor& y, const vector<string>& featureNames, const string& className, const map<string, vector<int>>& states)
void Network::fit(const torch::Tensor& X, const torch::Tensor& y, const torch::Tensor& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states)
{
checkFitData(X.size(1), X.size(0), y.size(0), featureNames, className, states);
checkFitData(X.size(1), X.size(0), y.size(0), featureNames, className, states, weights);
this->className = className;
Tensor ytmp = torch::transpose(y.view({ y.size(0), 1 }), 0, 1);
torch::Tensor ytmp = torch::transpose(y.view({ y.size(0), 1 }), 0, 1);
samples = torch::cat({ X , ytmp }, 0);
for (int i = 0; i < featureNames.size(); ++i) {
auto row_feature = X.index({ i, "..." });
}
completeFit(states);
completeFit(states, weights);
}
void Network::fit(const torch::Tensor& samples, const vector<string>& featureNames, const string& className, const map<string, vector<int>>& states)
void Network::fit(const torch::Tensor& samples, const torch::Tensor& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states)
{
checkFitData(samples.size(1), samples.size(0) - 1, samples.size(1), featureNames, className, states);
checkFitData(samples.size(1), samples.size(0) - 1, samples.size(1), featureNames, className, states, weights);
this->className = className;
this->samples = samples;
completeFit(states);
completeFit(states, weights);
}
// input_data comes in nxm, where n is the number of features and m the number of samples
void Network::fit(const vector<vector<int>>& input_data, const vector<int>& labels, const vector<string>& featureNames, const string& className, const map<string, vector<int>>& states)
void Network::fit(const std::vector<std::vector<int>>& input_data, const std::vector<int>& labels, const std::vector<double>& weights_, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states)
{
checkFitData(input_data[0].size(), input_data.size(), labels.size(), featureNames, className, states);
const torch::Tensor weights = torch::tensor(weights_, torch::kFloat64);
checkFitData(input_data[0].size(), input_data.size(), labels.size(), featureNames, className, states, weights);
this->className = className;
// Build tensor of samples (nxm) (n+1 because of the class)
samples = torch::zeros({ static_cast<int>(input_data.size() + 1), static_cast<int>(input_data[0].size()) }, torch::kInt32);
@@ -165,42 +168,17 @@ namespace bayesnet {
samples.index_put_({ i, "..." }, torch::tensor(input_data[i], torch::kInt32));
}
samples.index_put_({ -1, "..." }, torch::tensor(labels, torch::kInt32));
completeFit(states);
completeFit(states, weights);
}
void Network::completeFit(const map<string, vector<int>>& states)
void Network::completeFit(const std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights)
{
setStates(states);
int maxThreadsRunning = static_cast<int>(std::thread::hardware_concurrency() * maxThreads);
if (maxThreadsRunning < 1) {
maxThreadsRunning = 1;
}
vector<thread> threads;
mutex mtx;
condition_variable cv;
int activeThreads = 0;
int nextNodeIndex = 0;
while (nextNodeIndex < nodes.size()) {
unique_lock<mutex> lock(mtx);
cv.wait(lock, [&activeThreads, &maxThreadsRunning]() { return activeThreads < maxThreadsRunning; });
threads.emplace_back([this, &nextNodeIndex, &mtx, &cv, &activeThreads]() {
while (true) {
unique_lock<mutex> lock(mtx);
if (nextNodeIndex >= nodes.size()) {
break; // No more work remaining
}
auto& pair = *std::next(nodes.begin(), nextNodeIndex);
++nextNodeIndex;
lock.unlock();
pair.second->computeCPT(samples, features, laplaceSmoothing);
lock.lock();
nodes[pair.first] = std::move(pair.second);
lock.unlock();
}
lock_guard<mutex> lock(mtx);
--activeThreads;
cv.notify_one();
laplaceSmoothing = 1.0 / samples.size(1); // To use in CPT computation
std::vector<std::thread> threads;
for (auto& node : nodes) {
threads.emplace_back([this, &node, &weights]() {
node.second->computeCPT(samples, features, laplaceSmoothing, weights);
});
++activeThreads;
}
for (auto& thread : threads) {
thread.join();
@@ -210,12 +188,12 @@ namespace bayesnet {
torch::Tensor Network::predict_tensor(const torch::Tensor& samples, const bool proba)
{
if (!fitted) {
throw logic_error("You must call fit() before calling predict()");
throw std::logic_error("You must call fit() before calling predict()");
}
torch::Tensor result;
result = torch::zeros({ samples.size(1), classNumStates }, torch::kFloat64);
for (int i = 0; i < samples.size(1); ++i) {
const Tensor sample = samples.index({ "...", i });
const torch::Tensor sample = samples.index({ "...", i });
auto psample = predict_sample(sample);
auto temp = torch::tensor(psample, torch::kFloat64);
// result.index_put_({ i, "..." }, torch::tensor(predict_sample(sample), torch::kFloat64));
@@ -223,36 +201,35 @@ namespace bayesnet {
}
if (proba)
return result;
else
return result.argmax(1);
return result.argmax(1);
}
// Return mxn tensor of probabilities
Tensor Network::predict_proba(const Tensor& samples)
torch::Tensor Network::predict_proba(const torch::Tensor& samples)
{
return predict_tensor(samples, true);
}

// Return mxn tensor of probabilities
Tensor Network::predict(const Tensor& samples)
torch::Tensor Network::predict(const torch::Tensor& samples)
{
return predict_tensor(samples, false);
}

// Return mx1 vector of predictions
// tsamples is nxm vector of samples
vector<int> Network::predict(const vector<vector<int>>& tsamples)
// Return mx1 std::vector of predictions
// tsamples is nxm std::vector of samples
std::vector<int> Network::predict(const std::vector<std::vector<int>>& tsamples)
{
if (!fitted) {
throw logic_error("You must call fit() before calling predict()");
throw std::logic_error("You must call fit() before calling predict()");
}
vector<int> predictions;
vector<int> sample;
std::vector<int> predictions;
std::vector<int> sample;
for (int row = 0; row < tsamples[0].size(); ++row) {
sample.clear();
for (int col = 0; col < tsamples.size(); ++col) {
sample.push_back(tsamples[col][row]);
}
vector<double> classProbabilities = predict_sample(sample);
std::vector<double> classProbabilities = predict_sample(sample);
// Find the class with the maximum posterior probability
auto maxElem = max_element(classProbabilities.begin(), classProbabilities.end());
int predictedClass = distance(classProbabilities.begin(), maxElem);
@@ -260,14 +237,15 @@ namespace bayesnet {
}
return predictions;
}
// Return mxn vector of probabilities
vector<vector<double>> Network::predict_proba(const vector<vector<int>>& tsamples)
// Return mxn std::vector of probabilities
// tsamples is nxm std::vector of samples
std::vector<std::vector<double>> Network::predict_proba(const std::vector<std::vector<int>>& tsamples)
{
if (!fitted) {
throw logic_error("You must call fit() before calling predict_proba()");
throw std::logic_error("You must call fit() before calling predict_proba()");
}
vector<vector<double>> predictions;
vector<int> sample;
std::vector<std::vector<double>> predictions;
std::vector<int> sample;
for (int row = 0; row < tsamples[0].size(); ++row) {
sample.clear();
for (int col = 0; col < tsamples.size(); ++col) {
@@ -277,9 +255,9 @@ namespace bayesnet {
}
return predictions;
}
double Network::score(const vector<vector<int>>& tsamples, const vector<int>& labels)
double Network::score(const std::vector<std::vector<int>>& tsamples, const std::vector<int>& labels)
{
vector<int> y_pred = predict(tsamples);
std::vector<int> y_pred = predict(tsamples);
int correct = 0;
for (int i = 0; i < y_pred.size(); ++i) {
if (y_pred[i] == labels[i]) {
@@ -288,35 +266,35 @@ namespace bayesnet {
}
return (double)correct / y_pred.size();
}
// Return 1xn vector of probabilities
vector<double> Network::predict_sample(const vector<int>& sample)
// Return 1xn std::vector of probabilities
std::vector<double> Network::predict_sample(const std::vector<int>& sample)
{
// Ensure the sample size is equal to the number of features
if (sample.size() != features.size() - 1) {
throw invalid_argument("Sample size (" + to_string(sample.size()) +
") does not match the number of features (" + to_string(features.size() - 1) + ")");
throw std::invalid_argument("Sample size (" + std::to_string(sample.size()) +
") does not match the number of features (" + std::to_string(features.size() - 1) + ")");
}
map<string, int> evidence;
std::map<std::string, int> evidence;
for (int i = 0; i < sample.size(); ++i) {
evidence[features[i]] = sample[i];
}
return exactInference(evidence);
}
// Return 1xn vector of probabilities
vector<double> Network::predict_sample(const Tensor& sample)
// Return 1xn std::vector of probabilities
std::vector<double> Network::predict_sample(const torch::Tensor& sample)
{
// Ensure the sample size is equal to the number of features
if (sample.size(0) != features.size() - 1) {
throw invalid_argument("Sample size (" + to_string(sample.size(0)) +
") does not match the number of features (" + to_string(features.size() - 1) + ")");
throw std::invalid_argument("Sample size (" + std::to_string(sample.size(0)) +
") does not match the number of features (" + std::to_string(features.size() - 1) + ")");
}
map<string, int> evidence;
std::map<std::string, int> evidence;
for (int i = 0; i < sample.size(0); ++i) {
evidence[features[i]] = sample[i].item<int>();
}
return exactInference(evidence);
}
double Network::computeFactor(map<string, int>& completeEvidence)
double Network::computeFactor(std::map<std::string, int>& completeEvidence)
{
double result = 1.0;
for (auto& node : getNodes()) {
@@ -324,17 +302,17 @@ namespace bayesnet {
}
return result;
}
vector<double> Network::exactInference(map<string, int>& evidence)
std::vector<double> Network::exactInference(std::map<std::string, int>& evidence)
{
vector<double> result(classNumStates, 0.0);
vector<thread> threads;
mutex mtx;
std::vector<double> result(classNumStates, 0.0);
std::vector<std::thread> threads;
std::mutex mtx;
for (int i = 0; i < classNumStates; ++i) {
threads.emplace_back([this, &result, &evidence, i, &mtx]() {
auto completeEvidence = map<string, int>(evidence);
auto completeEvidence = std::map<std::string, int>(evidence);
completeEvidence[getClassName()] = i;
double factor = computeFactor(completeEvidence);
lock_guard<mutex> lock(mtx);
std::lock_guard<std::mutex> lock(mtx);
result[i] = factor;
});
}
@@ -343,15 +321,15 @@ namespace bayesnet {
}
// Normalize result
double sum = accumulate(result.begin(), result.end(), 0.0);
transform(result.begin(), result.end(), result.begin(), [sum](double& value) { return value / sum; });
transform(result.begin(), result.end(), result.begin(), [sum](const double& value) { return value / sum; });
return result;
}
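Reading the exactInference hunks together: one thread per class state computes the product of CPT factors over all nodes with the evidence completed by that state, and the final transform normalizes. In formula form (notation reconstructed from the code, not stated in the source):

    P(c \mid e) = \frac{\prod_{X} P(x \mid \mathrm{pa}(X))\big|_{e,\,C=c}}{\sum_{c'} \prod_{X} P(x \mid \mathrm{pa}(X))\big|_{e,\,C=c'}}

so result[i] holds the unnormalized factor product for class state i, and the last step divides each entry by their sum.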
vector<string> Network::show() const
std::vector<std::string> Network::show() const
{
vector<string> result;
std::vector<std::string> result;
// Draw the network
for (auto& node : nodes) {
string line = node.first + " -> ";
std::string line = node.first + " -> ";
for (auto child : node.second->getChildren()) {
line += child->getName() + ", ";
}
@@ -359,12 +337,12 @@ namespace bayesnet {
}
return result;
}
vector<string> Network::graph(const string& title) const
std::vector<std::string> Network::graph(const std::string& title) const
{
auto output = vector<string>();
auto output = std::vector<std::string>();
auto prefix = "digraph BayesNet {\nlabel=<BayesNet ";
auto suffix = ">\nfontsize=30\nfontcolor=blue\nlabelloc=t\nlayout=circo\n";
string header = prefix + title + suffix;
std::string header = prefix + title + suffix;
output.push_back(header);
for (auto& node : nodes) {
auto result = node.second->graph(className);
@@ -373,9 +351,9 @@ namespace bayesnet {
output.push_back("}\n");
return output;
}
vector<pair<string, string>> Network::getEdges() const
std::vector<std::pair<std::string, std::string>> Network::getEdges() const
{
auto edges = vector<pair<string, string>>();
auto edges = std::vector<std::pair<std::string, std::string>>();
for (const auto& node : nodes) {
auto head = node.first;
for (const auto& child : node.second->getChildren()) {
@@ -389,13 +367,12 @@ namespace bayesnet {
{
return getEdges().size();
}
vector<string> Network::topological_sort()
std::vector<std::string> Network::topological_sort()
{
/* Check if all the fathers of every node are before the node */
auto result = features;
result.erase(remove(result.begin(), result.end(), className), result.end());
bool ending{ false };
int idx = 0;
while (!ending) {
ending = true;
for (auto feature : features) {
@@ -417,10 +394,10 @@ namespace bayesnet {
ending = false;
}
} else {
throw logic_error("Error in topological sort because of node " + feature + " is not in result");
throw std::logic_error("Error in topological sort because of node " + feature + " is not in result");
}
} else {
throw logic_error("Error in topological sort because of node father " + fatherName + " is not in result");
throw std::logic_error("Error in topological sort because of node father " + fatherName + " is not in result");
}
}
}
@@ -430,7 +407,8 @@ namespace bayesnet {
void Network::dump_cpt() const
{
for (auto& node : nodes) {
cout << "* " << node.first << ": (" << node.second->getNumStates() << ") : " << node.second->getCPT().sizes() << endl;
std::cout << "* " << node.first << ": (" << node.second->getNumStates() << ") : " << node.second->getCPT().sizes() << std::endl;
std::cout << node.second->getCPT() << std::endl;
}
}
}
bayesnet/network/Network.h (new file, 63 lines)
@@ -0,0 +1,63 @@
#ifndef NETWORK_H
#define NETWORK_H
#include <map>
#include <vector>
#include "bayesnet/config.h"
#include "Node.h"

namespace bayesnet {
class Network {
public:
Network();
explicit Network(float);
explicit Network(Network&);
~Network() = default;
torch::Tensor& getSamples();
float getmaxThreads();
void addNode(const std::string&);
void addEdge(const std::string&, const std::string&);
std::map<std::string, std::unique_ptr<Node>>& getNodes();
std::vector<std::string> getFeatures() const;
int getStates() const;
std::vector<std::pair<std::string, std::string>> getEdges() const;
int getNumEdges() const;
int getClassNumStates() const;
std::string getClassName() const;
/*
Notice: Nodes have to be inserted in the same order as they are in the dataset, i.e., first node is first column and so on.
*/
void fit(const std::vector<std::vector<int>>& input_data, const std::vector<int>& labels, const std::vector<double>& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states);
void fit(const torch::Tensor& X, const torch::Tensor& y, const torch::Tensor& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states);
void fit(const torch::Tensor& samples, const torch::Tensor& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states);
std::vector<int> predict(const std::vector<std::vector<int>>&); // Return mx1 std::vector of predictions
torch::Tensor predict(const torch::Tensor&); // Return mx1 tensor of predictions
torch::Tensor predict_tensor(const torch::Tensor& samples, const bool proba);
std::vector<std::vector<double>> predict_proba(const std::vector<std::vector<int>>&); // Return mxn std::vector of probabilities
torch::Tensor predict_proba(const torch::Tensor&); // Return mxn tensor of probabilities
double score(const std::vector<std::vector<int>>&, const std::vector<int>&);
std::vector<std::string> topological_sort();
std::vector<std::string> show() const;
std::vector<std::string> graph(const std::string& title) const; // Returns a std::vector of std::strings representing the graph in graphviz format
void initialize();
void dump_cpt() const;
inline std::string version() { return { project_version.begin(), project_version.end() }; }
private:
std::map<std::string, std::unique_ptr<Node>> nodes;
bool fitted;
float maxThreads = 0.95;
int classNumStates;
std::vector<std::string> features; // Including classname
std::string className;
double laplaceSmoothing;
torch::Tensor samples; // nxm tensor used to fit the model
bool isCyclic(const std::string&, std::unordered_set<std::string>&, std::unordered_set<std::string>&);
std::vector<double> predict_sample(const std::vector<int>&);
std::vector<double> predict_sample(const torch::Tensor&);
std::vector<double> exactInference(std::map<std::string, int>&);
double computeFactor(std::map<std::string, int>&);
void completeFit(const std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights);
void checkFitData(int n_features, int n_samples, int n_samples_y, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights);
void setStates(const std::map<std::string, std::vector<int>>&);
};
}
#endif
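Taken together with the Network.cc changes above, the header shows the shape of the new weighted API. A minimal usage sketch (include path, data, states and weights invented for illustration; nodes are added in dataset-column order as the header's notice requires):

    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>
    #include "Network.h"

    int main() {
        bayesnet::Network net;
        net.addNode("f1");
        net.addNode("f2");
        net.addNode("class");
        net.addEdge("class", "f1");
        net.addEdge("class", "f2");
        // nxm layout: one row per feature, four samples, uniform sample weights
        std::vector<std::vector<int>> X = { { 0, 1, 0, 1 }, { 0, 0, 1, 1 } };
        std::vector<int> y = { 0, 0, 1, 1 };
        std::vector<double> weights(4, 1.0);
        std::map<std::string, std::vector<int>> states = {
            { "f1", { 0, 1 } }, { "f2", { 0, 1 } }, { "class", { 0, 1 } }
        };
        net.fit(X, y, weights, { "f1", "f2" }, "class", states);
        for (int p : net.predict(X)) std::cout << p << " "; // mx1 predictions
        std::cout << std::endl;
    }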
@@ -3,7 +3,7 @@
namespace bayesnet {

Node::Node(const std::string& name)
: name(name), numStates(0), cpTable(torch::Tensor()), parents(vector<Node*>()), children(vector<Node*>())
: name(name), numStates(0), cpTable(torch::Tensor()), parents(std::vector<Node*>()), children(std::vector<Node*>())
{
}
void Node::clear()
@@ -14,7 +14,7 @@ namespace bayesnet {
dimensions.clear();
numStates = 0;
}
string Node::getName() const
std::string Node::getName() const
{
return name;
}
@@ -34,11 +34,11 @@ namespace bayesnet {
{
children.push_back(child);
}
vector<Node*>& Node::getParents()
std::vector<Node*>& Node::getParents()
{
return parents;
}
vector<Node*>& Node::getChildren()
std::vector<Node*>& Node::getChildren()
{
return children;
}
@@ -63,28 +63,28 @@ namespace bayesnet {
*/
unsigned Node::minFill()
{
unordered_set<string> neighbors;
std::unordered_set<std::string> neighbors;
for (auto child : children) {
neighbors.emplace(child->getName());
}
for (auto parent : parents) {
neighbors.emplace(parent->getName());
}
auto source = vector<string>(neighbors.begin(), neighbors.end());
auto source = std::vector<std::string>(neighbors.begin(), neighbors.end());
return combinations(source).size();
}
vector<pair<string, string>> Node::combinations(const vector<string>& source)
std::vector<std::pair<std::string, std::string>> Node::combinations(const std::vector<std::string>& source)
{
vector<pair<string, string>> result;
std::vector<std::pair<std::string, std::string>> result;
for (int i = 0; i < source.size(); ++i) {
string temp = source[i];
std::string temp = source[i];
for (int j = i + 1; j < source.size(); ++j) {
result.push_back({ temp, source[j] });
}
}
return result;
}
void Node::computeCPT(const torch::Tensor& dataset, const vector<string>& features, const int laplaceSmoothing)
void Node::computeCPT(const torch::Tensor& dataset, const std::vector<std::string>& features, const double laplaceSmoothing, const torch::Tensor& weights)
{
dimensions.clear();
// Get dimensions of the CPT
@@ -96,37 +96,37 @@ namespace bayesnet {
// Fill table with counts
auto pos = find(features.begin(), features.end(), name);
if (pos == features.end()) {
throw logic_error("Feature " + name + " not found in dataset");
throw std::logic_error("Feature " + name + " not found in dataset");
}
int name_index = pos - features.begin();
for (int n_sample = 0; n_sample < dataset.size(1); ++n_sample) {
torch::List<c10::optional<torch::Tensor>> coordinates;
c10::List<c10::optional<at::Tensor>> coordinates;
coordinates.push_back(dataset.index({ name_index, n_sample }));
for (auto parent : parents) {
pos = find(features.begin(), features.end(), parent->getName());
if (pos == features.end()) {
throw logic_error("Feature parent " + parent->getName() + " not found in dataset");
throw std::logic_error("Feature parent " + parent->getName() + " not found in dataset");
}
int parent_index = pos - features.begin();
coordinates.push_back(dataset.index({ parent_index, n_sample }));
}
// Increment the count of the corresponding coordinate
cpTable.index_put_({ coordinates }, cpTable.index({ coordinates }) + 1);
cpTable.index_put_({ coordinates }, cpTable.index({ coordinates }) + weights.index({ n_sample }).item<double>());
}
// Normalize the counts
cpTable = cpTable / cpTable.sum(0);
}
float Node::getFactorValue(map<string, int>& evidence)
float Node::getFactorValue(std::map<std::string, int>& evidence)
{
torch::List<c10::optional<torch::Tensor>> coordinates;
c10::List<c10::optional<at::Tensor>> coordinates;
// following predetermined order of indices in the cpTable (see Node.h)
coordinates.push_back(torch::tensor(evidence[name]));
transform(parents.begin(), parents.end(), back_inserter(coordinates), [&evidence](const auto& parent) { return torch::tensor(evidence[parent->getName()]); });
coordinates.push_back(at::tensor(evidence[name]));
transform(parents.begin(), parents.end(), std::back_inserter(coordinates), [&evidence](const auto& parent) { return at::tensor(evidence[parent->getName()]); });
return cpTable.index({ coordinates }).item<float>();
}
vector<string> Node::graph(const string& className)
std::vector<std::string> Node::graph(const std::string& className)
{
auto output = vector<string>();
auto output = std::vector<std::string>();
auto suffix = name == className ? ", fontcolor=red, fillcolor=lightblue, style=filled " : "";
output.push_back(name + " [shape=circle" + suffix + "] \n");
transform(children.begin(), children.end(), back_inserter(output), [this](const auto& child) { return name + " -> " + child->getName(); });
bayesnet/network/Node.h (new file, 36 lines)
@@ -0,0 +1,36 @@
#ifndef NODE_H
#define NODE_H
#include <unordered_set>
#include <vector>
#include <string>
#include <torch/torch.h>
namespace bayesnet {
class Node {
private:
std::string name;
std::vector<Node*> parents;
std::vector<Node*> children;
int numStates; // number of states of the variable
torch::Tensor cpTable; // Order of indices is 0-> node variable, 1-> 1st parent, 2-> 2nd parent, ...
std::vector<int64_t> dimensions; // dimensions of the cpTable
std::vector<std::pair<std::string, std::string>> combinations(const std::vector<std::string>&);
public:
explicit Node(const std::string&);
void clear();
void addParent(Node*);
void addChild(Node*);
void removeParent(Node*);
void removeChild(Node*);
std::string getName() const;
std::vector<Node*>& getParents();
std::vector<Node*>& getChildren();
torch::Tensor& getCPT();
void computeCPT(const torch::Tensor& dataset, const std::vector<std::string>& features, const double laplaceSmoothing, const torch::Tensor& weights);
int getNumStates() const;
void setNumStates(int);
unsigned minFill();
std::vector<std::string> graph(const std::string& clasName); // Returns a std::vector of std::strings representing the graph in graphviz format
float getFactorValue(std::map<std::string, int>&);
};
}
#endif
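The weights argument threaded through computeCPT above turns the CPT update from "+1 per sample" into "+w per sample". The same idea in standalone form, using libtorch's bincount (an illustration, not code from this diff):

    #include <torch/torch.h>
    #include <iostream>

    int main() {
        // Five observations of a 3-state variable and one weight per observation
        auto values = torch::tensor({ 0, 1, 1, 2, 2 }, torch::kInt64);
        auto weights = torch::tensor({ 1.0, 0.5, 0.5, 2.0, 1.0 }, torch::kFloat64);
        std::cout << values.bincount() << std::endl;        // unweighted counts: 1, 2, 2
        std::cout << values.bincount(weights) << std::endl; // weighted counts: 1.0, 1.0, 3.0
    }

With uniform weights of 1.0 the weighted counts reduce to the original unweighted behaviour.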
@@ -1,16 +1,16 @@
#include "BayesMetrics.h"
#include "Mst.h"
#include "BayesMetrics.h"
namespace bayesnet {
//samples is nxm tensor used to fit the model
Metrics::Metrics(const torch::Tensor& samples, const vector<string>& features, const string& className, const int classNumStates)
//samples is n+1xm tensor used to fit the model
Metrics::Metrics(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int classNumStates)
: samples(samples)
, features(features)
, className(className)
, classNumStates(classNumStates)
{
}
//samples is nxm vector used to fit the model
Metrics::Metrics(const vector<vector<int>>& vsamples, const vector<int>& labels, const vector<string>& features, const string& className, const int classNumStates)
//samples is nxm std::vector used to fit the model
Metrics::Metrics(const std::vector<std::vector<int>>& vsamples, const std::vector<int>& labels, const std::vector<std::string>& features, const std::string& className, const int classNumStates)
: features(features)
, className(className)
, classNumStates(classNumStates)
@@ -21,28 +21,57 @@ namespace bayesnet {
}
samples.index_put_({ -1, "..." }, torch::tensor(labels, torch::kInt32));
}
vector<pair<string, string>> Metrics::doCombinations(const vector<string>& source)
std::vector<int> Metrics::SelectKBestWeighted(const torch::Tensor& weights, bool ascending, unsigned k)
{
vector<pair<string, string>> result;
for (int i = 0; i < source.size(); ++i) {
string temp = source[i];
for (int j = i + 1; j < source.size(); ++j) {
result.push_back({ temp, source[j] });
}
// Return the K Best features
auto n = samples.size(0) - 1;
if (k == 0) {
k = n;
}
return result;
// compute scores
scoresKBest.clear();
featuresKBest.clear();
auto label = samples.index({ -1, "..." });
for (int i = 0; i < n; ++i) {
scoresKBest.push_back(mutualInformation(label, samples.index({ i, "..." }), weights));
featuresKBest.push_back(i);
}
// sort & reduce scores and features
if (ascending) {
sort(featuresKBest.begin(), featuresKBest.end(), [&](int i, int j)
{ return scoresKBest[i] < scoresKBest[j]; });
sort(scoresKBest.begin(), scoresKBest.end(), std::less<double>());
if (k < n) {
for (int i = 0; i < n - k; ++i) {
featuresKBest.erase(featuresKBest.begin());
scoresKBest.erase(scoresKBest.begin());
}
}
} else {
sort(featuresKBest.begin(), featuresKBest.end(), [&](int i, int j)
{ return scoresKBest[i] > scoresKBest[j]; });
sort(scoresKBest.begin(), scoresKBest.end(), std::greater<double>());
featuresKBest.resize(k);
scoresKBest.resize(k);
}
return featuresKBest;
}
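A hedged usage sketch for the new SelectKBestWeighted (data and names invented; samples must use the (n+1)xm layout with the labels in the last row, as the constructor comment above states):

    #include <iostream>
    #include "BayesMetrics.h"

    int main() {
        // Three feature rows plus a label row, six samples
        auto samples = torch::tensor({ { 0, 1, 0, 1, 0, 1 },
                                       { 0, 0, 1, 1, 0, 1 },
                                       { 1, 1, 0, 0, 1, 0 },
                                       { 0, 1, 0, 1, 0, 1 } }, torch::kInt32);
        std::vector<std::string> features = { "f1", "f2", "f3" };
        bayesnet::Metrics metrics(samples, features, "class", 2);
        auto weights = torch::full({ 6 }, 1.0 / 6, torch::kFloat64);
        // Indices of the two features with the highest weighted mutual information with the label
        for (int i : metrics.SelectKBestWeighted(weights, false, 2)) {
            std::cout << features[i] << std::endl;
        }
    }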
torch::Tensor Metrics::conditionalEdge()
std::vector<double> Metrics::getScoresKBest() const
{
auto result = vector<double>();
auto source = vector<string>(features);
return scoresKBest;
}

torch::Tensor Metrics::conditionalEdge(const torch::Tensor& weights)
{
auto result = std::vector<double>();
auto source = std::vector<std::string>(features);
source.push_back(className);
auto combinations = doCombinations(source);
// Compute class prior
auto margin = torch::zeros({ classNumStates });
auto margin = torch::zeros({ classNumStates }, torch::kFloat);
for (int value = 0; value < classNumStates; ++value) {
auto mask = samples.index({ -1, "..." }) == value;
margin[value] = mask.sum().item<float>() / samples.size(1);
margin[value] = mask.sum().item<double>() / samples.size(1);
}
for (auto [first, second] : combinations) {
int index_first = find(features.begin(), features.end(), first) - features.begin();
@@ -52,8 +81,9 @@ namespace bayesnet {
auto mask = samples.index({ -1, "..." }) == value;
auto first_dataset = samples.index({ index_first, mask });
auto second_dataset = samples.index({ index_second, mask });
auto mi = mutualInformation(first_dataset, second_dataset);
auto pb = margin[value].item<float>();
auto weights_dataset = weights.index({ mask });
auto mi = mutualInformation(first_dataset, second_dataset, weights_dataset);
auto pb = margin[value].item<double>();
accumulated += pb * mi;
}
result.push_back(accumulated);
@@ -70,31 +100,32 @@ namespace bayesnet {
return matrix;
}
// To use in Python
vector<float> Metrics::conditionalEdgeWeights()
std::vector<float> Metrics::conditionalEdgeWeights(std::vector<float>& weights_)
{
auto matrix = conditionalEdge();
const torch::Tensor weights = torch::tensor(weights_);
auto matrix = conditionalEdge(weights);
std::vector<float> v(matrix.data_ptr<float>(), matrix.data_ptr<float>() + matrix.numel());
return v;
}
double Metrics::entropy(const torch::Tensor& feature)
double Metrics::entropy(const torch::Tensor& feature, const torch::Tensor& weights)
{
torch::Tensor counts = feature.bincount();
int totalWeight = counts.sum().item<int>();
torch::Tensor counts = feature.bincount(weights);
double totalWeight = counts.sum().item<double>();
torch::Tensor probs = counts.to(torch::kFloat) / totalWeight;
torch::Tensor logProbs = torch::log(probs);
torch::Tensor entropy = -probs * logProbs;
return entropy.nansum().item<double>();
}
// H(Y|X) = sum_{x in X} p(x) H(Y|X=x)
double Metrics::conditionalEntropy(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature)
double Metrics::conditionalEntropy(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& weights)
{
int numSamples = firstFeature.sizes()[0];
torch::Tensor featureCounts = secondFeature.bincount();
unordered_map<int, unordered_map<int, double>> jointCounts;
torch::Tensor featureCounts = secondFeature.bincount(weights);
std::unordered_map<int, std::unordered_map<int, double>> jointCounts;
double totalWeight = 0;
for (auto i = 0; i < numSamples; i++) {
jointCounts[secondFeature[i].item<int>()][firstFeature[i].item<int>()] += 1;
totalWeight += 1;
jointCounts[secondFeature[i].item<int>()][firstFeature[i].item<int>()] += weights[i].item<double>();
totalWeight += weights[i].item<float>();
}
if (totalWeight == 0)
return 0;
@@ -115,16 +146,16 @@ namespace bayesnet {
return entropyValue;
}
// I(X;Y) = H(Y) - H(Y|X)
double Metrics::mutualInformation(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature)
double Metrics::mutualInformation(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& weights)
{
return entropy(firstFeature) - conditionalEntropy(firstFeature, secondFeature);
return entropy(firstFeature, weights) - conditionalEntropy(firstFeature, secondFeature, weights);
}
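With per-sample weights, the entropy above is the weighted plug-in estimate. Writing W_x for the total weight of the samples where the feature takes value x (the bincount with weights) and W for the overall weight, the code computes, in LaTeX form (notation assumed, reconstructed from the code):

    H_w(X) = -\sum_{x} \frac{W_x}{W} \log \frac{W_x}{W}

and mutual information is assembled exactly as the comment in the diff says, I(X;Y) = H(Y) - H(Y \mid X), with firstFeature in the role of Y and every term using the same weights.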
/*
Compute the maximum spanning tree considering the weights as distances
and the indices of the weights as nodes of this square matrix using
Kruskal algorithm
*/
vector<pair<int, int>> Metrics::maximumSpanningTree(const vector<string>& features, const Tensor& weights, const int root)
std::vector<std::pair<int, int>> Metrics::maximumSpanningTree(const std::vector<std::string>& features, const torch::Tensor& weights, const int root)
{
auto mst = MST(features, weights, root);
return mst.maximumSpanningTree();
bayesnet/utils/BayesMetrics.h (new file, 49 lines)
@@ -0,0 +1,49 @@
#ifndef BAYESNET_METRICS_H
#define BAYESNET_METRICS_H
#include <vector>
#include <string>
#include <torch/torch.h>
namespace bayesnet {
class Metrics {
private:
int classNumStates = 0;
std::vector<double> scoresKBest;
std::vector<int> featuresKBest; // sorted indices of the features
double conditionalEntropy(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& weights);
protected:
torch::Tensor samples; // n+1xm torch::Tensor used to fit the model where samples[-1] is the y std::vector
std::string className;
double entropy(const torch::Tensor& feature, const torch::Tensor& weights);
std::vector<std::string> features;
template <class T>
std::vector<std::pair<T, T>> doCombinations(const std::vector<T>& source)
{
std::vector<std::pair<T, T>> result;
for (int i = 0; i < source.size(); ++i) {
T temp = source[i];
for (int j = i + 1; j < source.size(); ++j) {
result.push_back({ temp, source[j] });
}
}
return result;
}
template <class T>
T pop_first(std::vector<T>& v)
{
T temp = v[0];
v.erase(v.begin());
return temp;
}
public:
Metrics() = default;
Metrics(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int classNumStates);
Metrics(const std::vector<std::vector<int>>& vsamples, const std::vector<int>& labels, const std::vector<std::string>& features, const std::string& className, const int classNumStates);
std::vector<int> SelectKBestWeighted(const torch::Tensor& weights, bool ascending = false, unsigned k = 0);
std::vector<double> getScoresKBest() const;
double mutualInformation(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& weights);
std::vector<float> conditionalEdgeWeights(std::vector<float>& weights); // To use in Python
torch::Tensor conditionalEdge(const torch::Tensor& weights);
std::vector<std::pair<int, int>> maximumSpanningTree(const std::vector<std::string>& features, const torch::Tensor& weights, const int root);
};
}
#endif
@@ -1,13 +1,13 @@
#include "Mst.h"
#include <vector>
#include <list>
#include "Mst.h"
/*
Based on the code from https://www.softwaretestinghelp.com/minimum-spanning-tree-tutorial/

*/

namespace bayesnet {
using namespace std;
Graph::Graph(int V) : V(V), parent(vector<int>(V))
Graph::Graph(int V) : V(V), parent(std::vector<int>(V))
{
for (int i = 0; i < V; i++)
parent[i] = i;
@@ -34,36 +34,45 @@ namespace bayesnet {
void Graph::kruskal_algorithm()
{
// sort the edges ordered on decreasing weight
sort(G.begin(), G.end(), [](const auto& left, const auto& right) {return left.first > right.first;});
stable_sort(G.begin(), G.end(), [](const auto& left, const auto& right) {return left.first > right.first;});
for (int i = 0; i < G.size(); i++) {
int uSt, vEd;
uSt = find_set(G[i].second.first);
vEd = find_set(G[i].second.second);
if (uSt != vEd) {
T.push_back(G[i]); // add to mst vector
T.push_back(G[i]); // add to mst std::vector
union_set(uSt, vEd);
}
}
}
void Graph::display_mst()
{
cout << "Edge :" << " Weight" << endl;
std::cout << "Edge :" << " Weight" << std::endl;
for (int i = 0; i < T.size(); i++) {
cout << T[i].second.first << " - " << T[i].second.second << " : "
std::cout << T[i].second.first << " - " << T[i].second.second << " : "
<< T[i].first;
cout << endl;
std::cout << std::endl;
}
}

vector<pair<int, int>> reorder(vector<pair<float, pair<int, int>>> T, int root_original)
void insertElement(std::list<int>& variables, int variable)
{
auto result = vector<pair<int, int>>();
auto visited = vector<int>();
auto nextVariables = unordered_set<int>();
nextVariables.emplace(root_original);
if (std::find(variables.begin(), variables.end(), variable) == variables.end()) {
variables.push_front(variable);
}
}

std::vector<std::pair<int, int>> reorder(std::vector<std::pair<float, std::pair<int, int>>> T, int root_original)
{
// Create the edges of a DAG from the MST
// replacing unordered_set with list because unordered_set cannot guarantee the order of the elements inserted
auto result = std::vector<std::pair<int, int>>();
auto visited = std::vector<int>();
auto nextVariables = std::list<int>();
nextVariables.push_front(root_original);
while (nextVariables.size() > 0) {
int root = *nextVariables.begin();
nextVariables.erase(nextVariables.begin());
int root = nextVariables.front();
nextVariables.pop_front();
for (int i = 0; i < T.size(); ++i) {
auto [weight, edge] = T[i];
auto [from, to] = edge;
@@ -71,10 +80,10 @@ namespace bayesnet {
visited.insert(visited.begin(), i);
if (from == root) {
result.push_back({ from, to });
nextVariables.emplace(to);
insertElement(nextVariables, to);
} else {
result.push_back({ to, from });
nextVariables.emplace(from);
insertElement(nextVariables, from);
}
}
}
@@ -94,12 +103,11 @@ namespace bayesnet {
return result;
}

MST::MST(const vector<string>& features, const Tensor& weights, const int root) : features(features), weights(weights), root(root) {}
vector<pair<int, int>> MST::maximumSpanningTree()
MST::MST(const std::vector<std::string>& features, const torch::Tensor& weights, const int root) : features(features), weights(weights), root(root) {}
std::vector<std::pair<int, int>> MST::maximumSpanningTree()
{
auto num_features = features.size();
Graph g(num_features);

// Make a complete graph
for (int i = 0; i < num_features - 1; ++i) {
for (int j = i + 1; j < num_features; ++j) {
bayesnet/utils/Mst.h (new file, 33 lines)
@@ -0,0 +1,33 @@
#ifndef MST_H
#define MST_H
#include <vector>
#include <string>
#include <torch/torch.h>
namespace bayesnet {
class MST {
private:
torch::Tensor weights;
std::vector<std::string> features;
int root = 0;
public:
MST() = default;
MST(const std::vector<std::string>& features, const torch::Tensor& weights, const int root);
std::vector<std::pair<int, int>> maximumSpanningTree();
};
class Graph {
private:
int V; // number of nodes in graph
std::vector <std::pair<float, std::pair<int, int>>> G; // std::vector for graph
std::vector <std::pair<float, std::pair<int, int>>> T; // std::vector for mst
std::vector<int> parent;
public:
explicit Graph(int V);
void addEdge(int u, int v, float wt);
int find_set(int i);
void union_set(int u, int v);
void kruskal_algorithm();
void display_mst();
std::vector <std::pair<float, std::pair<int, int>>> get_mst() { return T; }
};
}
#endif
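For orientation, a minimal sketch of the Graph API above (edge weights invented): build a small weighted graph, run Kruskal on the edges sorted by decreasing weight, and read back the maximum spanning tree:

    #include <iostream>
    #include "Mst.h"

    int main() {
        bayesnet::Graph g(4);
        g.addEdge(0, 1, 0.9f);
        g.addEdge(0, 2, 0.2f);
        g.addEdge(1, 2, 0.8f);
        g.addEdge(2, 3, 0.7f);
        g.kruskal_algorithm(); // keeps 0-1, 1-2 and 2-3; 0-2 would close a cycle
        for (const auto& [weight, edge] : g.get_mst()) {
            std::cout << edge.first << " - " << edge.second << " : " << weight << std::endl;
        }
    }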
bayesnet/utils/bayesnetUtils.cc (new file, 50 lines)
@@ -0,0 +1,50 @@

#include "bayesnetUtils.h"
namespace bayesnet {
// Return the indices in descending order
std::vector<int> argsort(std::vector<double>& nums)
{
int n = nums.size();
std::vector<int> indices(n);
iota(indices.begin(), indices.end(), 0);
sort(indices.begin(), indices.end(), [&nums](int i, int j) {return nums[i] > nums[j];});
return indices;
}
std::vector<std::vector<int>> tensorToVector(torch::Tensor& dtensor)
{
// convert mxn tensor to nxm std::vector
std::vector<std::vector<int>> result;
// Iterate over cols
for (int i = 0; i < dtensor.size(1); ++i) {
auto col_tensor = dtensor.index({ "...", i });
auto col = std::vector<int>(col_tensor.data_ptr<int>(), col_tensor.data_ptr<int>() + dtensor.size(0));
result.push_back(col);
}
return result;
}
std::vector<std::vector<double>> tensorToVectorDouble(torch::Tensor& dtensor)
{
// convert mxn tensor to mxn std::vector
std::vector<std::vector<double>> result;
// Iterate over cols
for (int i = 0; i < dtensor.size(0); ++i) {
auto col_tensor = dtensor.index({ i, "..." });
auto col = std::vector<double>(col_tensor.data_ptr<float>(), col_tensor.data_ptr<float>() + dtensor.size(1));
result.push_back(col);
}
return result;
}
torch::Tensor vectorToTensor(std::vector<std::vector<int>>& vector, bool transpose)
{
// convert nxm std::vector to mxn tensor if transpose
long int m = transpose ? vector[0].size() : vector.size();
long int n = transpose ? vector.size() : vector[0].size();
auto tensor = torch::zeros({ m, n }, torch::kInt32);
for (int i = 0; i < m; ++i) {
for (int j = 0; j < n; ++j) {
tensor[i][j] = transpose ? vector[j][i] : vector[i][j];
}
}
return tensor;
}
}
bayesnet/utils/bayesnetUtils.h (new file, 11 lines)
@@ -0,0 +1,11 @@
#ifndef BAYESNET_UTILS_H
#define BAYESNET_UTILS_H
#include <vector>
#include <torch/torch.h>
namespace bayesnet {
std::vector<int> argsort(std::vector<double>& nums);
std::vector<std::vector<int>> tensorToVector(torch::Tensor& dtensor);
std::vector<std::vector<double>> tensorToVectorDouble(torch::Tensor& dtensor);
torch::Tensor vectorToTensor(std::vector<std::vector<int>>& vector, bool transpose = true);
}
#endif //BAYESNET_UTILS_H
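A short round-trip sketch of the helpers declared above (values arbitrary): vectorToTensor with the default transpose=true turns an nxm vector of rows into an mxn kInt32 tensor, and tensorToVector walks the columns back out:

    #include <iostream>
    #include "bayesnetUtils.h"

    int main() {
        std::vector<std::vector<int>> data = { { 1, 2, 3 }, { 4, 5, 6 } }; // 2x3
        auto t = bayesnet::vectorToTensor(data);  // 3x2 tensor (transpose defaults to true)
        auto back = bayesnet::tensorToVector(t);  // iterates columns: 2x3 again
        std::cout << t.sizes() << " -> " << back.size() << "x" << back[0].size() << std::endl;
    }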
@@ -1,4 +1,4 @@
configure_file(
"config.h.in"
"${CMAKE_BINARY_DIR}/configured_files/include/config.h" ESCAPE_QUOTES
"${CMAKE_BINARY_DIR}/configured_files/include/bayesnet/config.h" ESCAPE_QUOTES
)

@@ -7,7 +7,8 @@
#define PROJECT_VERSION_MINOR @PROJECT_VERSION_MINOR @
#define PROJECT_VERSION_PATCH @PROJECT_VERSION_PATCH @

static constexpr std::string_view project_name = " @PROJECT_NAME@ ";
static constexpr std::string_view project_name = "@PROJECT_NAME@";
static constexpr std::string_view project_version = "@PROJECT_VERSION@";
static constexpr std::string_view project_description = "@PROJECT_DESCRIPTION@";
static constexpr std::string_view git_sha = "@GIT_SHA@";
static constexpr std::string_view data_path = "@BayesNet_SOURCE_DIR@/tests/data/";
@@ -1 +0,0 @@
null
@@ -1,25 +0,0 @@
Type Si
Type Fe
Type RI
Type Na
Type Ba
Type Ca
Type Al
Type K
Type Mg
Fe RI
Fe Ba
Fe Ca
RI Na
RI Ba
RI Ca
RI Al
RI K
RI Mg
Ba Ca
Ba Al
Ca Al
Ca K
Ca Mg
Al K
K Mg
@@ -1,645 +0,0 @@
class att215
class att25
class att131
class att95
class att122
class att17
class att28
class att5
class att121
class att214
class att197
class att116
class att182
class att60
class att168
class att178
class att206
class att89
class att77
class att209
class att73
class att126
class att16
class att74
class att27
class att61
class att20
class att101
class att85
class att76
class att137
class att211
class att143
class att14
class att40
class att210
class att155
class att170
class att160
class att23
class att162
class att203
class att164
class att107
class att62
class att42
class att71
class att128
class att138
class att83
class att171
class att92
class att163
class att49
class att161
class att158
class att176
class att11
class att145
class att4
class att172
class att196
class att58
class att68
class att169
class att80
class att32
class att175
class att87
class att88
class att159
class att18
class att52
class att98
class att136
class att150
class att156
class att110
class att100
class att63
class att148
class att90
class att167
class att35
class att205
class att51
class att21
class att142
class att46
class att134
class att39
class att102
class att208
class att130
class att149
class att96
class att75
class att118
class att78
class att213
class att112
class att38
class att174
class att189
class att70
class att179
class att59
class att79
class att15
class att47
class att124
class att34
class att54
class att191
class att86
class att56
class att151
class att66
class att173
class att44
class att198
class att139
class att216
class att129
class att152
class att69
class att81
class att50
class att153
class att41
class att204
class att188
class att26
class att13
class att117
class att114
class att10
class att64
class att200
class att9
class att3
class att119
class att45
class att104
class att140
class att30
class att183
class att146
class att141
class att202
class att194
class att24
class att147
class att8
class att212
class att123
class att166
class att187
class att127
class att190
class att105
class att106
class att184
class att82
class att2
class att135
class att154
class att111
class att115
class att99
class att22
class att84
class att207
class att94
class att177
class att103
class att93
class att201
class att43
class att36
class att12
class att125
class att165
class att180
class att195
class att157
class att48
class att6
class att113
class att193
class att91
class att72
class att31
class att132
class att33
class att57
class att144
class att192
class att185
class att37
class att53
class att120
class att186
class att199
class att65
class att108
class att133
class att29
class att19
class att7
class att97
class att67
class att55
class att1
class att109
class att181
att215 att25
att215 att131
att215 att95
att25 att131
att25 att121
att25 att73
att25 att61
att25 att85
att25 att169
att25 att13
att131 att95
att131 att122
att131 att17
att131 att28
att131 att121
att131 att214
att131 att116
att131 att126
att131 att143
att95 att122
att95 att17
att95 att28
att95 att5
att95 att214
att95 att116
att95 att60
att95 att143
att95 att155
att95 att71
att122 att182
att122 att170
att17 att5
att17 att197
att17 att89
att17 att77
att17 att161
att28 att206
att28 att16
att28 att76
att28 att172
att28 att124
att28 att64
att5 att197
att5 att89
att5 att209
att121 att73
att214 att178
att214 att58
att214 att142
att197 att209
att197 att101
att116 att182
att116 att60
att116 att168
att116 att178
att116 att206
att116 att126
att116 att16
att116 att27
att116 att20
att116 att211
att116 att164
att116 att128
att182 att27
att182 att14
att60 att168
att60 att156
att168 att156
att168 att96
att178 att20
att178 att58
att178 att142
att178 att130
att206 att74
att206 att170
att206 att158
att89 att77
att89 att137
att89 att149
att89 att173
att77 att137
att77 att161
att209 att101
att209 att41
att73 att61
att73 att157
att126 att162
att126 att138
att126 att150
att16 att74
att16 att76
att16 att40
att16 att4
att74 att14
att74 att62
att27 att171
att61 att85
att61 att169
att20 att211
att20 att210
att20 att164
att20 att176
att101 att41
att85 att13
att76 att40
att76 att160
att137 att149
att211 att210
att211 att162
att211 att171
att211 att163
att211 att175
att211 att79
att143 att155
att143 att23
att143 att71
att143 att83
att143 att11
att14 att98
att40 att160
att40 att4
att40 att196
att40 att52
att210 att42
att210 att114
att155 att23
att155 att203
att155 att107
att155 att11
att170 att158
att160 att52
att23 att203
att162 att138
att162 att18
att162 att150
att162 att90
att162 att174
att203 att107
att203 att49
att203 att59
att203 att191
att203 att119
att164 att62
att164 att42
att164 att128
att164 att92
att164 att163
att164 att176
att164 att145
att164 att68
att164 att80
att164 att98
att164 att110
att164 att205
att164 att21
att164 att213
att164 att112
att164 att38
att164 att56
att164 att44
att107 att59
att107 att47
att107 att191
att71 att83
att71 att167
att71 att35
att128 att92
att138 att18
att83 att167
att171 att87
att171 att159
att171 att63
att171 att51
att171 att39
att171 att75
att163 att49
att163 att175
att163 att87
att163 att79
att163 att151
att163 att139
att163 att187
att163 att91
att161 att173
att176 att145
att176 att172
att176 att68
att176 att80
att176 att32
att176 att110
att176 att205
att176 att21
att176 att134
att176 att56
att4 att196
att4 att88
att4 att136
att4 att100
att4 att148
att4 att208
att172 att112
att172 att184
att196 att88
att196 att136
att196 att100
att196 att208
att58 att46
att68 att32
att32 att200
att87 att159
att87 att63
att87 att75
att87 att15
att87 att99
att159 att195
att18 att90
att18 att102
att18 att78
att18 att198
att52 att124
att98 att86
att150 att174
att150 att66
att156 att96
att156 att216
att156 att204
att156 att24
att156 att84
att100 att148
att63 att51
att63 att3
att63 att183
att90 att102
att90 att78
att167 att35
att167 att179
att35 att179
att51 att39
att51 att3
att21 att134
att21 att213
att21 att38
att21 att189
att21 att129
att21 att81
att21 att117
att21 att9
att142 att46
att142 att130
att142 att118
att142 att10
att142 att202
att142 att190
att142 att106
att46 att70
att46 att34
att46 att166
att134 att2
att102 att54
att130 att118
att130 att10
att130 att202
att149 att125
att96 att216
att96 att24
att75 att15
att75 att99
att118 att70
att78 att198
att213 att189
att38 att50
att38 att26
att174 att54
att174 att66
att174 att30
att189 att86
att189 att129
att189 att69
att189 att81
att189 att153
att189 att117
att189 att9
att189 att45
att189 att105
att70 att34
att59 att47
att79 att151
att79 att139
att79 att187
att79 att127
att79 att103
att79 att43
att79 att91
att79 att19
att124 att64
att54 att114
att54 att30
att191 att119
att86 att194
att56 att44
att56 att152
att56 att50
att56 att188
att56 att26
att56 att104
att56 att140
att56 att146
att56 att194
att56 att8
att56 att2
att56 att133
att56 att1
att173 att125
att173 att113
att44 att152
att44 att188
att44 att200
att44 att212
att44 att1
att139 att103
att139 att43
att139 att31
att139 att199
att139 att7
att216 att204
att216 att36
att216 att12
att216 att180
att216 att108
att129 att69
att152 att140
att69 att153
att81 att45
att153 att141
att41 att53
att204 att12
att13 att157
att114 att6
att114 att186
att10 att190
att64 att184
att200 att104
att9 att146
att9 att141
att9 att177
att9 att37
att9 att133
|
||||
att9 att109
|
||||
att9 att181
|
||||
att3 att183
|
||||
att3 att147
|
||||
att3 att123
|
||||
att3 att135
|
||||
att3 att111
|
||||
att45 att105
|
||||
att45 att177
|
||||
att45 att93
|
||||
att45 att201
|
||||
att45 att193
|
||||
att45 att37
|
||||
att45 att97
|
||||
att140 att8
|
||||
att30 att6
|
||||
att183 att147
|
||||
att183 att123
|
||||
att202 att166
|
||||
att202 att106
|
||||
att202 att82
|
||||
att24 att84
|
||||
att24 att36
|
||||
att147 att135
|
||||
att8 att212
|
||||
att166 att82
|
||||
att187 att127
|
||||
att187 att115
|
||||
att127 att115
|
||||
att105 att93
|
||||
att106 att154
|
||||
att82 att154
|
||||
att82 att22
|
||||
att135 att111
|
||||
att135 att207
|
||||
att154 att22
|
||||
att154 att94
|
||||
att111 att207
|
||||
att22 att94
|
||||
att84 att48
|
||||
att177 att165
|
||||
att103 att195
|
||||
att103 att109
|
||||
att93 att201
|
||||
att93 att165
|
||||
att93 att193
|
||||
att93 att33
|
||||
att201 att33
|
||||
att201 att57
|
||||
att36 att180
|
||||
att36 att72
|
||||
att36 att132
|
||||
att36 att144
|
||||
att125 att113
|
||||
att125 att185
|
||||
att125 att65
|
||||
att125 att29
|
||||
att180 att48
|
||||
att180 att72
|
||||
att180 att192
|
||||
att180 att108
|
||||
att6 att186
|
||||
att113 att185
|
||||
att113 att53
|
||||
att193 att97
|
||||
att91 att31
|
||||
att91 att19
|
||||
att72 att132
|
||||
att72 att192
|
||||
att31 att199
|
||||
att31 att67
|
||||
att132 att144
|
||||
att132 att120
|
||||
att33 att57
|
||||
att144 att120
|
||||
att185 att65
|
||||
att199 att7
|
||||
att199 att67
|
||||
att199 att55
|
||||
att65 att29
|
||||
att67 att55
|
||||
att109 att181
|
@@ -1,859 +0,0 @@
[Deleted network-structure file, 859 lines: class arcs ("class att215" ... "class att181") followed by attribute arcs ("att215 att25" ... "att109 att181") describing the Bayesian network.]
BIN diagrams/BayesNet.pdf (Executable file): Binary file not shown.
71 docs/BoostAODE.md (Normal file)
@@ -0,0 +1,71 @@
# BoostAODE Algorithm Operation

The algorithm is based on the AdaBoost algorithm, with some new proposals that can be activated using the following hyperparameters.

## Hyperparameters

The hyperparameters defined in the algorithm are (a configuration sketch follows the list):

- ***repeatSparent*** (*boolean*): Allows dataset variables to be repeated as parents of an *SPODE*. Default value: *false*.

- ***maxModels*** (*int*): Maximum number of models (*SPODEs*) to build. This hyperparameter is only taken into account if ***repeatSparent*** is set to *true*. Default value: *0*.

- ***order*** (*{"asc", "desc", "rand"}*): Sets the order (ascending/descending/random) in which the dataset variables are processed to choose the parents of the *SPODEs*. Default value: *"desc"*.

- ***convergence*** (*boolean*): Sets whether convergence of the result is used as a termination condition. If set to *true*, the training dataset passed to the model is divided into two sets, one used for training and the other as a test set (so the original test partition becomes a validation partition in this case). The split is the first partition produced by a stratified 5-fold process with a predetermined seed. The convergence criterion is that the accuracy of the current model must exceed that of the previous model by more than *1e-4*; otherwise, one is added to the number of models that worsen the result (see the next hyperparameter). Default value: *false*.

- ***tolerance*** (*int*): Sets the maximum number of models that may worsen the result before it constitutes a termination condition. Default value: *0*.

- ***select_features*** (*{"IWSS", "FCBF", "CFS", ""}*): Selects the feature-selection method used to build the initial models of the ensemble, which are included without considering any of the other exit conditions. Once the models for the selected variables are built, the algorithm updates the weights using the ensemble and assigns the same significance α<sub>t</sub> to all of them. Default value: *""*.

- ***threshold*** (*double*): Sets the value required for the IWSS and FCBF algorithms to operate. Accepted values are:
  - IWSS: $threshold \in [0, 0.5]$
  - FCBF: $threshold \in [10^{-7}, 1]$

  The default value is *-1*, so the threshold has to be set explicitly whenever either of those algorithms is used.

- ***predict_voting*** (*boolean*): Sets whether the algorithm uses *model voting* to predict the result. If set to *false*, the weighted average of the probabilities of each model's prediction is used instead. Default value: *true*.

- ***predict_single*** (*boolean*): Sets whether the algorithm uses single-model prediction during the learning process. If set to *false*, all models trained up to that point are used to compute the prediction needed to update the weights. Default value: *true*.
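As a quick illustration of how these hyperparameters fit together, here is a minimal sketch that builds a JSON configuration and hands it to the classifier. It assumes a `bayesnet::BoostAODE` class exposing a `setHyperparameters(nlohmann::json)` entry point and the usual `fit`/`predict` interface; the header name, namespace and method signatures are assumptions, not guaranteed by this document.

```cpp
#include <nlohmann/json.hpp>
#include "BoostAODE.h" // assumed header name

int main()
{
    // Hypothetical configuration exercising the hyperparameters described above
    nlohmann::json hyperparameters = {
        { "repeatSparent", true },
        { "maxModels", 10 },         // only honored because repeatSparent is true
        { "order", "rand" },
        { "convergence", true },
        { "tolerance", 3 },          // allow 3 models that worsen the result
        { "select_features", "CFS" },
        { "predict_voting", false }  // weighted average of probabilities instead
    };
    bayesnet::BoostAODE clf;                 // assumed class name
    clf.setHyperparameters(hyperparameters); // assumed entry point
    // clf.fit(X, y, features, className, states); clf.predict(Xtest);
    return 0;
}
```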
## Operation

The algorithm performs the following steps:

1. **Initialization**

   - If ***select_features*** is set, as many *SPODEs* are created as variables selected by the corresponding feature-selection algorithm, and those variables are marked as used.

   - Initial weights of the examples are set to *1/m*.

1. **Main Training Loop:**

   - Variables are sorted by mutual information with the class variable and processed in ascending, descending or random order, according to the value of the ***order*** hyperparameter. If it is random, the variables are shuffled.

   - If parent repetition (***repeatSparent***) is not enabled, the variable is marked as used.

   - A *SPODE* is created using the selected variable as the parent.

   - The model is trained, and the class variable of the training dataset is predicted. The prediction can be computed with the last trained model or with the set of models trained so far, according to the value of the ***predict_single*** hyperparameter.

   - The weights associated with the examples are updated using this expression (a code sketch of this update follows the list):

     - w<sub>i</sub> · e<sup>α<sub>t</sub></sup> (if the example has been misclassified)

     - w<sub>i</sub> · e<sup>-α<sub>t</sub></sup> (if the example has been correctly classified)

   - The model significance is set to α<sub>t</sub>.

   - If the ***convergence*** hyperparameter is set, the accuracy on the test dataset separated in the initialization step is computed.

1. **Exit Conditions:**

   - ε<sub>t</sub> > 0.5, i.e. the last model performs no better than random guessing on the weighted examples.

   - The number of models with worse accuracy is greater than ***tolerance*** and ***convergence*** is established.

   - There are no more variables left to create models, and ***repeatSparent*** is not set.

   - The number of models exceeds ***maxModels***, if ***repeatSparent*** is set.
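To make the reweighting step concrete, the following self-contained sketch implements the update described in the main training loop: weights start at *1/m*, misclassified examples are multiplied by e<sup>α<sub>t</sub></sup> and correctly classified ones by e<sup>-α<sub>t</sub></sup>. The choice α<sub>t</sub> = ½·ln((1-ε<sub>t</sub>)/ε<sub>t</sub>) and the final renormalization are standard AdaBoost conventions assumed here, not taken from this document.

```cpp
#include <cmath>
#include <numeric>
#include <vector>

// Illustrative AdaBoost-style reweighting; not the library's implementation.
// correct[i] tells whether example i was classified correctly by the new model.
double updateWeights(std::vector<double>& w, const std::vector<bool>& correct)
{
    // Weighted error of the current model (no guard for epsilon == 0: sketch only)
    double epsilon = 0.0;
    for (size_t i = 0; i < w.size(); ++i)
        if (!correct[i]) epsilon += w[i];
    // Model significance alpha_t; epsilon > 0.5 triggers the first exit condition
    double alpha = 0.5 * std::log((1.0 - epsilon) / epsilon);
    // w_i * e^{+alpha} if misclassified, w_i * e^{-alpha} if correct
    for (size_t i = 0; i < w.size(); ++i)
        w[i] *= std::exp(correct[i] ? -alpha : alpha);
    // Renormalize so the weights remain a distribution (assumed convention)
    double total = std::accumulate(w.begin(), w.end(), 0.0);
    for (auto& wi : w)
        wi /= total;
    return alpha;
}
```

Weights would be initialized to *1/m* as in the initialization step, and the returned α<sub>t</sub> becomes the significance of the model just trained.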
### [Proposal for *predict_single = false*](./BoostAODE_train_predict.pdf)
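The difference between the two prediction modes controlled by ***predict_voting*** can also be sketched in a few lines. Both helpers below are hypothetical illustrations; weighting the votes by each model's significance is an assumption.

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

// probs[t][c]: model t's probability for class c; alpha[t]: its significance.

// predict_voting = true: each model casts a vote (weighted by alpha) for its best class.
int predictByVoting(const std::vector<std::vector<double>>& probs,
                    const std::vector<double>& alpha)
{
    std::vector<double> votes(probs.front().size(), 0.0);
    for (std::size_t t = 0; t < probs.size(); ++t) {
        auto best = std::max_element(probs[t].begin(), probs[t].end());
        votes[best - probs[t].begin()] += alpha[t];
    }
    return static_cast<int>(std::max_element(votes.begin(), votes.end()) - votes.begin());
}

// predict_voting = false: weighted average of the per-class probabilities.
int predictByAveraging(const std::vector<std::vector<double>>& probs,
                       const std::vector<double>& alpha)
{
    std::vector<double> avg(probs.front().size(), 0.0);
    for (std::size_t t = 0; t < probs.size(); ++t)
        for (std::size_t c = 0; c < probs[t].size(); ++c)
            avg[c] += alpha[t] * probs[t][c];
    return static_cast<int>(std::max_element(avg.begin(), avg.end()) - avg.begin());
}
```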
BIN docs/BoostAODE_train_predict.odp (Normal file): Binary file not shown.
BIN docs/BoostAODE_train_predict.pdf (Normal file): Binary file not shown.
@@ -1,4 +1,4 @@
-filter = src/
-exclude-directories = build/lib/
+filter = bayesnet/
+exclude-directories = build_debug/lib/
 print-summary = yes
-sort-percentage = yes
+sort = uncovered-percent
@@ -4,11 +4,9 @@
 #include <map>
 #include <iostream>
 
-using namespace std;
-
 ArffFiles::ArffFiles() = default;
 
-vector<string> ArffFiles::getLines() const
+std::vector<std::string> ArffFiles::getLines() const
 {
     return lines;
 }
@@ -18,48 +16,48 @@ unsigned long int ArffFiles::getSize() const
     return lines.size();
 }
 
-vector<pair<string, string>> ArffFiles::getAttributes() const
+std::vector<std::pair<std::string, std::string>> ArffFiles::getAttributes() const
 {
     return attributes;
 }
 
-string ArffFiles::getClassName() const
+std::string ArffFiles::getClassName() const
 {
     return className;
 }
 
-string ArffFiles::getClassType() const
+std::string ArffFiles::getClassType() const
 {
     return classType;
 }
 
-vector<vector<float>>& ArffFiles::getX()
+std::vector<std::vector<float>>& ArffFiles::getX()
 {
     return X;
 }
 
-vector<int>& ArffFiles::getY()
+std::vector<int>& ArffFiles::getY()
 {
     return y;
 }
 
-void ArffFiles::loadCommon(string fileName)
+void ArffFiles::loadCommon(std::string fileName)
 {
-    ifstream file(fileName);
+    std::ifstream file(fileName);
     if (!file.is_open()) {
-        throw invalid_argument("Unable to open file");
+        throw std::invalid_argument("Unable to open file");
     }
-    string line;
-    string keyword;
-    string attribute;
-    string type;
-    string type_w;
+    std::string line;
+    std::string keyword;
+    std::string attribute;
+    std::string type;
+    std::string type_w;
     while (getline(file, line)) {
         if (line.empty() || line[0] == '%' || line == "\r" || line == " ") {
             continue;
         }
-        if (line.find("@attribute") != string::npos || line.find("@ATTRIBUTE") != string::npos) {
-            stringstream ss(line);
+        if (line.find("@attribute") != std::string::npos || line.find("@ATTRIBUTE") != std::string::npos) {
+            std::stringstream ss(line);
             ss >> keyword >> attribute;
             type = "";
             while (ss >> type_w)
@@ -74,35 +72,35 @@ void ArffFiles::loadCommon(string fileName)
     }
     file.close();
     if (attributes.empty())
-        throw invalid_argument("No attributes found");
+        throw std::invalid_argument("No attributes found");
 }
 
-void ArffFiles::load(const string& fileName, bool classLast)
+void ArffFiles::load(const std::string& fileName, bool classLast)
 {
     int labelIndex;
     loadCommon(fileName);
     if (classLast) {
-        className = get<0>(attributes.back());
-        classType = get<1>(attributes.back());
+        className = std::get<0>(attributes.back());
+        classType = std::get<1>(attributes.back());
         attributes.pop_back();
         labelIndex = static_cast<int>(attributes.size());
     } else {
-        className = get<0>(attributes.front());
-        classType = get<1>(attributes.front());
+        className = std::get<0>(attributes.front());
+        classType = std::get<1>(attributes.front());
         attributes.erase(attributes.begin());
         labelIndex = 0;
     }
     generateDataset(labelIndex);
 }
-void ArffFiles::load(const string& fileName, const string& name)
+void ArffFiles::load(const std::string& fileName, const std::string& name)
 {
     int labelIndex;
     loadCommon(fileName);
     bool found = false;
     for (int i = 0; i < attributes.size(); ++i) {
         if (attributes[i].first == name) {
-            className = get<0>(attributes[i]);
-            classType = get<1>(attributes[i]);
+            className = std::get<0>(attributes[i]);
+            classType = std::get<1>(attributes[i]);
             attributes.erase(attributes.begin() + i);
             labelIndex = i;
             found = true;
@@ -110,19 +108,19 @@ void ArffFiles::load(const string& fileName, const string& name)
         }
     }
     if (!found) {
-        throw invalid_argument("Class name not found");
+        throw std::invalid_argument("Class name not found");
     }
     generateDataset(labelIndex);
 }
 
 void ArffFiles::generateDataset(int labelIndex)
 {
-    X = vector<vector<float>>(attributes.size(), vector<float>(lines.size()));
-    auto yy = vector<string>(lines.size(), "");
-    auto removeLines = vector<int>(); // Lines with missing values
+    X = std::vector<std::vector<float>>(attributes.size(), std::vector<float>(lines.size()));
+    auto yy = std::vector<std::string>(lines.size(), "");
+    auto removeLines = std::vector<int>(); // Lines with missing values
     for (size_t i = 0; i < lines.size(); i++) {
-        stringstream ss(lines[i]);
-        string value;
+        std::stringstream ss(lines[i]);
+        std::string value;
         int pos = 0;
         int xIndex = 0;
         while (getline(ss, value, ',')) {
@@ -146,21 +144,21 @@ void ArffFiles::generateDataset(int labelIndex)
     y = factorize(yy);
 }
 
-string ArffFiles::trim(const string& source)
+std::string ArffFiles::trim(const std::string& source)
 {
-    string s(source);
+    std::string s(source);
     s.erase(0, s.find_first_not_of(" '\n\r\t"));
     s.erase(s.find_last_not_of(" '\n\r\t") + 1);
     return s;
 }
 
-vector<int> ArffFiles::factorize(const vector<string>& labels_t)
+std::vector<int> ArffFiles::factorize(const std::vector<std::string>& labels_t)
 {
-    vector<int> yy;
+    std::vector<int> yy;
     yy.reserve(labels_t.size());
-    map<string, int> labelMap;
+    std::map<std::string, int> labelMap;
     int i = 0;
-    for (const string& label : labels_t) {
+    for (const std::string& label : labels_t) {
         if (labelMap.find(label) == labelMap.end()) {
             labelMap[label] = i++;
         }
@@ -4,31 +4,29 @@
 #include <string>
 #include <vector>
 
-using namespace std;
-
 class ArffFiles {
 private:
-    vector<string> lines;
-    vector<pair<string, string>> attributes;
-    string className;
-    string classType;
-    vector<vector<float>> X;
-    vector<int> y;
+    std::vector<std::string> lines;
+    std::vector<std::pair<std::string, std::string>> attributes;
+    std::string className;
+    std::string classType;
+    std::vector<std::vector<float>> X;
+    std::vector<int> y;
     void generateDataset(int);
-    void loadCommon(string);
+    void loadCommon(std::string);
 public:
     ArffFiles();
-    void load(const string&, bool = true);
-    void load(const string&, const string&);
-    vector<string> getLines() const;
+    void load(const std::string&, bool = true);
+    void load(const std::string&, const std::string&);
+    std::vector<std::string> getLines() const;
     unsigned long int getSize() const;
-    string getClassName() const;
-    string getClassType() const;
-    static string trim(const string&);
-    vector<vector<float>>& getX();
-    vector<int>& getY();
-    vector<pair<string, string>> getAttributes() const;
-    static vector<int> factorize(const vector<string>& labels_t);
+    std::string getClassName() const;
+    std::string getClassType() const;
+    static std::string trim(const std::string&);
+    std::vector<std::vector<float>>& getX();
+    std::vector<int>& getY();
+    std::vector<std::pair<std::string, std::string>> getAttributes() const;
+    static std::vector<int> factorize(const std::vector<std::string>& labels_t);
 };
 
 #endif
@@ -1,2 +1 @@
-add_library(ArffFiles ArffFiles.cc)
-#target_link_libraries(BayesNet "${TORCH_LIBRARIES}")
+add_library(ArffFiles ArffFiles.cc)
Submodule lib/argparse deleted from b0930ab028
Submodule lib/catch2 updated: 4acc51828f...8ac8190e49
1 lib/folding (Submodule)
Submodule lib/folding added at 37316a54e0
2 lib/json
Submodule lib/json updated: 5d2754306d...0457de21cf
@@ -1,7 +1,20 @@
-include_directories(${BayesNet_SOURCE_DIR}/src/Platform)
-include_directories(${BayesNet_SOURCE_DIR}/src/BayesNet)
-include_directories(${BayesNet_SOURCE_DIR}/lib/Files)
-include_directories(${BayesNet_SOURCE_DIR}/lib/mdlp)
-include_directories(${BayesNet_SOURCE_DIR}/lib/argparse/include)
-add_executable(BayesNetSample sample.cc ${BayesNet_SOURCE_DIR}/src/Platform/Folding.cc ${BayesNet_SOURCE_DIR}/src/Platform/Models.cc)
-target_link_libraries(BayesNetSample BayesNet ArffFiles mdlp "${TORCH_LIBRARIES}")
+cmake_minimum_required(VERSION 3.20)
+
+project(bayesnet_sample)
+
+set(CMAKE_CXX_STANDARD 17)
+
+find_package(Torch REQUIRED)
+find_library(BayesNet NAMES BayesNet.a libBayesNet.a REQUIRED)
+
+include_directories(
+    lib/Files
+    lib/mdlp
+    lib/json/include
+    /usr/local/include
+)
+
+add_subdirectory(lib/Files)
+add_subdirectory(lib/mdlp)
+add_executable(bayesnet_sample sample.cc)
+target_link_libraries(bayesnet_sample ArffFiles mdlp "${TORCH_LIBRARIES}" "${BayesNet}")
168 sample/lib/Files/ArffFiles.cc (Normal file)
@@ -0,0 +1,168 @@
|
||||
#include "ArffFiles.h"
|
||||
#include <fstream>
|
||||
#include <sstream>
|
||||
#include <map>
|
||||
#include <iostream>
|
||||
|
||||
ArffFiles::ArffFiles() = default;
|
||||
|
||||
std::vector<std::string> ArffFiles::getLines() const
|
||||
{
|
||||
return lines;
|
||||
}
|
||||
|
||||
unsigned long int ArffFiles::getSize() const
|
||||
{
|
||||
return lines.size();
|
||||
}
|
||||
|
||||
std::vector<std::pair<std::string, std::string>> ArffFiles::getAttributes() const
|
||||
{
|
||||
return attributes;
|
||||
}
|
||||
|
||||
std::string ArffFiles::getClassName() const
|
||||
{
|
||||
return className;
|
||||
}
|
||||
|
||||
std::string ArffFiles::getClassType() const
|
||||
{
|
||||
return classType;
|
||||
}
|
||||
|
||||
std::vector<std::vector<float>>& ArffFiles::getX()
|
||||
{
|
||||
return X;
|
||||
}
|
||||
|
||||
std::vector<int>& ArffFiles::getY()
|
||||
{
|
||||
return y;
|
||||
}
|
||||
|
||||
void ArffFiles::loadCommon(std::string fileName)
|
||||
{
|
||||
std::ifstream file(fileName);
|
||||
if (!file.is_open()) {
|
||||
throw std::invalid_argument("Unable to open file");
|
||||
}
|
||||
std::string line;
|
||||
std::string keyword;
|
||||
std::string attribute;
|
||||
std::string type;
|
||||
std::string type_w;
|
||||
while (getline(file, line)) {
|
||||
if (line.empty() || line[0] == '%' || line == "\r" || line == " ") {
|
||||
continue;
|
||||
}
|
||||
if (line.find("@attribute") != std::string::npos || line.find("@ATTRIBUTE") != std::string::npos) {
|
||||
std::stringstream ss(line);
|
||||
ss >> keyword >> attribute;
|
||||
type = "";
|
||||
while (ss >> type_w)
|
||||
type += type_w + " ";
|
||||
attributes.emplace_back(trim(attribute), trim(type));
|
||||
continue;
|
||||
}
|
||||
if (line[0] == '@') {
|
||||
continue;
|
||||
}
|
||||
lines.push_back(line);
|
||||
}
|
||||
file.close();
|
||||
if (attributes.empty())
|
||||
throw std::invalid_argument("No attributes found");
|
||||
}
|
||||
|
||||
void ArffFiles::load(const std::string& fileName, bool classLast)
|
||||
{
|
||||
int labelIndex;
|
||||
loadCommon(fileName);
|
||||
if (classLast) {
|
||||
className = std::get<0>(attributes.back());
|
||||
classType = std::get<1>(attributes.back());
|
||||
attributes.pop_back();
|
||||
labelIndex = static_cast<int>(attributes.size());
|
||||
} else {
|
||||
className = std::get<0>(attributes.front());
|
||||
classType = std::get<1>(attributes.front());
|
||||
attributes.erase(attributes.begin());
|
||||
labelIndex = 0;
|
||||
}
|
||||
generateDataset(labelIndex);
|
||||
}
|
||||
void ArffFiles::load(const std::string& fileName, const std::string& name)
|
||||
{
|
||||
int labelIndex;
|
||||
loadCommon(fileName);
|
||||
bool found = false;
|
||||
for (int i = 0; i < attributes.size(); ++i) {
|
||||
if (attributes[i].first == name) {
|
||||
className = std::get<0>(attributes[i]);
|
||||
classType = std::get<1>(attributes[i]);
|
||||
attributes.erase(attributes.begin() + i);
|
||||
labelIndex = i;
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!found) {
|
||||
throw std::invalid_argument("Class name not found");
|
||||
}
|
||||
generateDataset(labelIndex);
|
||||
}
|
||||
|
||||
void ArffFiles::generateDataset(int labelIndex)
|
||||
{
|
||||
X = std::vector<std::vector<float>>(attributes.size(), std::vector<float>(lines.size()));
|
||||
auto yy = std::vector<std::string>(lines.size(), "");
|
||||
auto removeLines = std::vector<int>(); // Lines with missing values
|
||||
for (size_t i = 0; i < lines.size(); i++) {
|
||||
std::stringstream ss(lines[i]);
|
||||
std::string value;
|
||||
int pos = 0;
|
||||
int xIndex = 0;
|
||||
while (getline(ss, value, ',')) {
|
||||
if (pos++ == labelIndex) {
|
||||
yy[i] = value;
|
||||
} else {
|
||||
if (value == "?") {
|
||||
X[xIndex++][i] = -1;
|
||||
removeLines.push_back(i);
|
||||
} else
|
||||
X[xIndex++][i] = stof(value);
|
||||
}
|
||||
}
|
||||
}
|
||||
for (auto i : removeLines) {
|
||||
yy.erase(yy.begin() + i);
|
||||
for (auto& x : X) {
|
||||
x.erase(x.begin() + i);
|
||||
}
|
||||
}
|
||||
y = factorize(yy);
|
||||
}
|
||||
|
||||
std::string ArffFiles::trim(const std::string& source)
|
||||
{
|
||||
std::string s(source);
|
||||
s.erase(0, s.find_first_not_of(" '\n\r\t"));
|
||||
s.erase(s.find_last_not_of(" '\n\r\t") + 1);
|
||||
return s;
|
||||
}
|
||||
|
||||
std::vector<int> ArffFiles::factorize(const std::vector<std::string>& labels_t)
|
||||
{
|
||||
std::vector<int> yy;
|
||||
yy.reserve(labels_t.size());
|
||||
std::map<std::string, int> labelMap;
|
||||
int i = 0;
|
||||
for (const std::string& label : labels_t) {
|
||||
if (labelMap.find(label) == labelMap.end()) {
|
||||
labelMap[label] = i++;
|
||||
}
|
||||
yy.push_back(labelMap[label]);
|
||||
}
|
||||
return yy;
|
||||
}
|
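Two details of the loader above are easy to miss: getX() is feature-major (X[feature][sample], not sample-major), and factorize() encodes labels in order of first appearance. A minimal sketch, using only the static factorize() shown above with illustrative labels:

#include <cassert>
#include <string>
#include <vector>
#include "ArffFiles.h"

int main()
{
    std::vector<std::string> labels = {"virginica", "setosa", "virginica"};
    std::vector<int> codes = ArffFiles::factorize(labels);
    assert((codes == std::vector<int>{0, 1, 0})); // first-seen label gets code 0
    return 0;
}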
32
sample/lib/Files/ArffFiles.h
Normal file
@@ -0,0 +1,32 @@
#ifndef ARFFFILES_H
#define ARFFFILES_H

#include <string>
#include <vector>

class ArffFiles {
private:
    std::vector<std::string> lines;
    std::vector<std::pair<std::string, std::string>> attributes;
    std::string className;
    std::string classType;
    std::vector<std::vector<float>> X;
    std::vector<int> y;
    void generateDataset(int);
    void loadCommon(std::string);
public:
    ArffFiles();
    void load(const std::string&, bool = true);
    void load(const std::string&, const std::string&);
    std::vector<std::string> getLines() const;
    unsigned long int getSize() const;
    std::string getClassName() const;
    std::string getClassType() const;
    static std::string trim(const std::string&);
    std::vector<std::vector<float>>& getX();
    std::vector<int>& getY();
    std::vector<std::pair<std::string, std::string>> getAttributes() const;
    static std::vector<int> factorize(const std::vector<std::string>& labels_t);
};

#endif
1
sample/lib/Files/CMakeLists.txt
Normal file
@@ -0,0 +1 @@
add_library(ArffFiles ArffFiles.cc)
55
sample/lib/json/include/nlohmann/adl_serializer.hpp
Normal file
@@ -0,0 +1,55 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <utility>

#include <nlohmann/detail/abi_macros.hpp>
#include <nlohmann/detail/conversions/from_json.hpp>
#include <nlohmann/detail/conversions/to_json.hpp>
#include <nlohmann/detail/meta/identity_tag.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN

/// @sa https://json.nlohmann.me/api/adl_serializer/
template<typename ValueType, typename>
struct adl_serializer
{
    /// @brief convert a JSON value to any value type
    /// @sa https://json.nlohmann.me/api/adl_serializer/from_json/
    template<typename BasicJsonType, typename TargetType = ValueType>
    static auto from_json(BasicJsonType && j, TargetType& val) noexcept(
        noexcept(::nlohmann::from_json(std::forward<BasicJsonType>(j), val)))
    -> decltype(::nlohmann::from_json(std::forward<BasicJsonType>(j), val), void())
    {
        ::nlohmann::from_json(std::forward<BasicJsonType>(j), val);
    }

    /// @brief convert a JSON value to any value type
    /// @sa https://json.nlohmann.me/api/adl_serializer/from_json/
    template<typename BasicJsonType, typename TargetType = ValueType>
    static auto from_json(BasicJsonType && j) noexcept(
        noexcept(::nlohmann::from_json(std::forward<BasicJsonType>(j), detail::identity_tag<TargetType> {})))
    -> decltype(::nlohmann::from_json(std::forward<BasicJsonType>(j), detail::identity_tag<TargetType> {}))
    {
        return ::nlohmann::from_json(std::forward<BasicJsonType>(j), detail::identity_tag<TargetType> {});
    }

    /// @brief convert any value type to a JSON value
    /// @sa https://json.nlohmann.me/api/adl_serializer/to_json/
    template<typename BasicJsonType, typename TargetType = ValueType>
    static auto to_json(BasicJsonType& j, TargetType && val) noexcept(
        noexcept(::nlohmann::to_json(j, std::forward<TargetType>(val))))
    -> decltype(::nlohmann::to_json(j, std::forward<TargetType>(val)), void())
    {
        ::nlohmann::to_json(j, std::forward<TargetType>(val));
    }
};

NLOHMANN_JSON_NAMESPACE_END
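adl_serializer dispatches through unqualified ::nlohmann::from_json/to_json calls, so user types opt in by providing free functions findable via argument-dependent lookup. A minimal sketch of that pattern (Point and the demo namespace are illustrative, not part of this diff):

#include <nlohmann/json.hpp>

namespace demo {
struct Point { double x; double y; };

// found by adl_serializer through argument-dependent lookup
void to_json(nlohmann::json& j, const Point& p) { j = {{"x", p.x}, {"y", p.y}}; }
void from_json(const nlohmann::json& j, Point& p)
{
    j.at("x").get_to(p.x);
    j.at("y").get_to(p.y);
}
} // namespace demo

int main()
{
    nlohmann::json j = demo::Point{1.0, 2.0}; // uses demo::to_json
    demo::Point p = j.get<demo::Point>();     // uses demo::from_json
    return p.x == 1.0 ? 0 : 1;
}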
103
sample/lib/json/include/nlohmann/byte_container_with_subtype.hpp
Normal file
@@ -0,0 +1,103 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <cstdint> // uint8_t, uint64_t
#include <tuple> // tie
#include <utility> // move

#include <nlohmann/detail/abi_macros.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN

/// @brief an internal type for a backed binary type
/// @sa https://json.nlohmann.me/api/byte_container_with_subtype/
template<typename BinaryType>
class byte_container_with_subtype : public BinaryType
{
  public:
    using container_type = BinaryType;
    using subtype_type = std::uint64_t;

    /// @sa https://json.nlohmann.me/api/byte_container_with_subtype/byte_container_with_subtype/
    byte_container_with_subtype() noexcept(noexcept(container_type()))
        : container_type()
    {}

    /// @sa https://json.nlohmann.me/api/byte_container_with_subtype/byte_container_with_subtype/
    byte_container_with_subtype(const container_type& b) noexcept(noexcept(container_type(b)))
        : container_type(b)
    {}

    /// @sa https://json.nlohmann.me/api/byte_container_with_subtype/byte_container_with_subtype/
    byte_container_with_subtype(container_type&& b) noexcept(noexcept(container_type(std::move(b))))
        : container_type(std::move(b))
    {}

    /// @sa https://json.nlohmann.me/api/byte_container_with_subtype/byte_container_with_subtype/
    byte_container_with_subtype(const container_type& b, subtype_type subtype_) noexcept(noexcept(container_type(b)))
        : container_type(b)
        , m_subtype(subtype_)
        , m_has_subtype(true)
    {}

    /// @sa https://json.nlohmann.me/api/byte_container_with_subtype/byte_container_with_subtype/
    byte_container_with_subtype(container_type&& b, subtype_type subtype_) noexcept(noexcept(container_type(std::move(b))))
        : container_type(std::move(b))
        , m_subtype(subtype_)
        , m_has_subtype(true)
    {}

    bool operator==(const byte_container_with_subtype& rhs) const
    {
        return std::tie(static_cast<const BinaryType&>(*this), m_subtype, m_has_subtype) ==
               std::tie(static_cast<const BinaryType&>(rhs), rhs.m_subtype, rhs.m_has_subtype);
    }

    bool operator!=(const byte_container_with_subtype& rhs) const
    {
        return !(rhs == *this);
    }

    /// @brief sets the binary subtype
    /// @sa https://json.nlohmann.me/api/byte_container_with_subtype/set_subtype/
    void set_subtype(subtype_type subtype_) noexcept
    {
        m_subtype = subtype_;
        m_has_subtype = true;
    }

    /// @brief return the binary subtype
    /// @sa https://json.nlohmann.me/api/byte_container_with_subtype/subtype/
    constexpr subtype_type subtype() const noexcept
    {
        return m_has_subtype ? m_subtype : static_cast<subtype_type>(-1);
    }

    /// @brief return whether the value has a subtype
    /// @sa https://json.nlohmann.me/api/byte_container_with_subtype/has_subtype/
    constexpr bool has_subtype() const noexcept
    {
        return m_has_subtype;
    }

    /// @brief clears the binary subtype
    /// @sa https://json.nlohmann.me/api/byte_container_with_subtype/clear_subtype/
    void clear_subtype() noexcept
    {
        m_subtype = 0;
        m_has_subtype = false;
    }

  private:
    subtype_type m_subtype = 0;
    bool m_has_subtype = false;
};

NLOHMANN_JSON_NAMESPACE_END
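In practice this class backs json::binary values, carrying an optional numeric subtype for formats like CBOR tags or MessagePack extension types. A minimal sketch with illustrative bytes and subtype:

#include <cstdint>
#include <vector>
#include <nlohmann/json.hpp>

int main()
{
    std::vector<std::uint8_t> payload = {0xCA, 0xFE};
    nlohmann::json j = nlohmann::json::binary(payload, 42); // subtype 42
    bool ok = j.get_binary().has_subtype() && j.get_binary().subtype() == 42;
    j.get_binary().clear_subtype(); // subtype() now reports subtype_type(-1)
    return ok && !j.get_binary().has_subtype() ? 0 : 1;
}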
100
sample/lib/json/include/nlohmann/detail/abi_macros.hpp
Normal file
@@ -0,0 +1,100 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

// This file contains all macro definitions affecting or depending on the ABI

#ifndef JSON_SKIP_LIBRARY_VERSION_CHECK
    #if defined(NLOHMANN_JSON_VERSION_MAJOR) && defined(NLOHMANN_JSON_VERSION_MINOR) && defined(NLOHMANN_JSON_VERSION_PATCH)
        #if NLOHMANN_JSON_VERSION_MAJOR != 3 || NLOHMANN_JSON_VERSION_MINOR != 11 || NLOHMANN_JSON_VERSION_PATCH != 3
            #warning "Already included a different version of the library!"
        #endif
    #endif
#endif

#define NLOHMANN_JSON_VERSION_MAJOR 3   // NOLINT(modernize-macro-to-enum)
#define NLOHMANN_JSON_VERSION_MINOR 11  // NOLINT(modernize-macro-to-enum)
#define NLOHMANN_JSON_VERSION_PATCH 3   // NOLINT(modernize-macro-to-enum)

#ifndef JSON_DIAGNOSTICS
    #define JSON_DIAGNOSTICS 0
#endif

#ifndef JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON
    #define JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON 0
#endif

#if JSON_DIAGNOSTICS
    #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS _diag
#else
    #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS
#endif

#if JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON
    #define NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON _ldvcmp
#else
    #define NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON
#endif

#ifndef NLOHMANN_JSON_NAMESPACE_NO_VERSION
    #define NLOHMANN_JSON_NAMESPACE_NO_VERSION 0
#endif

// Construct the namespace ABI tags component
#define NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b) json_abi ## a ## b
#define NLOHMANN_JSON_ABI_TAGS_CONCAT(a, b) \
    NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b)

#define NLOHMANN_JSON_ABI_TAGS                                       \
    NLOHMANN_JSON_ABI_TAGS_CONCAT(                                   \
            NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS,                       \
            NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON)

// Construct the namespace version component
#define NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT_EX(major, minor, patch) \
    _v ## major ## _ ## minor ## _ ## patch
#define NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT(major, minor, patch) \
    NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT_EX(major, minor, patch)

#if NLOHMANN_JSON_NAMESPACE_NO_VERSION
    #define NLOHMANN_JSON_NAMESPACE_VERSION
#else
    #define NLOHMANN_JSON_NAMESPACE_VERSION                                 \
        NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT(NLOHMANN_JSON_VERSION_MAJOR, \
                                               NLOHMANN_JSON_VERSION_MINOR, \
                                               NLOHMANN_JSON_VERSION_PATCH)
#endif

// Combine namespace components
#define NLOHMANN_JSON_NAMESPACE_CONCAT_EX(a, b) a ## b
#define NLOHMANN_JSON_NAMESPACE_CONCAT(a, b) \
    NLOHMANN_JSON_NAMESPACE_CONCAT_EX(a, b)

#ifndef NLOHMANN_JSON_NAMESPACE
    #define NLOHMANN_JSON_NAMESPACE               \
        nlohmann::NLOHMANN_JSON_NAMESPACE_CONCAT( \
                NLOHMANN_JSON_ABI_TAGS,           \
                NLOHMANN_JSON_NAMESPACE_VERSION)
#endif

#ifndef NLOHMANN_JSON_NAMESPACE_BEGIN
    #define NLOHMANN_JSON_NAMESPACE_BEGIN                \
        namespace nlohmann                               \
        {                                                \
        inline namespace NLOHMANN_JSON_NAMESPACE_CONCAT( \
                    NLOHMANN_JSON_ABI_TAGS,              \
                    NLOHMANN_JSON_NAMESPACE_VERSION)     \
        {
#endif

#ifndef NLOHMANN_JSON_NAMESPACE_END
    #define NLOHMANN_JSON_NAMESPACE_END                                     \
        }  /* namespace (inline namespace) NOLINT(readability/namespace) */ \
        }  // namespace nlohmann
#endif
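The net effect of these macros is an inline, ABI-tagged, versioned namespace: with the defaults above (no diagnostics, no legacy comparison), NLOHMANN_JSON_NAMESPACE expands to nlohmann::json_abi_v3_11_3, and the inline namespace makes the versioned and unversioned spellings name the same type. A sketch that holds only for this exact default configuration:

#include <type_traits>
#include <nlohmann/json.hpp>

// both spellings refer to the same specialization of basic_json
static_assert(std::is_same<nlohmann::json, nlohmann::json_abi_v3_11_3::json>::value,
              "inline ABI namespace at work");

int main() { return 0; }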
@@ -0,0 +1,497 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <algorithm> // transform
#include <array> // array
#include <forward_list> // forward_list
#include <iterator> // inserter, front_inserter, end
#include <map> // map
#include <string> // string
#include <tuple> // tuple, make_tuple
#include <type_traits> // is_arithmetic, is_same, is_enum, underlying_type, is_convertible
#include <unordered_map> // unordered_map
#include <utility> // pair, declval
#include <valarray> // valarray

#include <nlohmann/detail/exceptions.hpp>
#include <nlohmann/detail/macro_scope.hpp>
#include <nlohmann/detail/meta/cpp_future.hpp>
#include <nlohmann/detail/meta/identity_tag.hpp>
#include <nlohmann/detail/meta/std_fs.hpp>
#include <nlohmann/detail/meta/type_traits.hpp>
#include <nlohmann/detail/string_concat.hpp>
#include <nlohmann/detail/value_t.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

template<typename BasicJsonType>
inline void from_json(const BasicJsonType& j, typename std::nullptr_t& n)
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_null()))
    {
        JSON_THROW(type_error::create(302, concat("type must be null, but is ", j.type_name()), &j));
    }
    n = nullptr;
}

// overloads for basic_json template parameters
template < typename BasicJsonType, typename ArithmeticType,
           enable_if_t < std::is_arithmetic<ArithmeticType>::value&&
                         !std::is_same<ArithmeticType, typename BasicJsonType::boolean_t>::value,
                         int > = 0 >
void get_arithmetic_value(const BasicJsonType& j, ArithmeticType& val)
{
    switch (static_cast<value_t>(j))
    {
        case value_t::number_unsigned:
        {
            val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_unsigned_t*>());
            break;
        }
        case value_t::number_integer:
        {
            val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_integer_t*>());
            break;
        }
        case value_t::number_float:
        {
            val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_float_t*>());
            break;
        }

        case value_t::null:
        case value_t::object:
        case value_t::array:
        case value_t::string:
        case value_t::boolean:
        case value_t::binary:
        case value_t::discarded:
        default:
            JSON_THROW(type_error::create(302, concat("type must be number, but is ", j.type_name()), &j));
    }
}

template<typename BasicJsonType>
inline void from_json(const BasicJsonType& j, typename BasicJsonType::boolean_t& b)
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_boolean()))
    {
        JSON_THROW(type_error::create(302, concat("type must be boolean, but is ", j.type_name()), &j));
    }
    b = *j.template get_ptr<const typename BasicJsonType::boolean_t*>();
}

template<typename BasicJsonType>
inline void from_json(const BasicJsonType& j, typename BasicJsonType::string_t& s)
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_string()))
    {
        JSON_THROW(type_error::create(302, concat("type must be string, but is ", j.type_name()), &j));
    }
    s = *j.template get_ptr<const typename BasicJsonType::string_t*>();
}

template <
    typename BasicJsonType, typename StringType,
    enable_if_t <
        std::is_assignable<StringType&, const typename BasicJsonType::string_t>::value
        && is_detected_exact<typename BasicJsonType::string_t::value_type, value_type_t, StringType>::value
        && !std::is_same<typename BasicJsonType::string_t, StringType>::value
        && !is_json_ref<StringType>::value, int > = 0 >
inline void from_json(const BasicJsonType& j, StringType& s)
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_string()))
    {
        JSON_THROW(type_error::create(302, concat("type must be string, but is ", j.type_name()), &j));
    }

    s = *j.template get_ptr<const typename BasicJsonType::string_t*>();
}

template<typename BasicJsonType>
inline void from_json(const BasicJsonType& j, typename BasicJsonType::number_float_t& val)
{
    get_arithmetic_value(j, val);
}

template<typename BasicJsonType>
inline void from_json(const BasicJsonType& j, typename BasicJsonType::number_unsigned_t& val)
{
    get_arithmetic_value(j, val);
}

template<typename BasicJsonType>
inline void from_json(const BasicJsonType& j, typename BasicJsonType::number_integer_t& val)
{
    get_arithmetic_value(j, val);
}

#if !JSON_DISABLE_ENUM_SERIALIZATION
template<typename BasicJsonType, typename EnumType,
         enable_if_t<std::is_enum<EnumType>::value, int> = 0>
inline void from_json(const BasicJsonType& j, EnumType& e)
{
    typename std::underlying_type<EnumType>::type val;
    get_arithmetic_value(j, val);
    e = static_cast<EnumType>(val);
}
#endif  // JSON_DISABLE_ENUM_SERIALIZATION

// forward_list doesn't have an insert method
template<typename BasicJsonType, typename T, typename Allocator,
         enable_if_t<is_getable<BasicJsonType, T>::value, int> = 0>
inline void from_json(const BasicJsonType& j, std::forward_list<T, Allocator>& l)
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
    {
        JSON_THROW(type_error::create(302, concat("type must be array, but is ", j.type_name()), &j));
    }
    l.clear();
    std::transform(j.rbegin(), j.rend(),
                   std::front_inserter(l), [](const BasicJsonType & i)
    {
        return i.template get<T>();
    });
}

// valarray doesn't have an insert method
template<typename BasicJsonType, typename T,
         enable_if_t<is_getable<BasicJsonType, T>::value, int> = 0>
inline void from_json(const BasicJsonType& j, std::valarray<T>& l)
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
    {
        JSON_THROW(type_error::create(302, concat("type must be array, but is ", j.type_name()), &j));
    }
    l.resize(j.size());
    std::transform(j.begin(), j.end(), std::begin(l),
                   [](const BasicJsonType & elem)
    {
        return elem.template get<T>();
    });
}

template<typename BasicJsonType, typename T, std::size_t N>
auto from_json(const BasicJsonType& j, T (&arr)[N])  // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays)
-> decltype(j.template get<T>(), void())
{
    for (std::size_t i = 0; i < N; ++i)
    {
        arr[i] = j.at(i).template get<T>();
    }
}

template<typename BasicJsonType>
inline void from_json_array_impl(const BasicJsonType& j, typename BasicJsonType::array_t& arr, priority_tag<3> /*unused*/)
{
    arr = *j.template get_ptr<const typename BasicJsonType::array_t*>();
}

template<typename BasicJsonType, typename T, std::size_t N>
auto from_json_array_impl(const BasicJsonType& j, std::array<T, N>& arr,
                          priority_tag<2> /*unused*/)
-> decltype(j.template get<T>(), void())
{
    for (std::size_t i = 0; i < N; ++i)
    {
        arr[i] = j.at(i).template get<T>();
    }
}

template<typename BasicJsonType, typename ConstructibleArrayType,
         enable_if_t<
             std::is_assignable<ConstructibleArrayType&, ConstructibleArrayType>::value,
             int> = 0>
auto from_json_array_impl(const BasicJsonType& j, ConstructibleArrayType& arr, priority_tag<1> /*unused*/)
-> decltype(
    arr.reserve(std::declval<typename ConstructibleArrayType::size_type>()),
    j.template get<typename ConstructibleArrayType::value_type>(),
    void())
{
    using std::end;

    ConstructibleArrayType ret;
    ret.reserve(j.size());
    std::transform(j.begin(), j.end(),
                   std::inserter(ret, end(ret)), [](const BasicJsonType & i)
    {
        // get<BasicJsonType>() returns *this, this won't call a from_json
        // method when value_type is BasicJsonType
        return i.template get<typename ConstructibleArrayType::value_type>();
    });
    arr = std::move(ret);
}

template<typename BasicJsonType, typename ConstructibleArrayType,
         enable_if_t<
             std::is_assignable<ConstructibleArrayType&, ConstructibleArrayType>::value,
             int> = 0>
inline void from_json_array_impl(const BasicJsonType& j, ConstructibleArrayType& arr,
                                 priority_tag<0> /*unused*/)
{
    using std::end;

    ConstructibleArrayType ret;
    std::transform(
        j.begin(), j.end(), std::inserter(ret, end(ret)),
        [](const BasicJsonType & i)
    {
        // get<BasicJsonType>() returns *this, this won't call a from_json
        // method when value_type is BasicJsonType
        return i.template get<typename ConstructibleArrayType::value_type>();
    });
    arr = std::move(ret);
}

template < typename BasicJsonType, typename ConstructibleArrayType,
           enable_if_t <
               is_constructible_array_type<BasicJsonType, ConstructibleArrayType>::value&&
               !is_constructible_object_type<BasicJsonType, ConstructibleArrayType>::value&&
               !is_constructible_string_type<BasicJsonType, ConstructibleArrayType>::value&&
               !std::is_same<ConstructibleArrayType, typename BasicJsonType::binary_t>::value&&
               !is_basic_json<ConstructibleArrayType>::value,
               int > = 0 >
auto from_json(const BasicJsonType& j, ConstructibleArrayType& arr)
-> decltype(from_json_array_impl(j, arr, priority_tag<3> {}),
            j.template get<typename ConstructibleArrayType::value_type>(),
            void())
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
    {
        JSON_THROW(type_error::create(302, concat("type must be array, but is ", j.type_name()), &j));
    }

    from_json_array_impl(j, arr, priority_tag<3> {});
}

template < typename BasicJsonType, typename T, std::size_t... Idx >
std::array<T, sizeof...(Idx)> from_json_inplace_array_impl(BasicJsonType&& j,
        identity_tag<std::array<T, sizeof...(Idx)>> /*unused*/, index_sequence<Idx...> /*unused*/)
{
    return { { std::forward<BasicJsonType>(j).at(Idx).template get<T>()... } };
}

template < typename BasicJsonType, typename T, std::size_t N >
auto from_json(BasicJsonType&& j, identity_tag<std::array<T, N>> tag)
-> decltype(from_json_inplace_array_impl(std::forward<BasicJsonType>(j), tag, make_index_sequence<N> {}))
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
    {
        JSON_THROW(type_error::create(302, concat("type must be array, but is ", j.type_name()), &j));
    }

    return from_json_inplace_array_impl(std::forward<BasicJsonType>(j), tag, make_index_sequence<N> {});
}

template<typename BasicJsonType>
inline void from_json(const BasicJsonType& j, typename BasicJsonType::binary_t& bin)
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_binary()))
    {
        JSON_THROW(type_error::create(302, concat("type must be binary, but is ", j.type_name()), &j));
    }

    bin = *j.template get_ptr<const typename BasicJsonType::binary_t*>();
}

template<typename BasicJsonType, typename ConstructibleObjectType,
         enable_if_t<is_constructible_object_type<BasicJsonType, ConstructibleObjectType>::value, int> = 0>
inline void from_json(const BasicJsonType& j, ConstructibleObjectType& obj)
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_object()))
    {
        JSON_THROW(type_error::create(302, concat("type must be object, but is ", j.type_name()), &j));
    }

    ConstructibleObjectType ret;
    const auto* inner_object = j.template get_ptr<const typename BasicJsonType::object_t*>();
    using value_type = typename ConstructibleObjectType::value_type;
    std::transform(
        inner_object->begin(), inner_object->end(),
        std::inserter(ret, ret.begin()),
        [](typename BasicJsonType::object_t::value_type const & p)
    {
        return value_type(p.first, p.second.template get<typename ConstructibleObjectType::mapped_type>());
    });
    obj = std::move(ret);
}

// overload for arithmetic types, not chosen for basic_json template arguments
// (BooleanType, etc..); note: Is it really necessary to provide explicit
// overloads for boolean_t etc. in case of a custom BooleanType which is not
// an arithmetic type?
template < typename BasicJsonType, typename ArithmeticType,
           enable_if_t <
               std::is_arithmetic<ArithmeticType>::value&&
               !std::is_same<ArithmeticType, typename BasicJsonType::number_unsigned_t>::value&&
               !std::is_same<ArithmeticType, typename BasicJsonType::number_integer_t>::value&&
               !std::is_same<ArithmeticType, typename BasicJsonType::number_float_t>::value&&
               !std::is_same<ArithmeticType, typename BasicJsonType::boolean_t>::value,
               int > = 0 >
inline void from_json(const BasicJsonType& j, ArithmeticType& val)
{
    switch (static_cast<value_t>(j))
    {
        case value_t::number_unsigned:
        {
            val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_unsigned_t*>());
            break;
        }
        case value_t::number_integer:
        {
            val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_integer_t*>());
            break;
        }
        case value_t::number_float:
        {
            val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_float_t*>());
            break;
        }
        case value_t::boolean:
        {
            val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::boolean_t*>());
            break;
        }

        case value_t::null:
        case value_t::object:
        case value_t::array:
        case value_t::string:
        case value_t::binary:
        case value_t::discarded:
        default:
            JSON_THROW(type_error::create(302, concat("type must be number, but is ", j.type_name()), &j));
    }
}

template<typename BasicJsonType, typename... Args, std::size_t... Idx>
std::tuple<Args...> from_json_tuple_impl_base(BasicJsonType&& j, index_sequence<Idx...> /*unused*/)
{
    return std::make_tuple(std::forward<BasicJsonType>(j).at(Idx).template get<Args>()...);
}

template < typename BasicJsonType, class A1, class A2 >
std::pair<A1, A2> from_json_tuple_impl(BasicJsonType&& j, identity_tag<std::pair<A1, A2>> /*unused*/, priority_tag<0> /*unused*/)
{
    return {std::forward<BasicJsonType>(j).at(0).template get<A1>(),
            std::forward<BasicJsonType>(j).at(1).template get<A2>()};
}

template<typename BasicJsonType, typename A1, typename A2>
inline void from_json_tuple_impl(BasicJsonType&& j, std::pair<A1, A2>& p, priority_tag<1> /*unused*/)
{
    p = from_json_tuple_impl(std::forward<BasicJsonType>(j), identity_tag<std::pair<A1, A2>> {}, priority_tag<0> {});
}

template<typename BasicJsonType, typename... Args>
std::tuple<Args...> from_json_tuple_impl(BasicJsonType&& j, identity_tag<std::tuple<Args...>> /*unused*/, priority_tag<2> /*unused*/)
{
    return from_json_tuple_impl_base<BasicJsonType, Args...>(std::forward<BasicJsonType>(j), index_sequence_for<Args...> {});
}

template<typename BasicJsonType, typename... Args>
inline void from_json_tuple_impl(BasicJsonType&& j, std::tuple<Args...>& t, priority_tag<3> /*unused*/)
{
    t = from_json_tuple_impl_base<BasicJsonType, Args...>(std::forward<BasicJsonType>(j), index_sequence_for<Args...> {});
}

template<typename BasicJsonType, typename TupleRelated>
auto from_json(BasicJsonType&& j, TupleRelated&& t)
-> decltype(from_json_tuple_impl(std::forward<BasicJsonType>(j), std::forward<TupleRelated>(t), priority_tag<3> {}))
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
    {
        JSON_THROW(type_error::create(302, concat("type must be array, but is ", j.type_name()), &j));
    }

    return from_json_tuple_impl(std::forward<BasicJsonType>(j), std::forward<TupleRelated>(t), priority_tag<3> {});
}

template < typename BasicJsonType, typename Key, typename Value, typename Compare, typename Allocator,
           typename = enable_if_t < !std::is_constructible <
                                        typename BasicJsonType::string_t, Key >::value >>
inline void from_json(const BasicJsonType& j, std::map<Key, Value, Compare, Allocator>& m)
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
    {
        JSON_THROW(type_error::create(302, concat("type must be array, but is ", j.type_name()), &j));
    }
    m.clear();
    for (const auto& p : j)
    {
        if (JSON_HEDLEY_UNLIKELY(!p.is_array()))
        {
            JSON_THROW(type_error::create(302, concat("type must be array, but is ", p.type_name()), &j));
        }
        m.emplace(p.at(0).template get<Key>(), p.at(1).template get<Value>());
    }
}

template < typename BasicJsonType, typename Key, typename Value, typename Hash, typename KeyEqual, typename Allocator,
           typename = enable_if_t < !std::is_constructible <
                                        typename BasicJsonType::string_t, Key >::value >>
inline void from_json(const BasicJsonType& j, std::unordered_map<Key, Value, Hash, KeyEqual, Allocator>& m)
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
    {
        JSON_THROW(type_error::create(302, concat("type must be array, but is ", j.type_name()), &j));
    }
    m.clear();
    for (const auto& p : j)
    {
        if (JSON_HEDLEY_UNLIKELY(!p.is_array()))
        {
            JSON_THROW(type_error::create(302, concat("type must be array, but is ", p.type_name()), &j));
        }
        m.emplace(p.at(0).template get<Key>(), p.at(1).template get<Value>());
    }
}

#if JSON_HAS_FILESYSTEM || JSON_HAS_EXPERIMENTAL_FILESYSTEM
template<typename BasicJsonType>
inline void from_json(const BasicJsonType& j, std_fs::path& p)
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_string()))
    {
        JSON_THROW(type_error::create(302, concat("type must be string, but is ", j.type_name()), &j));
    }
    p = *j.template get_ptr<const typename BasicJsonType::string_t*>();
}
#endif

struct from_json_fn
{
    template<typename BasicJsonType, typename T>
    auto operator()(const BasicJsonType& j, T&& val) const
    noexcept(noexcept(from_json(j, std::forward<T>(val))))
    -> decltype(from_json(j, std::forward<T>(val)))
    {
        return from_json(j, std::forward<T>(val));
    }
};

}  // namespace detail

#ifndef JSON_HAS_CPP_17
/// namespace to hold default `from_json` function
/// to see why this is required:
/// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4381.html
namespace // NOLINT(cert-dcl59-cpp,fuchsia-header-anon-namespaces,google-build-namespaces)
{
#endif
JSON_INLINE_VARIABLE constexpr const auto& from_json = // NOLINT(misc-definitions-in-headers)
    detail::static_const<detail::from_json_fn>::value;
#ifndef JSON_HAS_CPP_17
}  // namespace
#endif

NLOHMANN_JSON_NAMESPACE_END
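These overloads are what j.get<T>() ultimately dispatches to; two of the less obvious ones are the tuple path (from_json_tuple_impl) and the map overload that accepts an array of pairs when the key type is not string-constructible. A minimal sketch with illustrative values:

#include <map>
#include <string>
#include <tuple>
#include <nlohmann/json.hpp>

int main()
{
    using nlohmann::json;
    json jt = json::array({1, "two", 3.0});
    auto t = jt.get<std::tuple<int, std::string, double>>(); // tuple overload
    json jm = json::array({{1, "one"}, {2, "two"}});         // [[1,"one"],[2,"two"]]
    auto m = jm.get<std::map<int, std::string>>();           // non-string keys
    return std::get<0>(t) == 1 && m.at(2) == "two" ? 0 : 1;
}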
1118
sample/lib/json/include/nlohmann/detail/conversions/to_chars.hpp
Normal file
File diff suppressed because it is too large
447
sample/lib/json/include/nlohmann/detail/conversions/to_json.hpp
Normal file
@@ -0,0 +1,447 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <algorithm> // copy
#include <iterator> // begin, end
#include <string> // string
#include <tuple> // tuple, get
#include <type_traits> // is_same, is_constructible, is_floating_point, is_enum, underlying_type
#include <utility> // move, forward, declval, pair
#include <valarray> // valarray
#include <vector> // vector

#include <nlohmann/detail/iterators/iteration_proxy.hpp>
#include <nlohmann/detail/macro_scope.hpp>
#include <nlohmann/detail/meta/cpp_future.hpp>
#include <nlohmann/detail/meta/std_fs.hpp>
#include <nlohmann/detail/meta/type_traits.hpp>
#include <nlohmann/detail/value_t.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

//////////////////
// constructors //
//////////////////

/*
 * Note all external_constructor<>::construct functions need to call
 * j.m_data.m_value.destroy(j.m_data.m_type) to avoid a memory leak in case j contains an
 * allocated value (e.g., a string). See bug issue
 * https://github.com/nlohmann/json/issues/2865 for more information.
 */

template<value_t> struct external_constructor;

template<>
struct external_constructor<value_t::boolean>
{
    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, typename BasicJsonType::boolean_t b) noexcept
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::boolean;
        j.m_data.m_value = b;
        j.assert_invariant();
    }
};

template<>
struct external_constructor<value_t::string>
{
    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, const typename BasicJsonType::string_t& s)
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::string;
        j.m_data.m_value = s;
        j.assert_invariant();
    }

    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, typename BasicJsonType::string_t&& s)
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::string;
        j.m_data.m_value = std::move(s);
        j.assert_invariant();
    }

    template < typename BasicJsonType, typename CompatibleStringType,
               enable_if_t < !std::is_same<CompatibleStringType, typename BasicJsonType::string_t>::value,
                             int > = 0 >
    static void construct(BasicJsonType& j, const CompatibleStringType& str)
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::string;
        j.m_data.m_value.string = j.template create<typename BasicJsonType::string_t>(str);
        j.assert_invariant();
    }
};

template<>
struct external_constructor<value_t::binary>
{
    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, const typename BasicJsonType::binary_t& b)
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::binary;
        j.m_data.m_value = typename BasicJsonType::binary_t(b);
        j.assert_invariant();
    }

    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, typename BasicJsonType::binary_t&& b)
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::binary;
        j.m_data.m_value = typename BasicJsonType::binary_t(std::move(b));
        j.assert_invariant();
    }
};

template<>
struct external_constructor<value_t::number_float>
{
    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, typename BasicJsonType::number_float_t val) noexcept
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::number_float;
        j.m_data.m_value = val;
        j.assert_invariant();
    }
};

template<>
struct external_constructor<value_t::number_unsigned>
{
    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, typename BasicJsonType::number_unsigned_t val) noexcept
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::number_unsigned;
        j.m_data.m_value = val;
        j.assert_invariant();
    }
};

template<>
struct external_constructor<value_t::number_integer>
{
    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, typename BasicJsonType::number_integer_t val) noexcept
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::number_integer;
        j.m_data.m_value = val;
        j.assert_invariant();
    }
};

template<>
struct external_constructor<value_t::array>
{
    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, const typename BasicJsonType::array_t& arr)
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::array;
        j.m_data.m_value = arr;
        j.set_parents();
        j.assert_invariant();
    }

    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, typename BasicJsonType::array_t&& arr)
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::array;
        j.m_data.m_value = std::move(arr);
        j.set_parents();
        j.assert_invariant();
    }

    template < typename BasicJsonType, typename CompatibleArrayType,
               enable_if_t < !std::is_same<CompatibleArrayType, typename BasicJsonType::array_t>::value,
                             int > = 0 >
    static void construct(BasicJsonType& j, const CompatibleArrayType& arr)
    {
        using std::begin;
        using std::end;

        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::array;
        j.m_data.m_value.array = j.template create<typename BasicJsonType::array_t>(begin(arr), end(arr));
        j.set_parents();
        j.assert_invariant();
    }

    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, const std::vector<bool>& arr)
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::array;
        j.m_data.m_value = value_t::array;
        j.m_data.m_value.array->reserve(arr.size());
        for (const bool x : arr)
        {
            j.m_data.m_value.array->push_back(x);
            j.set_parent(j.m_data.m_value.array->back());
        }
        j.assert_invariant();
    }

    template<typename BasicJsonType, typename T,
             enable_if_t<std::is_convertible<T, BasicJsonType>::value, int> = 0>
    static void construct(BasicJsonType& j, const std::valarray<T>& arr)
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::array;
        j.m_data.m_value = value_t::array;
        j.m_data.m_value.array->resize(arr.size());
        if (arr.size() > 0)
        {
            std::copy(std::begin(arr), std::end(arr), j.m_data.m_value.array->begin());
        }
        j.set_parents();
        j.assert_invariant();
    }
};

template<>
struct external_constructor<value_t::object>
{
    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, const typename BasicJsonType::object_t& obj)
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::object;
        j.m_data.m_value = obj;
        j.set_parents();
        j.assert_invariant();
    }

    template<typename BasicJsonType>
    static void construct(BasicJsonType& j, typename BasicJsonType::object_t&& obj)
    {
        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::object;
        j.m_data.m_value = std::move(obj);
        j.set_parents();
        j.assert_invariant();
    }

    template < typename BasicJsonType, typename CompatibleObjectType,
               enable_if_t < !std::is_same<CompatibleObjectType, typename BasicJsonType::object_t>::value, int > = 0 >
    static void construct(BasicJsonType& j, const CompatibleObjectType& obj)
    {
        using std::begin;
        using std::end;

        j.m_data.m_value.destroy(j.m_data.m_type);
        j.m_data.m_type = value_t::object;
        j.m_data.m_value.object = j.template create<typename BasicJsonType::object_t>(begin(obj), end(obj));
        j.set_parents();
        j.assert_invariant();
    }
};

/////////////
// to_json //
/////////////

template<typename BasicJsonType, typename T,
         enable_if_t<std::is_same<T, typename BasicJsonType::boolean_t>::value, int> = 0>
inline void to_json(BasicJsonType& j, T b) noexcept
{
    external_constructor<value_t::boolean>::construct(j, b);
}

template < typename BasicJsonType, typename BoolRef,
           enable_if_t <
               ((std::is_same<std::vector<bool>::reference, BoolRef>::value
                 && !std::is_same <std::vector<bool>::reference, typename BasicJsonType::boolean_t&>::value)
                || (std::is_same<std::vector<bool>::const_reference, BoolRef>::value
                    && !std::is_same <detail::uncvref_t<std::vector<bool>::const_reference>,
                                      typename BasicJsonType::boolean_t >::value))
               && std::is_convertible<const BoolRef&, typename BasicJsonType::boolean_t>::value, int > = 0 >
inline void to_json(BasicJsonType& j, const BoolRef& b) noexcept
{
    external_constructor<value_t::boolean>::construct(j, static_cast<typename BasicJsonType::boolean_t>(b));
}

template<typename BasicJsonType, typename CompatibleString,
         enable_if_t<std::is_constructible<typename BasicJsonType::string_t, CompatibleString>::value, int> = 0>
inline void to_json(BasicJsonType& j, const CompatibleString& s)
{
    external_constructor<value_t::string>::construct(j, s);
}

template<typename BasicJsonType>
inline void to_json(BasicJsonType& j, typename BasicJsonType::string_t&& s)
{
    external_constructor<value_t::string>::construct(j, std::move(s));
}

template<typename BasicJsonType, typename FloatType,
         enable_if_t<std::is_floating_point<FloatType>::value, int> = 0>
inline void to_json(BasicJsonType& j, FloatType val) noexcept
{
    external_constructor<value_t::number_float>::construct(j, static_cast<typename BasicJsonType::number_float_t>(val));
}

template<typename BasicJsonType, typename CompatibleNumberUnsignedType,
         enable_if_t<is_compatible_integer_type<typename BasicJsonType::number_unsigned_t, CompatibleNumberUnsignedType>::value, int> = 0>
inline void to_json(BasicJsonType& j, CompatibleNumberUnsignedType val) noexcept
{
    external_constructor<value_t::number_unsigned>::construct(j, static_cast<typename BasicJsonType::number_unsigned_t>(val));
}

template<typename BasicJsonType, typename CompatibleNumberIntegerType,
         enable_if_t<is_compatible_integer_type<typename BasicJsonType::number_integer_t, CompatibleNumberIntegerType>::value, int> = 0>
inline void to_json(BasicJsonType& j, CompatibleNumberIntegerType val) noexcept
{
    external_constructor<value_t::number_integer>::construct(j, static_cast<typename BasicJsonType::number_integer_t>(val));
}

#if !JSON_DISABLE_ENUM_SERIALIZATION
template<typename BasicJsonType, typename EnumType,
         enable_if_t<std::is_enum<EnumType>::value, int> = 0>
inline void to_json(BasicJsonType& j, EnumType e) noexcept
{
    using underlying_type = typename std::underlying_type<EnumType>::type;
    static constexpr value_t integral_value_t = std::is_unsigned<underlying_type>::value ? value_t::number_unsigned : value_t::number_integer;
    external_constructor<integral_value_t>::construct(j, static_cast<underlying_type>(e));
}
#endif  // JSON_DISABLE_ENUM_SERIALIZATION

template<typename BasicJsonType>
inline void to_json(BasicJsonType& j, const std::vector<bool>& e)
{
    external_constructor<value_t::array>::construct(j, e);
}

template < typename BasicJsonType, typename CompatibleArrayType,
           enable_if_t < is_compatible_array_type<BasicJsonType,
                         CompatibleArrayType>::value&&
                         !is_compatible_object_type<BasicJsonType, CompatibleArrayType>::value&&
                         !is_compatible_string_type<BasicJsonType, CompatibleArrayType>::value&&
                         !std::is_same<typename BasicJsonType::binary_t, CompatibleArrayType>::value&&
                         !is_basic_json<CompatibleArrayType>::value,
                         int > = 0 >
inline void to_json(BasicJsonType& j, const CompatibleArrayType& arr)
{
    external_constructor<value_t::array>::construct(j, arr);
}

template<typename BasicJsonType>
inline void to_json(BasicJsonType& j, const typename BasicJsonType::binary_t& bin)
{
    external_constructor<value_t::binary>::construct(j, bin);
}

template<typename BasicJsonType, typename T,
         enable_if_t<std::is_convertible<T, BasicJsonType>::value, int> = 0>
inline void to_json(BasicJsonType& j, const std::valarray<T>& arr)
{
    external_constructor<value_t::array>::construct(j, std::move(arr));
}

template<typename BasicJsonType>
inline void to_json(BasicJsonType& j, typename BasicJsonType::array_t&& arr)
{
    external_constructor<value_t::array>::construct(j, std::move(arr));
}

template < typename BasicJsonType, typename CompatibleObjectType,
           enable_if_t < is_compatible_object_type<BasicJsonType, CompatibleObjectType>::value&& !is_basic_json<CompatibleObjectType>::value, int > = 0 >
inline void to_json(BasicJsonType& j, const CompatibleObjectType& obj)
{
    external_constructor<value_t::object>::construct(j, obj);
}

template<typename BasicJsonType>
inline void to_json(BasicJsonType& j, typename BasicJsonType::object_t&& obj)
{
    external_constructor<value_t::object>::construct(j, std::move(obj));
}

template <
    typename BasicJsonType, typename T, std::size_t N,
    enable_if_t < !std::is_constructible<typename BasicJsonType::string_t,
                  const T(&)[N]>::value, // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays)
                  int > = 0 >
inline void to_json(BasicJsonType& j, const T(&arr)[N]) // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays)
{
    external_constructor<value_t::array>::construct(j, arr);
}

template < typename BasicJsonType, typename T1, typename T2, enable_if_t < std::is_constructible<BasicJsonType, T1>::value&& std::is_constructible<BasicJsonType, T2>::value, int > = 0 >
inline void to_json(BasicJsonType& j, const std::pair<T1, T2>& p)
{
    j = { p.first, p.second };
}

// for https://github.com/nlohmann/json/pull/1134
template<typename BasicJsonType, typename T,
         enable_if_t<std::is_same<T, iteration_proxy_value<typename BasicJsonType::iterator>>::value, int> = 0>
inline void to_json(BasicJsonType& j, const T& b)
{
    j = { {b.key(), b.value()} };
}

template<typename BasicJsonType, typename Tuple, std::size_t... Idx>
inline void to_json_tuple_impl(BasicJsonType& j, const Tuple& t, index_sequence<Idx...> /*unused*/)
{
    j = { std::get<Idx>(t)... };
}

template<typename BasicJsonType, typename T, enable_if_t<is_constructible_tuple<BasicJsonType, T>::value, int > = 0>
inline void to_json(BasicJsonType& j, const T& t)
{
    to_json_tuple_impl(j, t, make_index_sequence<std::tuple_size<T>::value> {});
}

#if JSON_HAS_FILESYSTEM || JSON_HAS_EXPERIMENTAL_FILESYSTEM
template<typename BasicJsonType>
inline void to_json(BasicJsonType& j, const std_fs::path& p)
{
    j = p.string();
}
#endif

struct to_json_fn
{
    template<typename BasicJsonType, typename T>
    auto operator()(BasicJsonType& j, T&& val) const noexcept(noexcept(to_json(j, std::forward<T>(val))))
    -> decltype(to_json(j, std::forward<T>(val)), void())
    {
        return to_json(j, std::forward<T>(val));
    }
};
}  // namespace detail

#ifndef JSON_HAS_CPP_17
/// namespace to hold default `to_json` function
/// to see why this is required:
/// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4381.html
namespace // NOLINT(cert-dcl59-cpp,fuchsia-header-anon-namespaces,google-build-namespaces)
{
#endif
JSON_INLINE_VARIABLE constexpr const auto& to_json = // NOLINT(misc-definitions-in-headers)
    detail::static_const<detail::to_json_fn>::value;
#ifndef JSON_HAS_CPP_17
}  // namespace
#endif

NLOHMANN_JSON_NAMESPACE_END
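Conversely, these to_json overloads drive implicit construction of json values: enums serialize through their underlying integral type, pairs and tuples become arrays, and valarray becomes an array. A minimal sketch with illustrative values:

#include <utility>
#include <valarray>
#include <nlohmann/json.hpp>

enum class Color { red = 0, green = 1 };

int main()
{
    using nlohmann::json;
    json j1 = Color::green;                     // enum overload -> 1
    json j2 = std::make_pair(1, "one");         // pair overload -> [1,"one"]
    json j3 = std::valarray<double>{1.0, 2.0};  // valarray overload -> [1.0,2.0]
    return j1 == 1 && j2.is_array() && j3.size() == 2 ? 0 : 1;
}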
257
sample/lib/json/include/nlohmann/detail/exceptions.hpp
Normal file
@@ -0,0 +1,257 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <cstddef> // nullptr_t
#include <exception> // exception
#if JSON_DIAGNOSTICS
    #include <numeric> // accumulate
#endif
#include <stdexcept> // runtime_error
#include <string> // to_string
#include <vector> // vector

#include <nlohmann/detail/value_t.hpp>
#include <nlohmann/detail/string_escape.hpp>
#include <nlohmann/detail/input/position_t.hpp>
#include <nlohmann/detail/macro_scope.hpp>
#include <nlohmann/detail/meta/cpp_future.hpp>
#include <nlohmann/detail/meta/type_traits.hpp>
#include <nlohmann/detail/string_concat.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

////////////////
// exceptions //
////////////////

/// @brief general exception of the @ref basic_json class
/// @sa https://json.nlohmann.me/api/basic_json/exception/
class exception : public std::exception
{
  public:
    /// returns the explanatory string
    const char* what() const noexcept override
    {
        return m.what();
    }

    /// the id of the exception
    const int id; // NOLINT(cppcoreguidelines-non-private-member-variables-in-classes)

  protected:
    JSON_HEDLEY_NON_NULL(3)
    exception(int id_, const char* what_arg) : id(id_), m(what_arg) {} // NOLINT(bugprone-throw-keyword-missing)

    static std::string name(const std::string& ename, int id_)
    {
        return concat("[json.exception.", ename, '.', std::to_string(id_), "] ");
    }

    static std::string diagnostics(std::nullptr_t /*leaf_element*/)
    {
        return "";
    }

    template<typename BasicJsonType>
    static std::string diagnostics(const BasicJsonType* leaf_element)
    {
#if JSON_DIAGNOSTICS
        std::vector<std::string> tokens;
        for (const auto* current = leaf_element; current != nullptr && current->m_parent != nullptr; current = current->m_parent)
        {
            switch (current->m_parent->type())
            {
                case value_t::array:
                {
                    for (std::size_t i = 0; i < current->m_parent->m_data.m_value.array->size(); ++i)
                    {
                        if (&current->m_parent->m_data.m_value.array->operator[](i) == current)
                        {
                            tokens.emplace_back(std::to_string(i));
                            break;
                        }
                    }
                    break;
                }

                case value_t::object:
                {
                    for (const auto& element : *current->m_parent->m_data.m_value.object)
                    {
                        if (&element.second == current)
                        {
                            tokens.emplace_back(element.first.c_str());
                            break;
                        }
                    }
                    break;
                }

                case value_t::null: // LCOV_EXCL_LINE
                case value_t::string: // LCOV_EXCL_LINE
                case value_t::boolean: // LCOV_EXCL_LINE
                case value_t::number_integer: // LCOV_EXCL_LINE
                case value_t::number_unsigned: // LCOV_EXCL_LINE
                case value_t::number_float: // LCOV_EXCL_LINE
                case value_t::binary: // LCOV_EXCL_LINE
                case value_t::discarded: // LCOV_EXCL_LINE
                default:   // LCOV_EXCL_LINE
                    break; // LCOV_EXCL_LINE
            }
        }

        if (tokens.empty())
        {
            return "";
        }

        auto str = std::accumulate(tokens.rbegin(), tokens.rend(), std::string{},
                                   [](const std::string & a, const std::string & b)
        {
            return concat(a, '/', detail::escape(b));
        });
        return concat('(', str, ") ");
#else
        static_cast<void>(leaf_element);
        return "";
#endif
    }

  private:
    /// an exception object as storage for error messages
    std::runtime_error m;
};
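// Illustrative (assuming JSON_DIAGNOSTICS is defined at build time): for
//   json j = {{"address", {{"housenumber", "1"}}}};
//   j["address"]["housenumber"].get<int>();
// diagnostics() walks the m_parent chain upwards and prefixes the resulting
// type_error message with the JSON Pointer-style path "(/address/housenumber)".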
/// @brief exception indicating a parse error
/// @sa https://json.nlohmann.me/api/basic_json/parse_error/
class parse_error : public exception
{
  public:
    /*!
    @brief create a parse error exception
    @param[in] id_       the id of the exception
    @param[in] pos       the position where the error occurred (or with
                         chars_read_total=0 if the position cannot be
                         determined)
    @param[in] what_arg  the explanatory string
    @return parse_error object
    */
    template<typename BasicJsonContext, enable_if_t<is_basic_json_context<BasicJsonContext>::value, int> = 0>
    static parse_error create(int id_, const position_t& pos, const std::string& what_arg, BasicJsonContext context)
    {
        const std::string w = concat(exception::name("parse_error", id_), "parse error",
                                     position_string(pos), ": ", exception::diagnostics(context), what_arg);
        return {id_, pos.chars_read_total, w.c_str()};
    }

    template<typename BasicJsonContext, enable_if_t<is_basic_json_context<BasicJsonContext>::value, int> = 0>
    static parse_error create(int id_, std::size_t byte_, const std::string& what_arg, BasicJsonContext context)
    {
        const std::string w = concat(exception::name("parse_error", id_), "parse error",
                                     (byte_ != 0 ? (concat(" at byte ", std::to_string(byte_))) : ""),
                                     ": ", exception::diagnostics(context), what_arg);
        return {id_, byte_, w.c_str()};
    }

    /*!
    @brief byte index of the parse error

    The byte index of the last read character in the input file.

    @note For an input with n bytes, 1 is the index of the first character and
          n+1 is the index of the terminating null byte or the end of file.
          This also holds true when reading a byte vector (CBOR or MessagePack).
    */
    const std::size_t byte;

  private:
    parse_error(int id_, std::size_t byte_, const char* what_arg)
        : exception(id_, what_arg), byte(byte_) {}

    static std::string position_string(const position_t& pos)
    {
        return concat(" at line ", std::to_string(pos.lines_read + 1),
                      ", column ", std::to_string(pos.chars_read_current_line));
    }
};

/// @brief exception indicating errors with iterators
/// @sa https://json.nlohmann.me/api/basic_json/invalid_iterator/
class invalid_iterator : public exception
{
  public:
    template<typename BasicJsonContext, enable_if_t<is_basic_json_context<BasicJsonContext>::value, int> = 0>
    static invalid_iterator create(int id_, const std::string& what_arg, BasicJsonContext context)
    {
        const std::string w = concat(exception::name("invalid_iterator", id_), exception::diagnostics(context), what_arg);
        return {id_, w.c_str()};
    }

  private:
    JSON_HEDLEY_NON_NULL(3)
    invalid_iterator(int id_, const char* what_arg)
        : exception(id_, what_arg) {}
};

/// @brief exception indicating executing a member function with a wrong type
/// @sa https://json.nlohmann.me/api/basic_json/type_error/
class type_error : public exception
{
  public:
    template<typename BasicJsonContext, enable_if_t<is_basic_json_context<BasicJsonContext>::value, int> = 0>
    static type_error create(int id_, const std::string& what_arg, BasicJsonContext context)
    {
        const std::string w = concat(exception::name("type_error", id_), exception::diagnostics(context), what_arg);
        return {id_, w.c_str()};
    }

  private:
    JSON_HEDLEY_NON_NULL(3)
    type_error(int id_, const char* what_arg) : exception(id_, what_arg) {}
};

/// @brief exception indicating access out of the defined range
/// @sa https://json.nlohmann.me/api/basic_json/out_of_range/
class out_of_range : public exception
{
  public:
    template<typename BasicJsonContext, enable_if_t<is_basic_json_context<BasicJsonContext>::value, int> = 0>
    static out_of_range create(int id_, const std::string& what_arg, BasicJsonContext context)
    {
        const std::string w = concat(exception::name("out_of_range", id_), exception::diagnostics(context), what_arg);
        return {id_, w.c_str()};
    }

  private:
    JSON_HEDLEY_NON_NULL(3)
    out_of_range(int id_, const char* what_arg) : exception(id_, what_arg) {}
};

/// @brief exception indicating other library errors
/// @sa https://json.nlohmann.me/api/basic_json/other_error/
class other_error : public exception
{
  public:
    template<typename BasicJsonContext, enable_if_t<is_basic_json_context<BasicJsonContext>::value, int> = 0>
    static other_error create(int id_, const std::string& what_arg, BasicJsonContext context)
    {
        const std::string w = concat(exception::name("other_error", id_), exception::diagnostics(context), what_arg);
        return {id_, w.c_str()};
    }

  private:
    JSON_HEDLEY_NON_NULL(3)
    other_error(int id_, const char* what_arg) : exception(id_, what_arg) {}
};

} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
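These classes reach user code through the nlohmann::json aliases (json::parse_error, json::type_error, and so on). A minimal sketch of catching one, assuming the vendored single header is on the include path:

#include <iostream>
#include <nlohmann/json.hpp>

int main()
{
    try
    {
        // truncated input -> parse_error with id 101 and the failing byte offset
        auto j = nlohmann::json::parse(R"({"key": )");
    }
    catch (const nlohmann::json::parse_error& e)
    {
        std::cout << e.what() << " (id=" << e.id << ", byte=" << e.byte << ")\n";
    }
}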
129  sample/lib/json/include/nlohmann/detail/hash.hpp  (Normal file)
@@ -0,0 +1,129 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <cstdint> // uint8_t
#include <cstddef> // size_t
#include <functional> // hash

#include <nlohmann/detail/abi_macros.hpp>
#include <nlohmann/detail/value_t.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

// boost::hash_combine
inline std::size_t combine(std::size_t seed, std::size_t h) noexcept
{
    seed ^= h + 0x9e3779b9 + (seed << 6U) + (seed >> 2U);
    return seed;
}
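// Illustrative: this is the classic boost::hash_combine mixing step. The
// constant 0x9e3779b9 is 2^32 divided by the golden ratio; together with the
// shifts it spreads nearby inputs apart, e.g. combine(0, 1) yields
//   0 ^ (1 + 0x9e3779b9 + 0 + 0) == 0x9e3779ba.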
/*!
@brief hash a JSON value

The hash function tries to rely on std::hash where possible. Furthermore, the
type of the JSON value is taken into account to have different hash values for
null, 0, 0U, and false, etc.

@tparam BasicJsonType basic_json specialization
@param j JSON value to hash
@return hash value of j
*/
template<typename BasicJsonType>
std::size_t hash(const BasicJsonType& j)
{
    using string_t = typename BasicJsonType::string_t;
    using number_integer_t = typename BasicJsonType::number_integer_t;
    using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
    using number_float_t = typename BasicJsonType::number_float_t;

    const auto type = static_cast<std::size_t>(j.type());
    switch (j.type())
    {
        case BasicJsonType::value_t::null:
        case BasicJsonType::value_t::discarded:
        {
            return combine(type, 0);
        }

        case BasicJsonType::value_t::object:
        {
            auto seed = combine(type, j.size());
            for (const auto& element : j.items())
            {
                const auto h = std::hash<string_t> {}(element.key());
                seed = combine(seed, h);
                seed = combine(seed, hash(element.value()));
            }
            return seed;
        }

        case BasicJsonType::value_t::array:
        {
            auto seed = combine(type, j.size());
            for (const auto& element : j)
            {
                seed = combine(seed, hash(element));
            }
            return seed;
        }

        case BasicJsonType::value_t::string:
        {
            const auto h = std::hash<string_t> {}(j.template get_ref<const string_t&>());
            return combine(type, h);
        }

        case BasicJsonType::value_t::boolean:
        {
            const auto h = std::hash<bool> {}(j.template get<bool>());
            return combine(type, h);
        }

        case BasicJsonType::value_t::number_integer:
        {
            const auto h = std::hash<number_integer_t> {}(j.template get<number_integer_t>());
            return combine(type, h);
        }

        case BasicJsonType::value_t::number_unsigned:
        {
            const auto h = std::hash<number_unsigned_t> {}(j.template get<number_unsigned_t>());
            return combine(type, h);
        }

        case BasicJsonType::value_t::number_float:
        {
            const auto h = std::hash<number_float_t> {}(j.template get<number_float_t>());
            return combine(type, h);
        }

        case BasicJsonType::value_t::binary:
        {
            auto seed = combine(type, j.get_binary().size());
            const auto h = std::hash<bool> {}(j.get_binary().has_subtype());
            seed = combine(seed, h);
            seed = combine(seed, static_cast<std::size_t>(j.get_binary().subtype()));
            for (const auto byte : j.get_binary())
            {
                seed = combine(seed, std::hash<std::uint8_t> {}(byte));
            }
            return seed;
        }

        default:                // LCOV_EXCL_LINE
            JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE
            return 0;           // LCOV_EXCL_LINE
    }
}

} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
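The library's main header specializes std::hash<nlohmann::json> in terms of this function, so JSON values can sit directly in unordered containers; a small sketch:

#include <iostream>
#include <unordered_set>
#include <nlohmann/json.hpp>

int main()
{
    std::unordered_set<nlohmann::json> seen;
    seen.insert({{"k", 1}});
    // the type tag fed into combine() keeps null, 0, 0u and false distinct
    std::cout << seen.count({{"k", 1}}) << '\n'; // 1
}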
3009  sample/lib/json/include/nlohmann/detail/input/binary_reader.hpp  (Normal file)
File diff suppressed because it is too large
492  sample/lib/json/include/nlohmann/detail/input/input_adapters.hpp  (Normal file)
@@ -0,0 +1,492 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <array> // array
#include <cstddef> // size_t
#include <cstring> // strlen
#include <iterator> // begin, end, iterator_traits, random_access_iterator_tag, distance, next
#include <memory> // shared_ptr, make_shared, addressof
#include <numeric> // accumulate
#include <string> // string, char_traits
#include <type_traits> // enable_if, is_base_of, is_pointer, is_integral, remove_pointer
#include <utility> // pair, declval

#ifndef JSON_NO_IO
    #include <cstdio>  // FILE *
    #include <istream> // istream
#endif                 // JSON_NO_IO

#include <nlohmann/detail/iterators/iterator_traits.hpp>
#include <nlohmann/detail/macro_scope.hpp>
#include <nlohmann/detail/meta/type_traits.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

/// the supported input formats
enum class input_format_t { json, cbor, msgpack, ubjson, bson, bjdata };

////////////////////
// input adapters //
////////////////////

#ifndef JSON_NO_IO
/*!
Input adapter for stdio file access. This adapter reads only one byte at a
time and does not use any buffer; it is a very low-level adapter.
*/
class file_input_adapter
{
  public:
    using char_type = char;

    JSON_HEDLEY_NON_NULL(2)
    explicit file_input_adapter(std::FILE* f) noexcept
        : m_file(f)
    {
        JSON_ASSERT(m_file != nullptr);
    }

    // make class move-only
    file_input_adapter(const file_input_adapter&) = delete;
    file_input_adapter(file_input_adapter&&) noexcept = default;
    file_input_adapter& operator=(const file_input_adapter&) = delete;
    file_input_adapter& operator=(file_input_adapter&&) = delete;
    ~file_input_adapter() = default;

    std::char_traits<char>::int_type get_character() noexcept
    {
        return std::fgetc(m_file);
    }

  private:
    /// the file pointer to read from
    std::FILE* m_file;
};

/*!
Input adapter for a (caching) istream. Ignores a UTF Byte Order Mark at
beginning of input. Does not support changing the underlying std::streambuf
in mid-input. Maintains underlying std::istream and std::streambuf to support
subsequent use of standard std::istream operations to process any input
characters following those used in parsing the JSON input. Clears the
std::istream flags; any input errors (e.g., EOF) will be detected by the first
subsequent call for input from the std::istream.
*/
class input_stream_adapter
{
  public:
    using char_type = char;

    ~input_stream_adapter()
    {
        // clear stream flags; we use underlying streambuf I/O, do not
        // maintain ifstream flags, except eof
        if (is != nullptr)
        {
            is->clear(is->rdstate() & std::ios::eofbit);
        }
    }

    explicit input_stream_adapter(std::istream& i)
        : is(&i), sb(i.rdbuf())
    {}

    // delete because of pointer members
    input_stream_adapter(const input_stream_adapter&) = delete;
    input_stream_adapter& operator=(input_stream_adapter&) = delete;
    input_stream_adapter& operator=(input_stream_adapter&&) = delete;

    input_stream_adapter(input_stream_adapter&& rhs) noexcept
        : is(rhs.is), sb(rhs.sb)
    {
        rhs.is = nullptr;
        rhs.sb = nullptr;
    }

    // std::istream/std::streambuf use std::char_traits<char>::to_int_type, to
    // ensure that std::char_traits<char>::eof() and the character 0xFF do not
    // end up as the same value, e.g. 0xFFFFFFFF.
    std::char_traits<char>::int_type get_character()
    {
        auto res = sb->sbumpc();
        // set eof manually, as we don't use the istream interface.
        if (JSON_HEDLEY_UNLIKELY(res == std::char_traits<char>::eof()))
        {
            is->clear(is->rdstate() | std::ios::eofbit);
        }
        return res;
    }

  private:
    /// the associated input stream
    std::istream* is = nullptr;
    std::streambuf* sb = nullptr;
};
#endif // JSON_NO_IO

// General-purpose iterator-based adapter. It might not be as fast as
// theoretically possible for some containers, but it is extremely versatile.
template<typename IteratorType>
class iterator_input_adapter
{
  public:
    using char_type = typename std::iterator_traits<IteratorType>::value_type;

    iterator_input_adapter(IteratorType first, IteratorType last)
        : current(std::move(first)), end(std::move(last))
    {}

    typename char_traits<char_type>::int_type get_character()
    {
        if (JSON_HEDLEY_LIKELY(current != end))
        {
            auto result = char_traits<char_type>::to_int_type(*current);
            std::advance(current, 1);
            return result;
        }

        return char_traits<char_type>::eof();
    }

  private:
    IteratorType current;
    IteratorType end;

    template<typename BaseInputAdapter, size_t T>
    friend struct wide_string_input_helper;

    bool empty() const
    {
        return current == end;
    }
};

template<typename BaseInputAdapter, size_t T>
struct wide_string_input_helper;

template<typename BaseInputAdapter>
struct wide_string_input_helper<BaseInputAdapter, 4>
{
    // UTF-32
    static void fill_buffer(BaseInputAdapter& input,
                            std::array<std::char_traits<char>::int_type, 4>& utf8_bytes,
                            size_t& utf8_bytes_index,
                            size_t& utf8_bytes_filled)
    {
        utf8_bytes_index = 0;

        if (JSON_HEDLEY_UNLIKELY(input.empty()))
        {
            utf8_bytes[0] = std::char_traits<char>::eof();
            utf8_bytes_filled = 1;
        }
        else
        {
            // get the current character
            const auto wc = input.get_character();

            // UTF-32 to UTF-8 encoding
            if (wc < 0x80)
            {
                utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(wc);
                utf8_bytes_filled = 1;
            }
            else if (wc <= 0x7FF)
            {
                utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xC0u | ((static_cast<unsigned int>(wc) >> 6u) & 0x1Fu));
                utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | (static_cast<unsigned int>(wc) & 0x3Fu));
                utf8_bytes_filled = 2;
            }
            else if (wc <= 0xFFFF)
            {
                utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xE0u | ((static_cast<unsigned int>(wc) >> 12u) & 0x0Fu));
                utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | ((static_cast<unsigned int>(wc) >> 6u) & 0x3Fu));
                utf8_bytes[2] = static_cast<std::char_traits<char>::int_type>(0x80u | (static_cast<unsigned int>(wc) & 0x3Fu));
                utf8_bytes_filled = 3;
            }
            else if (wc <= 0x10FFFF)
            {
                utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xF0u | ((static_cast<unsigned int>(wc) >> 18u) & 0x07u));
                utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | ((static_cast<unsigned int>(wc) >> 12u) & 0x3Fu));
                utf8_bytes[2] = static_cast<std::char_traits<char>::int_type>(0x80u | ((static_cast<unsigned int>(wc) >> 6u) & 0x3Fu));
                utf8_bytes[3] = static_cast<std::char_traits<char>::int_type>(0x80u | (static_cast<unsigned int>(wc) & 0x3Fu));
                utf8_bytes_filled = 4;
            }
            else
            {
                // unknown character
                utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(wc);
                utf8_bytes_filled = 1;
            }
        }
    }
};

template<typename BaseInputAdapter>
struct wide_string_input_helper<BaseInputAdapter, 2>
{
    // UTF-16
    static void fill_buffer(BaseInputAdapter& input,
                            std::array<std::char_traits<char>::int_type, 4>& utf8_bytes,
                            size_t& utf8_bytes_index,
                            size_t& utf8_bytes_filled)
    {
        utf8_bytes_index = 0;

        if (JSON_HEDLEY_UNLIKELY(input.empty()))
        {
            utf8_bytes[0] = std::char_traits<char>::eof();
            utf8_bytes_filled = 1;
        }
        else
        {
            // get the current character
            const auto wc = input.get_character();

            // UTF-16 to UTF-8 encoding
            if (wc < 0x80)
            {
                utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(wc);
                utf8_bytes_filled = 1;
            }
            else if (wc <= 0x7FF)
            {
                utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xC0u | ((static_cast<unsigned int>(wc) >> 6u)));
                utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | (static_cast<unsigned int>(wc) & 0x3Fu));
                utf8_bytes_filled = 2;
            }
            else if (0xD800 > wc || wc >= 0xE000)
            {
                utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xE0u | ((static_cast<unsigned int>(wc) >> 12u)));
                utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | ((static_cast<unsigned int>(wc) >> 6u) & 0x3Fu));
                utf8_bytes[2] = static_cast<std::char_traits<char>::int_type>(0x80u | (static_cast<unsigned int>(wc) & 0x3Fu));
                utf8_bytes_filled = 3;
            }
            else
            {
                if (JSON_HEDLEY_UNLIKELY(!input.empty()))
                {
                    const auto wc2 = static_cast<unsigned int>(input.get_character());
                    const auto charcode = 0x10000u + (((static_cast<unsigned int>(wc) & 0x3FFu) << 10u) | (wc2 & 0x3FFu));
                    utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xF0u | (charcode >> 18u));
                    utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | ((charcode >> 12u) & 0x3Fu));
                    utf8_bytes[2] = static_cast<std::char_traits<char>::int_type>(0x80u | ((charcode >> 6u) & 0x3Fu));
                    utf8_bytes[3] = static_cast<std::char_traits<char>::int_type>(0x80u | (charcode & 0x3Fu));
                    utf8_bytes_filled = 4;
                }
                else
                {
                    utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(wc);
                    utf8_bytes_filled = 1;
                }
            }
        }
    }
};
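// Worked example (illustrative): U+1F600 arrives as the surrogate pair
// wc = 0xD83D, wc2 = 0xDE00. The final branch above reassembles the code
// point as
//   0x10000 + (((0xD83D & 0x3FF) << 10) | (0xDE00 & 0x3FF))
//     = 0x10000 + ((0x3D << 10) | 0x200) = 0x10000 + 0xF600 = 0x1F600
// which is then emitted as the 4-byte UTF-8 sequence F0 9F 98 80.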
// Wraps another input adapter to convert wide character types into individual bytes.
template<typename BaseInputAdapter, typename WideCharType>
class wide_string_input_adapter
{
  public:
    using char_type = char;

    wide_string_input_adapter(BaseInputAdapter base)
        : base_adapter(base) {}

    typename std::char_traits<char>::int_type get_character() noexcept
    {
        // check if buffer needs to be filled
        if (utf8_bytes_index == utf8_bytes_filled)
        {
            fill_buffer<sizeof(WideCharType)>();

            JSON_ASSERT(utf8_bytes_filled > 0);
            JSON_ASSERT(utf8_bytes_index == 0);
        }

        // use buffer
        JSON_ASSERT(utf8_bytes_filled > 0);
        JSON_ASSERT(utf8_bytes_index < utf8_bytes_filled);
        return utf8_bytes[utf8_bytes_index++];
    }

  private:
    BaseInputAdapter base_adapter;

    template<size_t T>
    void fill_buffer()
    {
        wide_string_input_helper<BaseInputAdapter, T>::fill_buffer(base_adapter, utf8_bytes, utf8_bytes_index, utf8_bytes_filled);
    }

    /// a buffer for UTF-8 bytes
    std::array<std::char_traits<char>::int_type, 4> utf8_bytes = {{0, 0, 0, 0}};

    /// index to the utf8_codes array for the next valid byte
    std::size_t utf8_bytes_index = 0;
    /// number of valid bytes in the utf8_codes array
    std::size_t utf8_bytes_filled = 0;
};

template<typename IteratorType, typename Enable = void>
struct iterator_input_adapter_factory
{
    using iterator_type = IteratorType;
    using char_type = typename std::iterator_traits<iterator_type>::value_type;
    using adapter_type = iterator_input_adapter<iterator_type>;

    static adapter_type create(IteratorType first, IteratorType last)
    {
        return adapter_type(std::move(first), std::move(last));
    }
};

template<typename T>
struct is_iterator_of_multibyte
{
    using value_type = typename std::iterator_traits<T>::value_type;
    enum
    {
        value = sizeof(value_type) > 1
    };
};

template<typename IteratorType>
struct iterator_input_adapter_factory<IteratorType, enable_if_t<is_iterator_of_multibyte<IteratorType>::value>>
{
    using iterator_type = IteratorType;
    using char_type = typename std::iterator_traits<iterator_type>::value_type;
    using base_adapter_type = iterator_input_adapter<iterator_type>;
    using adapter_type = wide_string_input_adapter<base_adapter_type, char_type>;

    static adapter_type create(IteratorType first, IteratorType last)
    {
        return adapter_type(base_adapter_type(std::move(first), std::move(last)));
    }
};

// General purpose iterator-based input
template<typename IteratorType>
typename iterator_input_adapter_factory<IteratorType>::adapter_type input_adapter(IteratorType first, IteratorType last)
{
    using factory_type = iterator_input_adapter_factory<IteratorType>;
    return factory_type::create(first, last);
}

// Convenience shorthand from container to iterator
// Enables ADL on begin(container) and end(container)
// Encloses the using declarations in a namespace so they do not leak into the outside scope

namespace container_input_adapter_factory_impl
{

using std::begin;
using std::end;

template<typename ContainerType, typename Enable = void>
struct container_input_adapter_factory {};

template<typename ContainerType>
struct container_input_adapter_factory< ContainerType,
       void_t<decltype(begin(std::declval<ContainerType>()), end(std::declval<ContainerType>()))>>
{
    using adapter_type = decltype(input_adapter(begin(std::declval<ContainerType>()), end(std::declval<ContainerType>())));

    static adapter_type create(const ContainerType& container)
    {
        return input_adapter(begin(container), end(container));
    }
};

} // namespace container_input_adapter_factory_impl

template<typename ContainerType>
typename container_input_adapter_factory_impl::container_input_adapter_factory<ContainerType>::adapter_type input_adapter(const ContainerType& container)
{
    return container_input_adapter_factory_impl::container_input_adapter_factory<ContainerType>::create(container);
}

#ifndef JSON_NO_IO
// Special cases with fast paths
inline file_input_adapter input_adapter(std::FILE* file)
{
    return file_input_adapter(file);
}

inline input_stream_adapter input_adapter(std::istream& stream)
{
    return input_stream_adapter(stream);
}

inline input_stream_adapter input_adapter(std::istream&& stream)
{
    return input_stream_adapter(stream);
}
#endif // JSON_NO_IO

using contiguous_bytes_input_adapter = decltype(input_adapter(std::declval<const char*>(), std::declval<const char*>()));

// Null-delimited strings, and the like.
template < typename CharT,
           typename std::enable_if <
               std::is_pointer<CharT>::value&&
               !std::is_array<CharT>::value&&
               std::is_integral<typename std::remove_pointer<CharT>::type>::value&&
               sizeof(typename std::remove_pointer<CharT>::type) == 1,
               int >::type = 0 >
contiguous_bytes_input_adapter input_adapter(CharT b)
{
    auto length = std::strlen(reinterpret_cast<const char*>(b));
    const auto* ptr = reinterpret_cast<const char*>(b);
    return input_adapter(ptr, ptr + length);
}

template<typename T, std::size_t N>
auto input_adapter(T (&array)[N]) -> decltype(input_adapter(array, array + N)) // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays)
{
    return input_adapter(array, array + N);
}

// This class only handles inputs of input_buffer_adapter type.
// It's required so that expressions like {ptr, len} can be implicitly cast
// to the correct adapter.
class span_input_adapter
{
  public:
    template < typename CharT,
               typename std::enable_if <
                   std::is_pointer<CharT>::value&&
                   std::is_integral<typename std::remove_pointer<CharT>::type>::value&&
                   sizeof(typename std::remove_pointer<CharT>::type) == 1,
                   int >::type = 0 >
    span_input_adapter(CharT b, std::size_t l)
        : ia(reinterpret_cast<const char*>(b), reinterpret_cast<const char*>(b) + l) {}

    template<class IteratorType,
             typename std::enable_if<
                 std::is_same<typename iterator_traits<IteratorType>::iterator_category, std::random_access_iterator_tag>::value,
                 int>::type = 0>
    span_input_adapter(IteratorType first, IteratorType last)
        : ia(input_adapter(first, last)) {}

    contiguous_bytes_input_adapter&& get()
    {
        return std::move(ia); // NOLINT(hicpp-move-const-arg,performance-move-const-arg)
    }

  private:
    contiguous_bytes_input_adapter ia;
};

} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
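How the overload set resolves in practice, as a sketch; "doc.json" is a placeholder path:

#include <fstream>
#include <string>
#include <nlohmann/json.hpp>

int main()
{
    // container overload: ADL begin()/end() via iterator_input_adapter
    std::string s = R"({"pi": 3.141})";
    auto j1 = nlohmann::json::parse(s);

    // istream overload: input_stream_adapter, a leading BOM is skipped
    std::ifstream f("doc.json");
    auto j2 = nlohmann::json::parse(f);

    // wide input: char16_t iterators are routed through
    // wide_string_input_adapter (UTF-16 -> UTF-8 transcoding)
    std::u16string w = u"[1,2,3]";
    auto j3 = nlohmann::json::parse(w);
}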
727  sample/lib/json/include/nlohmann/detail/input/json_sax.hpp  (Normal file)
@@ -0,0 +1,727 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <cstddef>
#include <string> // string
#include <utility> // move
#include <vector> // vector

#include <nlohmann/detail/exceptions.hpp>
#include <nlohmann/detail/macro_scope.hpp>
#include <nlohmann/detail/string_concat.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN

/*!
@brief SAX interface

This class describes the SAX interface used by @ref nlohmann::json::sax_parse.
Each function is called in different situations while the input is parsed. The
boolean return value informs the parser whether to continue processing the
input.
*/
template<typename BasicJsonType>
struct json_sax
{
    using number_integer_t = typename BasicJsonType::number_integer_t;
    using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
    using number_float_t = typename BasicJsonType::number_float_t;
    using string_t = typename BasicJsonType::string_t;
    using binary_t = typename BasicJsonType::binary_t;

    /*!
    @brief a null value was read
    @return whether parsing should proceed
    */
    virtual bool null() = 0;

    /*!
    @brief a boolean value was read
    @param[in] val  boolean value
    @return whether parsing should proceed
    */
    virtual bool boolean(bool val) = 0;

    /*!
    @brief an integer number was read
    @param[in] val  integer value
    @return whether parsing should proceed
    */
    virtual bool number_integer(number_integer_t val) = 0;

    /*!
    @brief an unsigned integer number was read
    @param[in] val  unsigned integer value
    @return whether parsing should proceed
    */
    virtual bool number_unsigned(number_unsigned_t val) = 0;

    /*!
    @brief a floating-point number was read
    @param[in] val  floating-point value
    @param[in] s    raw token value
    @return whether parsing should proceed
    */
    virtual bool number_float(number_float_t val, const string_t& s) = 0;

    /*!
    @brief a string value was read
    @param[in] val  string value
    @return whether parsing should proceed
    @note It is safe to move the passed string value.
    */
    virtual bool string(string_t& val) = 0;

    /*!
    @brief a binary value was read
    @param[in] val  binary value
    @return whether parsing should proceed
    @note It is safe to move the passed binary value.
    */
    virtual bool binary(binary_t& val) = 0;

    /*!
    @brief the beginning of an object was read
    @param[in] elements  number of object elements or -1 if unknown
    @return whether parsing should proceed
    @note binary formats may report the number of elements
    */
    virtual bool start_object(std::size_t elements) = 0;

    /*!
    @brief an object key was read
    @param[in] val  object key
    @return whether parsing should proceed
    @note It is safe to move the passed string.
    */
    virtual bool key(string_t& val) = 0;

    /*!
    @brief the end of an object was read
    @return whether parsing should proceed
    */
    virtual bool end_object() = 0;

    /*!
    @brief the beginning of an array was read
    @param[in] elements  number of array elements or -1 if unknown
    @return whether parsing should proceed
    @note binary formats may report the number of elements
    */
    virtual bool start_array(std::size_t elements) = 0;

    /*!
    @brief the end of an array was read
    @return whether parsing should proceed
    */
    virtual bool end_array() = 0;

    /*!
    @brief a parse error occurred
    @param[in] position    the position in the input where the error occurs
    @param[in] last_token  the last read token
    @param[in] ex          an exception object describing the error
    @return whether parsing should proceed (must return false)
    */
    virtual bool parse_error(std::size_t position,
                             const std::string& last_token,
                             const detail::exception& ex) = 0;

    json_sax() = default;
    json_sax(const json_sax&) = default;
    json_sax(json_sax&&) noexcept = default;
    json_sax& operator=(const json_sax&) = default;
    json_sax& operator=(json_sax&&) noexcept = default;
    virtual ~json_sax() = default;
};
namespace detail
{

/*!
@brief SAX implementation to create a JSON value from SAX events

This class implements the @ref json_sax interface and processes the SAX events
to create a JSON value which makes it basically a DOM parser. The structure or
hierarchy of the JSON value is managed by the stack `ref_stack` which contains
a pointer to the respective array or object for each recursion depth.

After successful parsing, the value that is passed by reference to the
constructor contains the parsed value.

@tparam BasicJsonType  the JSON type
*/
template<typename BasicJsonType>
class json_sax_dom_parser
{
  public:
    using number_integer_t = typename BasicJsonType::number_integer_t;
    using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
    using number_float_t = typename BasicJsonType::number_float_t;
    using string_t = typename BasicJsonType::string_t;
    using binary_t = typename BasicJsonType::binary_t;

    /*!
    @param[in,out] r  reference to a JSON value that is manipulated while
                      parsing
    @param[in] allow_exceptions_  whether parse errors yield exceptions
    */
    explicit json_sax_dom_parser(BasicJsonType& r, const bool allow_exceptions_ = true)
        : root(r), allow_exceptions(allow_exceptions_)
    {}

    // make class move-only
    json_sax_dom_parser(const json_sax_dom_parser&) = delete;
    json_sax_dom_parser(json_sax_dom_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor)
    json_sax_dom_parser& operator=(const json_sax_dom_parser&) = delete;
    json_sax_dom_parser& operator=(json_sax_dom_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor)
    ~json_sax_dom_parser() = default;

    bool null()
    {
        handle_value(nullptr);
        return true;
    }

    bool boolean(bool val)
    {
        handle_value(val);
        return true;
    }

    bool number_integer(number_integer_t val)
    {
        handle_value(val);
        return true;
    }

    bool number_unsigned(number_unsigned_t val)
    {
        handle_value(val);
        return true;
    }

    bool number_float(number_float_t val, const string_t& /*unused*/)
    {
        handle_value(val);
        return true;
    }

    bool string(string_t& val)
    {
        handle_value(val);
        return true;
    }

    bool binary(binary_t& val)
    {
        handle_value(std::move(val));
        return true;
    }

    bool start_object(std::size_t len)
    {
        ref_stack.push_back(handle_value(BasicJsonType::value_t::object));

        if (JSON_HEDLEY_UNLIKELY(len != static_cast<std::size_t>(-1) && len > ref_stack.back()->max_size()))
        {
            JSON_THROW(out_of_range::create(408, concat("excessive object size: ", std::to_string(len)), ref_stack.back()));
        }

        return true;
    }

    bool key(string_t& val)
    {
        JSON_ASSERT(!ref_stack.empty());
        JSON_ASSERT(ref_stack.back()->is_object());

        // add null at given key and store the reference for later
        object_element = &(ref_stack.back()->m_data.m_value.object->operator[](val));
        return true;
    }

    bool end_object()
    {
        JSON_ASSERT(!ref_stack.empty());
        JSON_ASSERT(ref_stack.back()->is_object());

        ref_stack.back()->set_parents();
        ref_stack.pop_back();
        return true;
    }

    bool start_array(std::size_t len)
    {
        ref_stack.push_back(handle_value(BasicJsonType::value_t::array));

        if (JSON_HEDLEY_UNLIKELY(len != static_cast<std::size_t>(-1) && len > ref_stack.back()->max_size()))
        {
            JSON_THROW(out_of_range::create(408, concat("excessive array size: ", std::to_string(len)), ref_stack.back()));
        }

        return true;
    }

    bool end_array()
    {
        JSON_ASSERT(!ref_stack.empty());
        JSON_ASSERT(ref_stack.back()->is_array());

        ref_stack.back()->set_parents();
        ref_stack.pop_back();
        return true;
    }

    template<class Exception>
    bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/,
                     const Exception& ex)
    {
        errored = true;
        static_cast<void>(ex);
        if (allow_exceptions)
        {
            JSON_THROW(ex);
        }
        return false;
    }

    constexpr bool is_errored() const
    {
        return errored;
    }

  private:
    /*!
    @invariant If the ref stack is empty, then the passed value will be the new
               root.
    @invariant If the ref stack contains a value, then it is an array or an
               object to which we can add elements
    */
    template<typename Value>
    JSON_HEDLEY_RETURNS_NON_NULL
    BasicJsonType* handle_value(Value&& v)
    {
        if (ref_stack.empty())
        {
            root = BasicJsonType(std::forward<Value>(v));
            return &root;
        }

        JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object());

        if (ref_stack.back()->is_array())
        {
            ref_stack.back()->m_data.m_value.array->emplace_back(std::forward<Value>(v));
            return &(ref_stack.back()->m_data.m_value.array->back());
        }

        JSON_ASSERT(ref_stack.back()->is_object());
        JSON_ASSERT(object_element);
        *object_element = BasicJsonType(std::forward<Value>(v));
        return object_element;
    }

    /// the parsed JSON value
    BasicJsonType& root;
    /// stack to model hierarchy of values
    std::vector<BasicJsonType*> ref_stack {};
    /// helper to hold the reference for the next object element
    BasicJsonType* object_element = nullptr;
    /// whether a syntax error occurred
    bool errored = false;
    /// whether to throw exceptions in case of errors
    const bool allow_exceptions = true;
};
template<typename BasicJsonType>
class json_sax_dom_callback_parser
{
  public:
    using number_integer_t = typename BasicJsonType::number_integer_t;
    using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
    using number_float_t = typename BasicJsonType::number_float_t;
    using string_t = typename BasicJsonType::string_t;
    using binary_t = typename BasicJsonType::binary_t;
    using parser_callback_t = typename BasicJsonType::parser_callback_t;
    using parse_event_t = typename BasicJsonType::parse_event_t;

    json_sax_dom_callback_parser(BasicJsonType& r,
                                 const parser_callback_t cb,
                                 const bool allow_exceptions_ = true)
        : root(r), callback(cb), allow_exceptions(allow_exceptions_)
    {
        keep_stack.push_back(true);
    }

    // make class move-only
    json_sax_dom_callback_parser(const json_sax_dom_callback_parser&) = delete;
    json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor)
    json_sax_dom_callback_parser& operator=(const json_sax_dom_callback_parser&) = delete;
    json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor)
    ~json_sax_dom_callback_parser() = default;

    bool null()
    {
        handle_value(nullptr);
        return true;
    }

    bool boolean(bool val)
    {
        handle_value(val);
        return true;
    }

    bool number_integer(number_integer_t val)
    {
        handle_value(val);
        return true;
    }

    bool number_unsigned(number_unsigned_t val)
    {
        handle_value(val);
        return true;
    }

    bool number_float(number_float_t val, const string_t& /*unused*/)
    {
        handle_value(val);
        return true;
    }

    bool string(string_t& val)
    {
        handle_value(val);
        return true;
    }

    bool binary(binary_t& val)
    {
        handle_value(std::move(val));
        return true;
    }

    bool start_object(std::size_t len)
    {
        // check callback for object start
        const bool keep = callback(static_cast<int>(ref_stack.size()), parse_event_t::object_start, discarded);
        keep_stack.push_back(keep);

        auto val = handle_value(BasicJsonType::value_t::object, true);
        ref_stack.push_back(val.second);

        // check object limit
        if (ref_stack.back() && JSON_HEDLEY_UNLIKELY(len != static_cast<std::size_t>(-1) && len > ref_stack.back()->max_size()))
        {
            JSON_THROW(out_of_range::create(408, concat("excessive object size: ", std::to_string(len)), ref_stack.back()));
        }

        return true;
    }

    bool key(string_t& val)
    {
        BasicJsonType k = BasicJsonType(val);

        // check callback for key
        const bool keep = callback(static_cast<int>(ref_stack.size()), parse_event_t::key, k);
        key_keep_stack.push_back(keep);

        // add discarded value at given key and store the reference for later
        if (keep && ref_stack.back())
        {
            object_element = &(ref_stack.back()->m_data.m_value.object->operator[](val) = discarded);
        }

        return true;
    }

    bool end_object()
    {
        if (ref_stack.back())
        {
            if (!callback(static_cast<int>(ref_stack.size()) - 1, parse_event_t::object_end, *ref_stack.back()))
            {
                // discard object
                *ref_stack.back() = discarded;
            }
            else
            {
                ref_stack.back()->set_parents();
            }
        }

        JSON_ASSERT(!ref_stack.empty());
        JSON_ASSERT(!keep_stack.empty());
        ref_stack.pop_back();
        keep_stack.pop_back();

        if (!ref_stack.empty() && ref_stack.back() && ref_stack.back()->is_structured())
        {
            // remove discarded value
            for (auto it = ref_stack.back()->begin(); it != ref_stack.back()->end(); ++it)
            {
                if (it->is_discarded())
                {
                    ref_stack.back()->erase(it);
                    break;
                }
            }
        }

        return true;
    }

    bool start_array(std::size_t len)
    {
        const bool keep = callback(static_cast<int>(ref_stack.size()), parse_event_t::array_start, discarded);
        keep_stack.push_back(keep);

        auto val = handle_value(BasicJsonType::value_t::array, true);
        ref_stack.push_back(val.second);

        // check array limit
        if (ref_stack.back() && JSON_HEDLEY_UNLIKELY(len != static_cast<std::size_t>(-1) && len > ref_stack.back()->max_size()))
        {
            JSON_THROW(out_of_range::create(408, concat("excessive array size: ", std::to_string(len)), ref_stack.back()));
        }

        return true;
    }

    bool end_array()
    {
        bool keep = true;

        if (ref_stack.back())
        {
            keep = callback(static_cast<int>(ref_stack.size()) - 1, parse_event_t::array_end, *ref_stack.back());
            if (keep)
            {
                ref_stack.back()->set_parents();
            }
            else
            {
                // discard array
                *ref_stack.back() = discarded;
            }
        }

        JSON_ASSERT(!ref_stack.empty());
        JSON_ASSERT(!keep_stack.empty());
        ref_stack.pop_back();
        keep_stack.pop_back();

        // remove discarded value
        if (!keep && !ref_stack.empty() && ref_stack.back()->is_array())
        {
            ref_stack.back()->m_data.m_value.array->pop_back();
        }

        return true;
    }

    template<class Exception>
    bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/,
                     const Exception& ex)
    {
        errored = true;
        static_cast<void>(ex);
        if (allow_exceptions)
        {
            JSON_THROW(ex);
        }
        return false;
    }

    constexpr bool is_errored() const
    {
        return errored;
    }

  private:
    /*!
    @param[in] v  value to add to the JSON value we build during parsing
    @param[in] skip_callback  whether we should skip calling the callback
               function; this is required after start_array() and
               start_object() SAX events, because otherwise we would call the
               callback function with an empty array or object, respectively.

    @invariant If the ref stack is empty, then the passed value will be the new
               root.
    @invariant If the ref stack contains a value, then it is an array or an
               object to which we can add elements

    @return pair of boolean (whether value should be kept) and pointer (to the
            passed value in the ref_stack hierarchy; nullptr if not kept)
    */
    template<typename Value>
    std::pair<bool, BasicJsonType*> handle_value(Value&& v, const bool skip_callback = false)
    {
        JSON_ASSERT(!keep_stack.empty());

        // do not handle this value if we know it would be added to a discarded
        // container
        if (!keep_stack.back())
        {
            return {false, nullptr};
        }

        // create value
        auto value = BasicJsonType(std::forward<Value>(v));

        // check callback
        const bool keep = skip_callback || callback(static_cast<int>(ref_stack.size()), parse_event_t::value, value);

        // do not handle this value if we just learnt it shall be discarded
        if (!keep)
        {
            return {false, nullptr};
        }

        if (ref_stack.empty())
        {
            root = std::move(value);
            return {true, &root};
        }

        // skip this value if we already decided to skip the parent
        // (https://github.com/nlohmann/json/issues/971#issuecomment-413678360)
        if (!ref_stack.back())
        {
            return {false, nullptr};
        }

        // we now only expect arrays and objects
        JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object());

        // array
        if (ref_stack.back()->is_array())
        {
            ref_stack.back()->m_data.m_value.array->emplace_back(std::move(value));
            return {true, &(ref_stack.back()->m_data.m_value.array->back())};
        }

        // object
        JSON_ASSERT(ref_stack.back()->is_object());
        // check if we should store an element for the current key
        JSON_ASSERT(!key_keep_stack.empty());
        const bool store_element = key_keep_stack.back();
        key_keep_stack.pop_back();

        if (!store_element)
        {
            return {false, nullptr};
        }

        JSON_ASSERT(object_element);
        *object_element = std::move(value);
        return {true, object_element};
    }

    /// the parsed JSON value
    BasicJsonType& root;
    /// stack to model hierarchy of values
    std::vector<BasicJsonType*> ref_stack {};
    /// stack to manage which values to keep
    std::vector<bool> keep_stack {}; // NOLINT(readability-redundant-member-init)
    /// stack to manage which object keys to keep
    std::vector<bool> key_keep_stack {}; // NOLINT(readability-redundant-member-init)
    /// helper to hold the reference for the next object element
    BasicJsonType* object_element = nullptr;
    /// whether a syntax error occurred
    bool errored = false;
    /// callback function
    const parser_callback_t callback = nullptr;
    /// whether to throw exceptions in case of errors
    const bool allow_exceptions = true;
    /// a discarded value for the callback
    BasicJsonType discarded = BasicJsonType::value_t::discarded;
};

template<typename BasicJsonType>
class json_sax_acceptor
{
  public:
    using number_integer_t = typename BasicJsonType::number_integer_t;
    using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
    using number_float_t = typename BasicJsonType::number_float_t;
    using string_t = typename BasicJsonType::string_t;
    using binary_t = typename BasicJsonType::binary_t;

    bool null()
    {
        return true;
    }

    bool boolean(bool /*unused*/)
    {
        return true;
    }

    bool number_integer(number_integer_t /*unused*/)
    {
        return true;
    }

    bool number_unsigned(number_unsigned_t /*unused*/)
    {
        return true;
    }

    bool number_float(number_float_t /*unused*/, const string_t& /*unused*/)
    {
        return true;
    }

    bool string(string_t& /*unused*/)
    {
        return true;
    }

    bool binary(binary_t& /*unused*/)
    {
        return true;
    }

    bool start_object(std::size_t /*unused*/ = static_cast<std::size_t>(-1))
    {
        return true;
    }

    bool key(string_t& /*unused*/)
    {
        return true;
    }

    bool end_object()
    {
        return true;
    }

    bool start_array(std::size_t /*unused*/ = static_cast<std::size_t>(-1))
    {
        return true;
    }

    bool end_array()
    {
        return true;
    }

    bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const detail::exception& /*unused*/)
    {
        return false;
    }
};

} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
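A minimal SAX consumer, as a sketch: it implements the interface above by deriving from the json_sax_t convenience typedef and merely counts string values.

#include <iostream>
#include <nlohmann/json.hpp>

struct counting_sax : nlohmann::json::json_sax_t
{
    std::size_t strings = 0;

    bool null() override { return true; }
    bool boolean(bool) override { return true; }
    bool number_integer(number_integer_t) override { return true; }
    bool number_unsigned(number_unsigned_t) override { return true; }
    bool number_float(number_float_t, const string_t&) override { return true; }
    bool string(string_t&) override { ++strings; return true; }
    bool binary(binary_t&) override { return true; }
    bool start_object(std::size_t) override { return true; }
    bool key(string_t&) override { return true; }
    bool end_object() override { return true; }
    bool start_array(std::size_t) override { return true; }
    bool end_array() override { return true; }
    bool parse_error(std::size_t, const std::string&, const nlohmann::json::exception&) override { return false; }
};

int main()
{
    counting_sax sax;
    nlohmann::json::sax_parse(R"(["a", "b", {"c": "d"}])", &sax);
    std::cout << sax.strings << '\n'; // 3 ("c" arrives via key(), not string())
}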
1633  sample/lib/json/include/nlohmann/detail/input/lexer.hpp  (Normal file)
File diff suppressed because it is too large
519  sample/lib/json/include/nlohmann/detail/input/parser.hpp  (Normal file)
@@ -0,0 +1,519 @@
//     __ _____ _____ _____
//  __|  |   __|     |   | |  JSON for Modern C++
// |  |  |__   |  |  | | | |  version 3.11.3
// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <cmath> // isfinite
#include <cstdint> // uint8_t
#include <functional> // function
#include <string> // string
#include <utility> // move
#include <vector> // vector

#include <nlohmann/detail/exceptions.hpp>
#include <nlohmann/detail/input/input_adapters.hpp>
#include <nlohmann/detail/input/json_sax.hpp>
#include <nlohmann/detail/input/lexer.hpp>
#include <nlohmann/detail/macro_scope.hpp>
#include <nlohmann/detail/meta/is_sax.hpp>
#include <nlohmann/detail/string_concat.hpp>
#include <nlohmann/detail/value_t.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{
////////////
// parser //
////////////

enum class parse_event_t : std::uint8_t
{
    /// the parser read `{` and started to process a JSON object
    object_start,
    /// the parser read `}` and finished processing a JSON object
    object_end,
    /// the parser read `[` and started to process a JSON array
    array_start,
    /// the parser read `]` and finished processing a JSON array
    array_end,
    /// the parser read a key of a value in an object
    key,
    /// the parser finished reading a JSON value
    value
};

template<typename BasicJsonType>
using parser_callback_t =
    std::function<bool(int /*depth*/, parse_event_t /*event*/, BasicJsonType& /*parsed*/)>;
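// Illustrative use (an assumption about call-site code, not part of this
// header): the same callback shape is exposed publicly as
// nlohmann::json::parser_callback_t, e.g. to drop one key while the DOM is
// built:
//
//   nlohmann::json::parser_callback_t cb =
//       [](int /*depth*/, nlohmann::json::parse_event_t event, nlohmann::json& parsed)
//   {
//       // returning false for a key event discards that key-value pair
//       return !(event == nlohmann::json::parse_event_t::key && parsed == "internal");
//   };
//   auto j = nlohmann::json::parse(R"({"internal": 42, "kept": 1})", cb);
//   // j == {"kept": 1}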
|
||||
/*!
|
||||
@brief syntax analysis
|
||||
|
||||
This class implements a recursive descent parser.
|
||||
*/
|
||||
template<typename BasicJsonType, typename InputAdapterType>
|
||||
class parser
|
||||
{
|
||||
using number_integer_t = typename BasicJsonType::number_integer_t;
|
||||
using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
|
||||
using number_float_t = typename BasicJsonType::number_float_t;
|
||||
using string_t = typename BasicJsonType::string_t;
|
||||
using lexer_t = lexer<BasicJsonType, InputAdapterType>;
|
||||
using token_type = typename lexer_t::token_type;
|
||||
|
||||
public:
|
||||
/// a parser reading from an input adapter
|
||||
explicit parser(InputAdapterType&& adapter,
|
||||
const parser_callback_t<BasicJsonType> cb = nullptr,
|
||||
const bool allow_exceptions_ = true,
|
||||
const bool skip_comments = false)
|
||||
: callback(cb)
|
||||
, m_lexer(std::move(adapter), skip_comments)
|
||||
, allow_exceptions(allow_exceptions_)
|
||||
{
|
||||
// read first token
|
||||
get_token();
|
||||
}
|
||||
|
||||
/*!
|
||||
@brief public parser interface
|
||||
|
||||
@param[in] strict whether to expect the last token to be EOF
|
||||
@param[in,out] result parsed JSON value
|
||||
|
||||
@throw parse_error.101 in case of an unexpected token
|
||||
@throw parse_error.102 if to_unicode fails or surrogate error
|
||||
@throw parse_error.103 if to_unicode fails
|
||||
*/
|
||||
void parse(const bool strict, BasicJsonType& result)
|
||||
{
|
||||
        if (callback)
        {
            json_sax_dom_callback_parser<BasicJsonType> sdp(result, callback, allow_exceptions);
            sax_parse_internal(&sdp);

            // in strict mode, input must be completely read
            if (strict && (get_token() != token_type::end_of_input))
            {
                sdp.parse_error(m_lexer.get_position(),
                                m_lexer.get_token_string(),
                                parse_error::create(101, m_lexer.get_position(),
                                                    exception_message(token_type::end_of_input, "value"), nullptr));
            }

            // in case of an error, return discarded value
            if (sdp.is_errored())
            {
                result = value_t::discarded;
                return;
            }

            // set top-level value to null if it was discarded by the callback
            // function
            if (result.is_discarded())
            {
                result = nullptr;
            }
        }
        else
        {
            json_sax_dom_parser<BasicJsonType> sdp(result, allow_exceptions);
            sax_parse_internal(&sdp);

            // in strict mode, input must be completely read
            if (strict && (get_token() != token_type::end_of_input))
            {
                sdp.parse_error(m_lexer.get_position(),
                                m_lexer.get_token_string(),
                                parse_error::create(101, m_lexer.get_position(), exception_message(token_type::end_of_input, "value"), nullptr));
            }

            // in case of an error, return discarded value
            if (sdp.is_errored())
            {
                result = value_t::discarded;
                return;
            }
        }

        result.assert_invariant();
    }
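Both branches above funnel errors into a discarded value when exceptions are disabled. Through the public `basic_json::parse` wrapper this looks as follows (a usage sketch, assuming the usual `nlohmann::json` alias):

```cpp
#include <nlohmann/json.hpp>
#include <iostream>

int main()
{
    using json = nlohmann::json;

    // allow_exceptions == false: a parse error yields value_t::discarded
    // instead of throwing parse_error.101
    json j = json::parse("[1, 2,", nullptr, /*allow_exceptions=*/false);
    if (j.is_discarded())
    {
        std::cout << "input was not valid JSON\n";
    }
}
```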
    /*!
    @brief public accept interface

    @param[in] strict whether to expect the last token to be EOF
    @return whether the input is a proper JSON text
    */
    bool accept(const bool strict = true)
    {
        json_sax_acceptor<BasicJsonType> sax_acceptor;
        return sax_parse(&sax_acceptor, strict);
    }
    template<typename SAX>
    JSON_HEDLEY_NON_NULL(2)
    bool sax_parse(SAX* sax, const bool strict = true)
    {
        (void)detail::is_sax_static_asserts<SAX, BasicJsonType> {};
        const bool result = sax_parse_internal(sax);

        // strict mode: next byte must be EOF
        if (result && strict && (get_token() != token_type::end_of_input))
        {
            return sax->parse_error(m_lexer.get_position(),
                                    m_lexer.get_token_string(),
                                    parse_error::create(101, m_lexer.get_position(), exception_message(token_type::end_of_input, "value"), nullptr));
        }

        return result;
    }
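sax_parse() accepts any handler satisfying the event interface that is_sax_static_asserts checks. A sketch of a counting handler, written against the library's documented `json_sax` virtual interface (`json::json_sax_t` alias assumed from the public header):

```cpp
#include <nlohmann/json.hpp>
#include <iostream>

// count SAX events without building a DOM
struct counting_sax : nlohmann::json::json_sax_t
{
    std::size_t events = 0;

    bool null() override { ++events; return true; }
    bool boolean(bool) override { ++events; return true; }
    bool number_integer(number_integer_t) override { ++events; return true; }
    bool number_unsigned(number_unsigned_t) override { ++events; return true; }
    bool number_float(number_float_t, const string_t&) override { ++events; return true; }
    bool string(string_t&) override { ++events; return true; }
    bool binary(binary_t&) override { ++events; return true; }
    bool start_object(std::size_t) override { ++events; return true; }
    bool key(string_t&) override { ++events; return true; }
    bool end_object() override { ++events; return true; }
    bool start_array(std::size_t) override { ++events; return true; }
    bool end_array() override { ++events; return true; }
    bool parse_error(std::size_t, const std::string&, const nlohmann::json::exception&) override
    {
        return false;  // returning false stops parsing, as in the code above
    }
};

int main()
{
    counting_sax handler;
    const bool ok = nlohmann::json::sax_parse(R"({"a": [1, 2, 3]})", &handler);
    std::cout << handler.events << " SAX events, valid: " << ok << '\n';
}
```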
  private:
    template<typename SAX>
    JSON_HEDLEY_NON_NULL(2)
    bool sax_parse_internal(SAX* sax)
    {
        // stack to remember the hierarchy of structured values we are parsing
        // true = array; false = object
        std::vector<bool> states;
        // value to avoid a goto (see comment where set to true)
        bool skip_to_state_evaluation = false;

        while (true)
        {
            if (!skip_to_state_evaluation)
            {
                // invariant: get_token() was called before each iteration
                switch (last_token)
                {
                    case token_type::begin_object:
                    {
                        if (JSON_HEDLEY_UNLIKELY(!sax->start_object(static_cast<std::size_t>(-1))))
                        {
                            return false;
                        }

                        // closing } -> we are done
                        if (get_token() == token_type::end_object)
                        {
                            if (JSON_HEDLEY_UNLIKELY(!sax->end_object()))
                            {
                                return false;
                            }
                            break;
                        }

                        // parse key
                        if (JSON_HEDLEY_UNLIKELY(last_token != token_type::value_string))
                        {
                            return sax->parse_error(m_lexer.get_position(),
                                                    m_lexer.get_token_string(),
                                                    parse_error::create(101, m_lexer.get_position(), exception_message(token_type::value_string, "object key"), nullptr));
                        }
                        if (JSON_HEDLEY_UNLIKELY(!sax->key(m_lexer.get_string())))
                        {
                            return false;
                        }

                        // parse separator (:)
                        if (JSON_HEDLEY_UNLIKELY(get_token() != token_type::name_separator))
                        {
                            return sax->parse_error(m_lexer.get_position(),
                                                    m_lexer.get_token_string(),
                                                    parse_error::create(101, m_lexer.get_position(), exception_message(token_type::name_separator, "object separator"), nullptr));
                        }

                        // remember we are now inside an object
                        states.push_back(false);

                        // parse values
                        get_token();
                        continue;
                    }

                    case token_type::begin_array:
                    {
                        if (JSON_HEDLEY_UNLIKELY(!sax->start_array(static_cast<std::size_t>(-1))))
                        {
                            return false;
                        }

                        // closing ] -> we are done
                        if (get_token() == token_type::end_array)
                        {
                            if (JSON_HEDLEY_UNLIKELY(!sax->end_array()))
                            {
                                return false;
                            }
                            break;
                        }

                        // remember we are now inside an array
                        states.push_back(true);

                        // parse values (no need to call get_token)
                        continue;
                    }

                    case token_type::value_float:
                    {
                        const auto res = m_lexer.get_number_float();

                        if (JSON_HEDLEY_UNLIKELY(!std::isfinite(res)))
                        {
                            return sax->parse_error(m_lexer.get_position(),
                                                    m_lexer.get_token_string(),
                                                    out_of_range::create(406, concat("number overflow parsing '", m_lexer.get_token_string(), '\''), nullptr));
                        }

                        if (JSON_HEDLEY_UNLIKELY(!sax->number_float(res, m_lexer.get_string())))
                        {
                            return false;
                        }

                        break;
                    }

                    case token_type::literal_false:
                    {
                        if (JSON_HEDLEY_UNLIKELY(!sax->boolean(false)))
                        {
                            return false;
                        }
                        break;
                    }

                    case token_type::literal_null:
                    {
                        if (JSON_HEDLEY_UNLIKELY(!sax->null()))
                        {
                            return false;
                        }
                        break;
                    }

                    case token_type::literal_true:
                    {
                        if (JSON_HEDLEY_UNLIKELY(!sax->boolean(true)))
                        {
                            return false;
                        }
                        break;
                    }

                    case token_type::value_integer:
                    {
                        if (JSON_HEDLEY_UNLIKELY(!sax->number_integer(m_lexer.get_number_integer())))
                        {
                            return false;
                        }
                        break;
                    }

                    case token_type::value_string:
                    {
                        if (JSON_HEDLEY_UNLIKELY(!sax->string(m_lexer.get_string())))
                        {
                            return false;
                        }
                        break;
                    }

                    case token_type::value_unsigned:
                    {
                        if (JSON_HEDLEY_UNLIKELY(!sax->number_unsigned(m_lexer.get_number_unsigned())))
                        {
                            return false;
                        }
                        break;
                    }

                    case token_type::parse_error:
                    {
                        // using "uninitialized" to avoid "expected" message
                        return sax->parse_error(m_lexer.get_position(),
                                                m_lexer.get_token_string(),
                                                parse_error::create(101, m_lexer.get_position(), exception_message(token_type::uninitialized, "value"), nullptr));
                    }
                    case token_type::end_of_input:
                    {
                        if (JSON_HEDLEY_UNLIKELY(m_lexer.get_position().chars_read_total == 1))
                        {
                            return sax->parse_error(m_lexer.get_position(),
                                                    m_lexer.get_token_string(),
                                                    parse_error::create(101, m_lexer.get_position(),
                                                                        "attempting to parse an empty input; check that your input string or stream contains the expected JSON", nullptr));
                        }

                        return sax->parse_error(m_lexer.get_position(),
                                                m_lexer.get_token_string(),
                                                parse_error::create(101, m_lexer.get_position(), exception_message(token_type::literal_or_value, "value"), nullptr));
                    }
                    case token_type::uninitialized:
                    case token_type::end_array:
                    case token_type::end_object:
                    case token_type::name_separator:
                    case token_type::value_separator:
                    case token_type::literal_or_value:
                    default: // the last token was unexpected
                    {
                        return sax->parse_error(m_lexer.get_position(),
                                                m_lexer.get_token_string(),
                                                parse_error::create(101, m_lexer.get_position(), exception_message(token_type::literal_or_value, "value"), nullptr));
                    }
                }
            }
            else
            {
                skip_to_state_evaluation = false;
            }

            // we reached this line after we successfully parsed a value
            if (states.empty())
            {
                // empty stack: we reached the end of the hierarchy: done
                return true;
            }

            if (states.back()) // array
            {
                // comma -> next value
                if (get_token() == token_type::value_separator)
                {
                    // parse a new value
                    get_token();
                    continue;
                }

                // closing ]
                if (JSON_HEDLEY_LIKELY(last_token == token_type::end_array))
                {
                    if (JSON_HEDLEY_UNLIKELY(!sax->end_array()))
                    {
                        return false;
                    }

                    // We are done with this array. Before we can parse a
                    // new value, we need to evaluate the new state first.
                    // By setting skip_to_state_evaluation to true, we
                    // are effectively jumping to the beginning of this if.
                    JSON_ASSERT(!states.empty());
                    states.pop_back();
                    skip_to_state_evaluation = true;
                    continue;
                }

                return sax->parse_error(m_lexer.get_position(),
                                        m_lexer.get_token_string(),
                                        parse_error::create(101, m_lexer.get_position(), exception_message(token_type::end_array, "array"), nullptr));
            }

            // states.back() is false -> object

            // comma -> next value
            if (get_token() == token_type::value_separator)
            {
                // parse key
                if (JSON_HEDLEY_UNLIKELY(get_token() != token_type::value_string))
                {
                    return sax->parse_error(m_lexer.get_position(),
                                            m_lexer.get_token_string(),
                                            parse_error::create(101, m_lexer.get_position(), exception_message(token_type::value_string, "object key"), nullptr));
                }

                if (JSON_HEDLEY_UNLIKELY(!sax->key(m_lexer.get_string())))
                {
                    return false;
                }

                // parse separator (:)
                if (JSON_HEDLEY_UNLIKELY(get_token() != token_type::name_separator))
                {
                    return sax->parse_error(m_lexer.get_position(),
                                            m_lexer.get_token_string(),
                                            parse_error::create(101, m_lexer.get_position(), exception_message(token_type::name_separator, "object separator"), nullptr));
                }

                // parse values
                get_token();
                continue;
            }

            // closing }
            if (JSON_HEDLEY_LIKELY(last_token == token_type::end_object))
            {
                if (JSON_HEDLEY_UNLIKELY(!sax->end_object()))
                {
                    return false;
                }

                // We are done with this object. Before we can parse a
                // new value, we need to evaluate the new state first.
                // By setting skip_to_state_evaluation to true, we
                // are effectively jumping to the beginning of this if.
                JSON_ASSERT(!states.empty());
                states.pop_back();
                skip_to_state_evaluation = true;
                continue;
            }

            return sax->parse_error(m_lexer.get_position(),
                                    m_lexer.get_token_string(),
                                    parse_error::create(101, m_lexer.get_position(), exception_message(token_type::end_object, "object"), nullptr));
        }
    }

    /// get next token from lexer
    token_type get_token()
    {
        return last_token = m_lexer.scan();
    }

    std::string exception_message(const token_type expected, const std::string& context)
    {
        std::string error_msg = "syntax error ";

        if (!context.empty())
        {
            error_msg += concat("while parsing ", context, ' ');
        }

        error_msg += "- ";

        if (last_token == token_type::parse_error)
        {
            error_msg += concat(m_lexer.get_error_message(), "; last read: '",
                                m_lexer.get_token_string(), '\'');
        }
        else
        {
            error_msg += concat("unexpected ", lexer_t::token_type_name(last_token));
        }

        if (expected != token_type::uninitialized)
        {
            error_msg += concat("; expected ", lexer_t::token_type_name(expected));
        }

        return error_msg;
    }

  private:
    /// callback function
    const parser_callback_t<BasicJsonType> callback = nullptr;
    /// the type of the last read token
    token_type last_token = token_type::uninitialized;
    /// the lexer
    lexer_t m_lexer;
    /// whether to throw exceptions in case of errors
    const bool allow_exceptions = true;
};

} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
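exception_message() assembles the "syntax error while parsing &lt;context&gt; - unexpected &lt;token&gt;; expected &lt;token&gt;" text carried by parse_error.101. A sketch of observing it through the public API (assuming the usual `nlohmann::json` alias; the exact message text may vary):

```cpp
#include <nlohmann/json.hpp>
#include <iostream>

int main()
{
    try
    {
        // missing ':' separator triggers parse_error.101
        std::cout << nlohmann::json::parse("{\"key\" 42}") << '\n';
    }
    catch (const nlohmann::json::parse_error& e)
    {
        // e.what() carries the text built by exception_message(), e.g.
        // "... syntax error while parsing object separator - unexpected ..."
        std::cout << e.what() << '\n';
    }
}
```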
37
sample/lib/json/include/nlohmann/detail/input/position_t.hpp
Normal file
@@ -0,0 +1,37 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <cstddef> // size_t

#include <nlohmann/detail/abi_macros.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

/// struct to capture the start position of the current token
struct position_t
{
    /// the total number of characters read
    std::size_t chars_read_total = 0;
    /// the number of characters read in the current line
    std::size_t chars_read_current_line = 0;
    /// the number of lines read
    std::size_t lines_read = 0;

    /// conversion to size_t to preserve SAX interface
    constexpr operator size_t() const
    {
        return chars_read_total;
    }
};

} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
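position_t is the value handed to parse_error::create() in the parser above. A sketch of turning it into a "line X, column Y" diagnostic (the numbers are made up, and including the vendored detail header directly is an assumption, not supported public API):

```cpp
#include <nlohmann/detail/input/position_t.hpp>
#include <cstddef>
#include <iostream>

int main()
{
    nlohmann::detail::position_t pos;
    pos.chars_read_total = 12;        // normally filled in by the lexer
    pos.lines_read = 1;
    pos.chars_read_current_line = 4;

    // the implicit conversion keeps the older size_t-based SAX interface working
    std::size_t byte_offset = pos;

    std::cout << "error at line " << pos.lines_read + 1
              << ", column " << pos.chars_read_current_line
              << " (byte " << byte_offset << ")\n";
}
```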
35
sample/lib/json/include/nlohmann/detail/iterators/internal_iterator.hpp
Normal file
@@ -0,0 +1,35 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <nlohmann/detail/abi_macros.hpp>
#include <nlohmann/detail/iterators/primitive_iterator.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

/*!
@brief an iterator value

@note This structure could easily be a union, but MSVC currently does not allow
union members with complex constructors, see https://github.com/nlohmann/json/pull/105.
*/
template<typename BasicJsonType> struct internal_iterator
{
    /// iterator for JSON objects
    typename BasicJsonType::object_t::iterator object_iterator {};
    /// iterator for JSON arrays
    typename BasicJsonType::array_t::iterator array_iterator {};
    /// generic iterator for all other types
    primitive_iterator_t primitive_iterator {};
};

} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
751
sample/lib/json/include/nlohmann/detail/iterators/iter_impl.hpp
Normal file
@@ -0,0 +1,751 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <iterator> // iterator, random_access_iterator_tag, bidirectional_iterator_tag, advance, next
#include <type_traits> // conditional, is_const, remove_const

#include <nlohmann/detail/exceptions.hpp>
#include <nlohmann/detail/iterators/internal_iterator.hpp>
#include <nlohmann/detail/iterators/primitive_iterator.hpp>
#include <nlohmann/detail/macro_scope.hpp>
#include <nlohmann/detail/meta/cpp_future.hpp>
#include <nlohmann/detail/meta/type_traits.hpp>
#include <nlohmann/detail/value_t.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

// forward declare, to be able to friend it later on
template<typename IteratorType> class iteration_proxy;
template<typename IteratorType> class iteration_proxy_value;

/*!
@brief a template for a bidirectional iterator for the @ref basic_json class
This class implements both iterators (iterator and const_iterator) for the
@ref basic_json class.
@note An iterator is called *initialized* when a pointer to a JSON value has
been set (e.g., by a constructor or a copy assignment). If the iterator is
default-constructed, it is *uninitialized* and most methods are undefined.
**The library uses assertions to detect calls on uninitialized iterators.**
@requirement The class satisfies the following concept requirements:
-
[BidirectionalIterator](https://en.cppreference.com/w/cpp/named_req/BidirectionalIterator):
  The iterator can be moved in both directions (i.e. incremented and
  decremented).
@since version 1.0.0, simplified in version 2.0.9, change to bidirectional
iterators in version 3.0.0 (see https://github.com/nlohmann/json/issues/593)
*/
template<typename BasicJsonType>
class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-special-member-functions)
{
    /// the iterator with BasicJsonType of different const-ness
    using other_iter_impl = iter_impl<typename std::conditional<std::is_const<BasicJsonType>::value, typename std::remove_const<BasicJsonType>::type, const BasicJsonType>::type>;
    /// allow basic_json to access private members
    friend other_iter_impl;
    friend BasicJsonType;
    friend iteration_proxy<iter_impl>;
    friend iteration_proxy_value<iter_impl>;

    using object_t = typename BasicJsonType::object_t;
    using array_t = typename BasicJsonType::array_t;
    // make sure BasicJsonType is basic_json or const basic_json
    static_assert(is_basic_json<typename std::remove_const<BasicJsonType>::type>::value,
                  "iter_impl only accepts (const) basic_json");
    // superficial check for the LegacyBidirectionalIterator named requirement
    static_assert(std::is_base_of<std::bidirectional_iterator_tag, typename std::iterator_traits<typename object_t::iterator>::iterator_category>::value
                  && std::is_base_of<std::bidirectional_iterator_tag, typename std::iterator_traits<typename array_t::iterator>::iterator_category>::value,
                  "basic_json iterator assumes array and object type iterators satisfy the LegacyBidirectionalIterator named requirement.");
  public:
    /// The std::iterator class template (used as a base class to provide typedefs) is deprecated in C++17.
    /// The C++ Standard has never required user-defined iterators to derive from std::iterator.
    /// A user-defined iterator should provide publicly accessible typedefs named
    /// iterator_category, value_type, difference_type, pointer, and reference.
    /// Note that value_type is required to be non-const, even for constant iterators.
    using iterator_category = std::bidirectional_iterator_tag;

    /// the type of the values when the iterator is dereferenced
    using value_type = typename BasicJsonType::value_type;
    /// a type to represent differences between iterators
    using difference_type = typename BasicJsonType::difference_type;
    /// defines a pointer to the type iterated over (value_type)
    using pointer = typename std::conditional<std::is_const<BasicJsonType>::value,
          typename BasicJsonType::const_pointer,
          typename BasicJsonType::pointer>::type;
    /// defines a reference to the type iterated over (value_type)
    using reference =
        typename std::conditional<std::is_const<BasicJsonType>::value,
        typename BasicJsonType::const_reference,
        typename BasicJsonType::reference>::type;

    iter_impl() = default;
    ~iter_impl() = default;
    iter_impl(iter_impl&&) noexcept = default;
    iter_impl& operator=(iter_impl&&) noexcept = default;

    /*!
    @brief constructor for a given JSON instance
    @param[in] object pointer to a JSON object for this iterator
    @pre object != nullptr
    @post The iterator is initialized; i.e. `m_object != nullptr`.
    */
    explicit iter_impl(pointer object) noexcept : m_object(object)
    {
        JSON_ASSERT(m_object != nullptr);

        switch (m_object->m_data.m_type)
        {
            case value_t::object:
            {
                m_it.object_iterator = typename object_t::iterator();
                break;
            }

            case value_t::array:
            {
                m_it.array_iterator = typename array_t::iterator();
                break;
            }

            case value_t::null:
            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
            {
                m_it.primitive_iterator = primitive_iterator_t();
                break;
            }
        }
    }
    /*!
    @note The conventional copy constructor and copy assignment are implicitly
          defined. Combined with the following converting constructor and
          assignment, they support: (1) copy from iterator to iterator, (2)
          copy from const iterator to const iterator, and (3) conversion from
          iterator to const iterator. However, conversion from const iterator
          to iterator is not defined.
    */

    /*!
    @brief const copy constructor
    @param[in] other const iterator to copy from
    @note This copy constructor had to be defined explicitly to circumvent a bug
          occurring on msvc v19.0 compiler (VS 2015) debug build. For more
          information refer to: https://github.com/nlohmann/json/issues/1608
    */
    iter_impl(const iter_impl<const BasicJsonType>& other) noexcept
        : m_object(other.m_object), m_it(other.m_it)
    {}

    /*!
    @brief converting assignment
    @param[in] other const iterator to copy from
    @return const/non-const iterator
    @note It is not checked whether @a other is initialized.
    */
    iter_impl& operator=(const iter_impl<const BasicJsonType>& other) noexcept
    {
        if (&other != this)
        {
            m_object = other.m_object;
            m_it = other.m_it;
        }
        return *this;
    }

    /*!
    @brief converting constructor
    @param[in] other non-const iterator to copy from
    @note It is not checked whether @a other is initialized.
    */
    iter_impl(const iter_impl<typename std::remove_const<BasicJsonType>::type>& other) noexcept
        : m_object(other.m_object), m_it(other.m_it)
    {}

    /*!
    @brief converting assignment
    @param[in] other non-const iterator to copy from
    @return const/non-const iterator
    @note It is not checked whether @a other is initialized.
    */
    iter_impl& operator=(const iter_impl<typename std::remove_const<BasicJsonType>::type>& other) noexcept // NOLINT(cert-oop54-cpp)
    {
        m_object = other.m_object;
        m_it = other.m_it;
        return *this;
    }
  JSON_PRIVATE_UNLESS_TESTED:
    /*!
    @brief set the iterator to the first value
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    void set_begin() noexcept
    {
        JSON_ASSERT(m_object != nullptr);

        switch (m_object->m_data.m_type)
        {
            case value_t::object:
            {
                m_it.object_iterator = m_object->m_data.m_value.object->begin();
                break;
            }

            case value_t::array:
            {
                m_it.array_iterator = m_object->m_data.m_value.array->begin();
                break;
            }

            case value_t::null:
            {
                // set to end so begin()==end() is true: null is empty
                m_it.primitive_iterator.set_end();
                break;
            }

            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
            {
                m_it.primitive_iterator.set_begin();
                break;
            }
        }
    }

    /*!
    @brief set the iterator past the last value
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    void set_end() noexcept
    {
        JSON_ASSERT(m_object != nullptr);

        switch (m_object->m_data.m_type)
        {
            case value_t::object:
            {
                m_it.object_iterator = m_object->m_data.m_value.object->end();
                break;
            }

            case value_t::array:
            {
                m_it.array_iterator = m_object->m_data.m_value.array->end();
                break;
            }

            case value_t::null:
            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
            {
                m_it.primitive_iterator.set_end();
                break;
            }
        }
    }
  public:
    /*!
    @brief return a reference to the value pointed to by the iterator
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    reference operator*() const
    {
        JSON_ASSERT(m_object != nullptr);

        switch (m_object->m_data.m_type)
        {
            case value_t::object:
            {
                JSON_ASSERT(m_it.object_iterator != m_object->m_data.m_value.object->end());
                return m_it.object_iterator->second;
            }

            case value_t::array:
            {
                JSON_ASSERT(m_it.array_iterator != m_object->m_data.m_value.array->end());
                return *m_it.array_iterator;
            }

            case value_t::null:
                JSON_THROW(invalid_iterator::create(214, "cannot get value", m_object));

            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
            {
                if (JSON_HEDLEY_LIKELY(m_it.primitive_iterator.is_begin()))
                {
                    return *m_object;
                }

                JSON_THROW(invalid_iterator::create(214, "cannot get value", m_object));
            }
        }
    }

    /*!
    @brief dereference the iterator
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    pointer operator->() const
    {
        JSON_ASSERT(m_object != nullptr);

        switch (m_object->m_data.m_type)
        {
            case value_t::object:
            {
                JSON_ASSERT(m_it.object_iterator != m_object->m_data.m_value.object->end());
                return &(m_it.object_iterator->second);
            }

            case value_t::array:
            {
                JSON_ASSERT(m_it.array_iterator != m_object->m_data.m_value.array->end());
                return &*m_it.array_iterator;
            }

            case value_t::null:
            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
            {
                if (JSON_HEDLEY_LIKELY(m_it.primitive_iterator.is_begin()))
                {
                    return m_object;
                }

                JSON_THROW(invalid_iterator::create(214, "cannot get value", m_object));
            }
        }
    }
    /*!
    @brief post-increment (it++)
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    iter_impl operator++(int)& // NOLINT(cert-dcl21-cpp)
    {
        auto result = *this;
        ++(*this);
        return result;
    }

    /*!
    @brief pre-increment (++it)
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    iter_impl& operator++()
    {
        JSON_ASSERT(m_object != nullptr);

        switch (m_object->m_data.m_type)
        {
            case value_t::object:
            {
                std::advance(m_it.object_iterator, 1);
                break;
            }

            case value_t::array:
            {
                std::advance(m_it.array_iterator, 1);
                break;
            }

            case value_t::null:
            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
            {
                ++m_it.primitive_iterator;
                break;
            }
        }

        return *this;
    }

    /*!
    @brief post-decrement (it--)
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    iter_impl operator--(int)& // NOLINT(cert-dcl21-cpp)
    {
        auto result = *this;
        --(*this);
        return result;
    }

    /*!
    @brief pre-decrement (--it)
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    iter_impl& operator--()
    {
        JSON_ASSERT(m_object != nullptr);

        switch (m_object->m_data.m_type)
        {
            case value_t::object:
            {
                std::advance(m_it.object_iterator, -1);
                break;
            }

            case value_t::array:
            {
                std::advance(m_it.array_iterator, -1);
                break;
            }

            case value_t::null:
            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
            {
                --m_it.primitive_iterator;
                break;
            }
        }

        return *this;
    }
    /*!
    @brief comparison: equal
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    template < typename IterImpl, detail::enable_if_t < (std::is_same<IterImpl, iter_impl>::value || std::is_same<IterImpl, other_iter_impl>::value), std::nullptr_t > = nullptr >
    bool operator==(const IterImpl& other) const
    {
        // if objects are not the same, the comparison is undefined
        if (JSON_HEDLEY_UNLIKELY(m_object != other.m_object))
        {
            JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers", m_object));
        }

        JSON_ASSERT(m_object != nullptr);

        switch (m_object->m_data.m_type)
        {
            case value_t::object:
                return (m_it.object_iterator == other.m_it.object_iterator);

            case value_t::array:
                return (m_it.array_iterator == other.m_it.array_iterator);

            case value_t::null:
            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
                return (m_it.primitive_iterator == other.m_it.primitive_iterator);
        }
    }

    /*!
    @brief comparison: not equal
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    template < typename IterImpl, detail::enable_if_t < (std::is_same<IterImpl, iter_impl>::value || std::is_same<IterImpl, other_iter_impl>::value), std::nullptr_t > = nullptr >
    bool operator!=(const IterImpl& other) const
    {
        return !operator==(other);
    }

    /*!
    @brief comparison: smaller
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    bool operator<(const iter_impl& other) const
    {
        // if objects are not the same, the comparison is undefined
        if (JSON_HEDLEY_UNLIKELY(m_object != other.m_object))
        {
            JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers", m_object));
        }

        JSON_ASSERT(m_object != nullptr);

        switch (m_object->m_data.m_type)
        {
            case value_t::object:
                JSON_THROW(invalid_iterator::create(213, "cannot compare order of object iterators", m_object));

            case value_t::array:
                return (m_it.array_iterator < other.m_it.array_iterator);

            case value_t::null:
            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
                return (m_it.primitive_iterator < other.m_it.primitive_iterator);
        }
    }

    /*!
    @brief comparison: less than or equal
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    bool operator<=(const iter_impl& other) const
    {
        return !other.operator < (*this);
    }

    /*!
    @brief comparison: greater than
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    bool operator>(const iter_impl& other) const
    {
        return !operator<=(other);
    }

    /*!
    @brief comparison: greater than or equal
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    bool operator>=(const iter_impl& other) const
    {
        return !operator<(other);
    }
    /*!
    @brief add to iterator
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    iter_impl& operator+=(difference_type i)
    {
        JSON_ASSERT(m_object != nullptr);

        switch (m_object->m_data.m_type)
        {
            case value_t::object:
                JSON_THROW(invalid_iterator::create(209, "cannot use offsets with object iterators", m_object));

            case value_t::array:
            {
                std::advance(m_it.array_iterator, i);
                break;
            }

            case value_t::null:
            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
            {
                m_it.primitive_iterator += i;
                break;
            }
        }

        return *this;
    }

    /*!
    @brief subtract from iterator
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    iter_impl& operator-=(difference_type i)
    {
        return operator+=(-i);
    }

    /*!
    @brief add to iterator
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    iter_impl operator+(difference_type i) const
    {
        auto result = *this;
        result += i;
        return result;
    }

    /*!
    @brief addition of distance and iterator
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    friend iter_impl operator+(difference_type i, const iter_impl& it)
    {
        auto result = it;
        result += i;
        return result;
    }

    /*!
    @brief subtract from iterator
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    iter_impl operator-(difference_type i) const
    {
        auto result = *this;
        result -= i;
        return result;
    }

    /*!
    @brief return difference
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    difference_type operator-(const iter_impl& other) const
    {
        JSON_ASSERT(m_object != nullptr);

        switch (m_object->m_data.m_type)
        {
            case value_t::object:
                JSON_THROW(invalid_iterator::create(209, "cannot use offsets with object iterators", m_object));

            case value_t::array:
                return m_it.array_iterator - other.m_it.array_iterator;

            case value_t::null:
            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
                return m_it.primitive_iterator - other.m_it.primitive_iterator;
        }
    }

    /*!
    @brief access to successor
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    reference operator[](difference_type n) const
    {
        JSON_ASSERT(m_object != nullptr);

        switch (m_object->m_data.m_type)
        {
            case value_t::object:
                JSON_THROW(invalid_iterator::create(208, "cannot use operator[] for object iterators", m_object));

            case value_t::array:
                return *std::next(m_it.array_iterator, n);

            case value_t::null:
                JSON_THROW(invalid_iterator::create(214, "cannot get value", m_object));

            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
            {
                if (JSON_HEDLEY_LIKELY(m_it.primitive_iterator.get_value() == -n))
                {
                    return *m_object;
                }

                JSON_THROW(invalid_iterator::create(214, "cannot get value", m_object));
            }
        }
    }

    /*!
    @brief return the key of an object iterator
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    const typename object_t::key_type& key() const
    {
        JSON_ASSERT(m_object != nullptr);

        if (JSON_HEDLEY_LIKELY(m_object->is_object()))
        {
            return m_it.object_iterator->first;
        }

        JSON_THROW(invalid_iterator::create(207, "cannot use key() for non-object iterators", m_object));
    }

    /*!
    @brief return the value of an iterator
    @pre The iterator is initialized; i.e. `m_object != nullptr`.
    */
    reference value() const
    {
        return operator*();
    }

  JSON_PRIVATE_UNLESS_TESTED:
    /// associated JSON instance
    pointer m_object = nullptr;
    /// the actual iterator of the associated instance
    internal_iterator<typename std::remove_const<BasicJsonType>::type> m_it {};
};

} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
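The conversion rules documented in the class above can be seen directly through the public iterator aliases (a short sketch, assuming the usual `nlohmann::json` alias):

```cpp
#include <nlohmann/json.hpp>
#include <iostream>

int main()
{
    nlohmann::json j = {{"one", 1}, {"two", 2}};

    nlohmann::json::iterator it = j.begin();
    nlohmann::json::const_iterator cit = it;       // iterator -> const_iterator: allowed
    // nlohmann::json::iterator bad = j.cbegin();  // const -> non-const: does not compile

    std::cout << cit.key() << " = " << cit.value() << '\n';  // "one = 1" (object keys are sorted)
}
```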
242
sample/lib/json/include/nlohmann/detail/iterators/iteration_proxy.hpp
Normal file
@@ -0,0 +1,242 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <cstddef> // size_t
#include <iterator> // input_iterator_tag
#include <string> // string, to_string
#include <tuple> // tuple_size, get, tuple_element
#include <utility> // move

#if JSON_HAS_RANGES
    #include <ranges> // enable_borrowed_range
#endif

#include <nlohmann/detail/abi_macros.hpp>
#include <nlohmann/detail/meta/type_traits.hpp>
#include <nlohmann/detail/value_t.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

template<typename string_type>
void int_to_string(string_type& target, std::size_t value)
{
    // For ADL
    using std::to_string;
    target = to_string(value);
}

template<typename IteratorType> class iteration_proxy_value
{
  public:
    using difference_type = std::ptrdiff_t;
    using value_type = iteration_proxy_value;
    using pointer = value_type*;
    using reference = value_type&;
    using iterator_category = std::input_iterator_tag;
    using string_type = typename std::remove_cv< typename std::remove_reference<decltype( std::declval<IteratorType>().key() ) >::type >::type;

  private:
    /// the iterator
    IteratorType anchor{};
    /// an index for arrays (used to create key names)
    std::size_t array_index = 0;
    /// last stringified array index
    mutable std::size_t array_index_last = 0;
    /// a string representation of the array index
    mutable string_type array_index_str = "0";
    /// an empty string (to return a reference for primitive values)
    string_type empty_str{};

  public:
    explicit iteration_proxy_value() = default;
    explicit iteration_proxy_value(IteratorType it, std::size_t array_index_ = 0)
        noexcept(std::is_nothrow_move_constructible<IteratorType>::value
                 && std::is_nothrow_default_constructible<string_type>::value)
        : anchor(std::move(it))
        , array_index(array_index_)
    {}

    iteration_proxy_value(iteration_proxy_value const&) = default;
    iteration_proxy_value& operator=(iteration_proxy_value const&) = default;
    // older GCCs are a bit fussy and require explicit noexcept specifiers on defaulted functions
    iteration_proxy_value(iteration_proxy_value&&)
        noexcept(std::is_nothrow_move_constructible<IteratorType>::value
                 && std::is_nothrow_move_constructible<string_type>::value) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor,cppcoreguidelines-noexcept-move-operations)
    iteration_proxy_value& operator=(iteration_proxy_value&&)
        noexcept(std::is_nothrow_move_assignable<IteratorType>::value
                 && std::is_nothrow_move_assignable<string_type>::value) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor,cppcoreguidelines-noexcept-move-operations)
    ~iteration_proxy_value() = default;

    /// dereference operator (needed for range-based for)
    const iteration_proxy_value& operator*() const
    {
        return *this;
    }

    /// increment operator (needed for range-based for)
    iteration_proxy_value& operator++()
    {
        ++anchor;
        ++array_index;

        return *this;
    }

    iteration_proxy_value operator++(int)& // NOLINT(cert-dcl21-cpp)
    {
        auto tmp = iteration_proxy_value(anchor, array_index);
        ++anchor;
        ++array_index;
        return tmp;
    }

    /// equality operator (needed for InputIterator)
    bool operator==(const iteration_proxy_value& o) const
    {
        return anchor == o.anchor;
    }

    /// inequality operator (needed for range-based for)
    bool operator!=(const iteration_proxy_value& o) const
    {
        return anchor != o.anchor;
    }

    /// return key of the iterator
    const string_type& key() const
    {
        JSON_ASSERT(anchor.m_object != nullptr);

        switch (anchor.m_object->type())
        {
            // use integer array index as key
            case value_t::array:
            {
                if (array_index != array_index_last)
                {
                    int_to_string(array_index_str, array_index);
                    array_index_last = array_index;
                }
                return array_index_str;
            }

            // use key from the object
            case value_t::object:
                return anchor.key();

            // use an empty key for all primitive types
            case value_t::null:
            case value_t::string:
            case value_t::boolean:
            case value_t::number_integer:
            case value_t::number_unsigned:
            case value_t::number_float:
            case value_t::binary:
            case value_t::discarded:
            default:
                return empty_str;
        }
    }

    /// return value of the iterator
    typename IteratorType::reference value() const
    {
        return anchor.value();
    }
};
/// proxy class for the items() function
template<typename IteratorType> class iteration_proxy
{
  private:
    /// the container to iterate
    typename IteratorType::pointer container = nullptr;

  public:
    explicit iteration_proxy() = default;

    /// construct iteration proxy from a container
    explicit iteration_proxy(typename IteratorType::reference cont) noexcept
        : container(&cont) {}

    iteration_proxy(iteration_proxy const&) = default;
    iteration_proxy& operator=(iteration_proxy const&) = default;
    iteration_proxy(iteration_proxy&&) noexcept = default;
    iteration_proxy& operator=(iteration_proxy&&) noexcept = default;
    ~iteration_proxy() = default;

    /// return iterator begin (needed for range-based for)
    iteration_proxy_value<IteratorType> begin() const noexcept
    {
        return iteration_proxy_value<IteratorType>(container->begin());
    }

    /// return iterator end (needed for range-based for)
    iteration_proxy_value<IteratorType> end() const noexcept
    {
        return iteration_proxy_value<IteratorType>(container->end());
    }
};

// Structured Bindings Support
// For further reference see https://blog.tartanllama.xyz/structured-bindings/
// And see https://github.com/nlohmann/json/pull/1391
template<std::size_t N, typename IteratorType, enable_if_t<N == 0, int> = 0>
auto get(const nlohmann::detail::iteration_proxy_value<IteratorType>& i) -> decltype(i.key())
{
    return i.key();
}
// Structured Bindings Support
// For further reference see https://blog.tartanllama.xyz/structured-bindings/
// And see https://github.com/nlohmann/json/pull/1391
template<std::size_t N, typename IteratorType, enable_if_t<N == 1, int> = 0>
auto get(const nlohmann::detail::iteration_proxy_value<IteratorType>& i) -> decltype(i.value())
{
    return i.value();
}

} // namespace detail
NLOHMANN_JSON_NAMESPACE_END

// The Addition to the STD Namespace is required to add
// Structured Bindings Support to the iteration_proxy_value class
// For further reference see https://blog.tartanllama.xyz/structured-bindings/
// And see https://github.com/nlohmann/json/pull/1391
namespace std
{

#if defined(__clang__)
    // Fix: https://github.com/nlohmann/json/issues/1401
    #pragma clang diagnostic push
    #pragma clang diagnostic ignored "-Wmismatched-tags"
#endif
template<typename IteratorType>
class tuple_size<::nlohmann::detail::iteration_proxy_value<IteratorType>> // NOLINT(cert-dcl58-cpp)
    : public std::integral_constant<std::size_t, 2> {};

template<std::size_t N, typename IteratorType>
class tuple_element<N, ::nlohmann::detail::iteration_proxy_value<IteratorType >> // NOLINT(cert-dcl58-cpp)
{
  public:
    using type = decltype(
                     get<N>(std::declval <
                            ::nlohmann::detail::iteration_proxy_value<IteratorType >> ()));
};
#if defined(__clang__)
    #pragma clang diagnostic pop
#endif

} // namespace std

#if JSON_HAS_RANGES
template <typename IteratorType>
inline constexpr bool ::std::ranges::enable_borrowed_range<::nlohmann::detail::iteration_proxy<IteratorType>> = true;
#endif
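This machinery is what powers the documented `items()` range; the tuple_size/tuple_element specializations make structured bindings work on its value type. A usage sketch (C++17 or later, assuming the usual `nlohmann::json` alias):

```cpp
#include <nlohmann/json.hpp>
#include <iostream>

int main()
{
    nlohmann::json obj = {{"name", "Ada"}, {"year", 1843}};

    // object: key() returns the member name
    for (auto& [key, value] : obj.items())
    {
        std::cout << key << ": " << value << '\n';
    }

    // array: key() returns the stringified index "0", "1", ... via int_to_string()
    nlohmann::json arr = {10, 20};
    for (auto& [idx, elem] : arr.items())
    {
        std::cout << idx << " -> " << elem << '\n';
    }
}
```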
61
sample/lib/json/include/nlohmann/detail/iterators/iterator_traits.hpp
Normal file
@@ -0,0 +1,61 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <iterator> // random_access_iterator_tag

#include <nlohmann/detail/abi_macros.hpp>
#include <nlohmann/detail/meta/void_t.hpp>
#include <nlohmann/detail/meta/cpp_future.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

template<typename It, typename = void>
struct iterator_types {};

template<typename It>
struct iterator_types <
    It,
    void_t<typename It::difference_type, typename It::value_type, typename It::pointer,
    typename It::reference, typename It::iterator_category >>
{
    using difference_type = typename It::difference_type;
    using value_type = typename It::value_type;
    using pointer = typename It::pointer;
    using reference = typename It::reference;
    using iterator_category = typename It::iterator_category;
};

// This is required as some compilers implement std::iterator_traits in a way that
// doesn't work with SFINAE. See https://github.com/nlohmann/json/issues/1341.
template<typename T, typename = void>
struct iterator_traits
{
};

template<typename T>
struct iterator_traits < T, enable_if_t < !std::is_pointer<T>::value >>
    : iterator_types<T>
{
};

template<typename T>
struct iterator_traits<T*, enable_if_t<std::is_object<T>::value>>
{
    using iterator_category = std::random_access_iterator_tag;
    using value_type = T;
    using difference_type = ptrdiff_t;
    using pointer = T*;
    using reference = T&;
};

} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
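A quick compile-time check of the pointer specialization above (a sketch; including the vendored detail header directly is an assumption, not supported public API):

```cpp
#include <nlohmann/detail/iterators/iterator_traits.hpp>
#include <iterator>
#include <type_traits>

using ptr_traits = nlohmann::detail::iterator_traits<int*>;

static_assert(std::is_same<ptr_traits::value_type, int>::value,
              "object pointers expose their pointee as value_type");
static_assert(std::is_same<ptr_traits::iterator_category,
              std::random_access_iterator_tag>::value,
              "object pointers model random-access iterators");

int main() {}
```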
130
sample/lib/json/include/nlohmann/detail/iterators/json_reverse_iterator.hpp
Normal file
@@ -0,0 +1,130 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <cstddef> // ptrdiff_t
#include <iterator> // reverse_iterator
#include <utility> // declval

#include <nlohmann/detail/abi_macros.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

//////////////////////
// reverse_iterator //
//////////////////////

/*!
@brief a template for a reverse iterator class

@tparam Base the base iterator type to reverse. Valid types are @ref
iterator (to create @ref reverse_iterator) and @ref const_iterator (to
create @ref const_reverse_iterator).

@requirement The class satisfies the following concept requirements:
-
[BidirectionalIterator](https://en.cppreference.com/w/cpp/named_req/BidirectionalIterator):
  The iterator can be moved in both directions (i.e. incremented and
  decremented).
- [OutputIterator](https://en.cppreference.com/w/cpp/named_req/OutputIterator):
  It is possible to write to the pointed-to element (only if @a Base is
  @ref iterator).

@since version 1.0.0
*/
template<typename Base>
class json_reverse_iterator : public std::reverse_iterator<Base>
{
  public:
    using difference_type = std::ptrdiff_t;
    /// shortcut to the reverse iterator adapter
    using base_iterator = std::reverse_iterator<Base>;
    /// the reference type for the pointed-to element
    using reference = typename Base::reference;

    /// create reverse iterator from iterator
    explicit json_reverse_iterator(const typename base_iterator::iterator_type& it) noexcept
        : base_iterator(it) {}

    /// create reverse iterator from base class
    explicit json_reverse_iterator(const base_iterator& it) noexcept : base_iterator(it) {}

    /// post-increment (it++)
    json_reverse_iterator operator++(int)& // NOLINT(cert-dcl21-cpp)
    {
        return static_cast<json_reverse_iterator>(base_iterator::operator++(1));
    }

    /// pre-increment (++it)
    json_reverse_iterator& operator++()
    {
        return static_cast<json_reverse_iterator&>(base_iterator::operator++());
    }

    /// post-decrement (it--)
    json_reverse_iterator operator--(int)& // NOLINT(cert-dcl21-cpp)
    {
        return static_cast<json_reverse_iterator>(base_iterator::operator--(1));
    }

    /// pre-decrement (--it)
    json_reverse_iterator& operator--()
    {
        return static_cast<json_reverse_iterator&>(base_iterator::operator--());
    }

    /// add to iterator
    json_reverse_iterator& operator+=(difference_type i)
    {
        return static_cast<json_reverse_iterator&>(base_iterator::operator+=(i));
    }

    /// add to iterator
    json_reverse_iterator operator+(difference_type i) const
    {
        return static_cast<json_reverse_iterator>(base_iterator::operator+(i));
    }

    /// subtract from iterator
    json_reverse_iterator operator-(difference_type i) const
    {
        return static_cast<json_reverse_iterator>(base_iterator::operator-(i));
    }

    /// return difference
    difference_type operator-(const json_reverse_iterator& other) const
    {
        return base_iterator(*this) - base_iterator(other);
    }

    /// access to successor
    reference operator[](difference_type n) const
    {
        return *(this->operator+(n));
    }

    /// return the key of an object iterator
    auto key() const -> decltype(std::declval<Base>().key())
    {
        auto it = --this->base();
        return it.key();
    }

    /// return the value of an iterator
    reference value() const
    {
        auto it = --this->base();
        return it.operator*();
    }
};

} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
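json_reverse_iterator is what the public `rbegin()`/`rend()` return; note how `key()` and `value()` decrement the base iterator before delegating, so they refer to the element the reverse iterator logically points at. A short sketch (assuming the usual `nlohmann::json` alias; key() requires an object, as in the forward iterator):

```cpp
#include <nlohmann/json.hpp>
#include <iostream>

int main()
{
    nlohmann::json j = {{"a", 1}, {"b", 2}};

    for (auto rit = j.rbegin(); rit != j.rend(); ++rit)
    {
        std::cout << rit.key() << ": " << rit.value() << '\n';  // b: 2, then a: 1
    }
}
```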
132
sample/lib/json/include/nlohmann/detail/iterators/primitive_iterator.hpp
Normal file
@@ -0,0 +1,132 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <cstddef> // ptrdiff_t
#include <limits> // numeric_limits

#include <nlohmann/detail/macro_scope.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

/*
@brief an iterator for primitive JSON types

This class models an iterator for primitive JSON types (boolean, number,
string). Its only purpose is to allow the iterator/const_iterator classes
to "iterate" over primitive values. Internally, the iterator is modeled by
a `difference_type` variable: begin_value (`0`) models the begin iterator,
end_value (`1`) models the past-the-end iterator.
*/
class primitive_iterator_t
{
  private:
    using difference_type = std::ptrdiff_t;
    static constexpr difference_type begin_value = 0;
    static constexpr difference_type end_value = begin_value + 1;

  JSON_PRIVATE_UNLESS_TESTED:
    /// iterator as signed integer type
    difference_type m_it = (std::numeric_limits<std::ptrdiff_t>::min)();

  public:
    constexpr difference_type get_value() const noexcept
    {
        return m_it;
    }

    /// set iterator to a defined beginning
    void set_begin() noexcept
    {
        m_it = begin_value;
    }

    /// set iterator to a defined past the end
    void set_end() noexcept
    {
        m_it = end_value;
    }

    /// return whether the iterator can be dereferenced
    constexpr bool is_begin() const noexcept
    {
        return m_it == begin_value;
    }

    /// return whether the iterator is at end
    constexpr bool is_end() const noexcept
    {
        return m_it == end_value;
    }

    friend constexpr bool operator==(primitive_iterator_t lhs, primitive_iterator_t rhs) noexcept
    {
        return lhs.m_it == rhs.m_it;
    }

    friend constexpr bool operator<(primitive_iterator_t lhs, primitive_iterator_t rhs) noexcept
    {
        return lhs.m_it < rhs.m_it;
    }

    primitive_iterator_t operator+(difference_type n) noexcept
    {
        auto result = *this;
        result += n;
        return result;
    }

    friend constexpr difference_type operator-(primitive_iterator_t lhs, primitive_iterator_t rhs) noexcept
    {
        return lhs.m_it - rhs.m_it;
    }

    primitive_iterator_t& operator++() noexcept
    {
        ++m_it;
        return *this;
    }

    primitive_iterator_t operator++(int)& noexcept // NOLINT(cert-dcl21-cpp)
    {
        auto result = *this;
        ++m_it;
        return result;
    }

    primitive_iterator_t& operator--() noexcept
    {
        --m_it;
        return *this;
    }

    primitive_iterator_t operator--(int)& noexcept // NOLINT(cert-dcl21-cpp)
    {
        auto result = *this;
        --m_it;
        return result;
    }

    primitive_iterator_t& operator+=(difference_type n) noexcept
    {
        m_it += n;
        return *this;
    }

    primitive_iterator_t& operator-=(difference_type n) noexcept
    {
        m_it -= n;
        return *this;
    }
};

} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
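The begin/end modeling above surfaces in the public API: a primitive JSON value behaves like a one-element range, while null (whose set_begin() jumps straight to end, as seen in iter_impl) behaves like an empty one. A sketch assuming the usual `nlohmann::json` alias:

```cpp
#include <nlohmann/json.hpp>
#include <iterator>
#include <iostream>

int main()
{
    nlohmann::json num = 42;
    std::cout << std::distance(num.begin(), num.end()) << '\n';  // 1
    std::cout << *num.begin() << '\n';                           // 42

    nlohmann::json null_value;  // null: begin() == end(), i.e. an empty range
    std::cout << (null_value.begin() == null_value.end()) << '\n';  // 1
}
```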
39
sample/lib/json/include/nlohmann/detail/json_custom_base_class.hpp
Normal file
@@ -0,0 +1,39 @@
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.11.3
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT

#pragma once

#include <type_traits> // conditional, is_same

#include <nlohmann/detail/abi_macros.hpp>

NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

/*!
@brief Default base class of the @ref basic_json class.

So that the correct implementations of the copy / move ctors / assign operators
of @ref basic_json do not require complex case distinctions
(no base class / custom base class used as customization point),
@ref basic_json always has a base class.
By default, this class is used because it is empty and thus has no effect
on the behavior of @ref basic_json.
*/
struct json_default_base {};

template<class T>
using json_base_class = typename std::conditional <
                        std::is_same<T, void>::value,
                        json_default_base,
                        T
                        >::type;

} // namespace detail
NLOHMANN_JSON_NAMESPACE_END
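A compile-time check grounded in the alias above: `json_base_class<void>` collapses to the empty default base, while any other class is used as-is (a sketch; including the vendored detail header directly is an assumption, not supported public API):

```cpp
#include <nlohmann/detail/json_custom_base_class.hpp>
#include <type_traits>

struct my_base { int user_data = 0; };  // hypothetical customization point

static_assert(std::is_same<nlohmann::detail::json_base_class<void>,
                           nlohmann::detail::json_default_base>::value,
              "void selects the empty default base");
static_assert(std::is_same<nlohmann::detail::json_base_class<my_base>,
                           my_base>::value,
              "any other type is used directly");

int main() {}
```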
Some files were not shown because too many files have changed in this diff.