convergence_best #27

Merged
rmontanana merged 5 commits from convergence_best into main 2024-04-30 16:22:09 +00:00
721 changed files with 206095 additions and 2496 deletions
Showing only changes of commit ae469b8146 - Show all commits

4
.gitmodules vendored
View File

@ -3,8 +3,8 @@
url = https://github.com/rmontanana/mdlp
main = main
update = merge
[submodule "lib/catch2"]
path = lib/catch2
[submodule "tests/lib/catch2"]
path = tests/lib/catch2
main = v2.x
update = merge
url = https://github.com/catchorg/Catch2.git

2
.vscode/launch.json vendored
View File

@ -16,7 +16,7 @@
"name": "test",
"program": "${workspaceFolder}/build_debug/tests/TestBayesNet",
"args": [
"Block Update"
"\"Test Cannot build dataset with wrong data vector\""
],
"cwd": "${workspaceFolder}/build_debug/tests"
},

View File

@ -10,7 +10,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Added
- Add the Library logo generated with <https://openart.ai> to README.md
- Add link to the coverage report in the README.md coverage label
- Add link to the coverage report in the README.md coverage label.
- Add the *convergence_best* hyperparameter to the BoostAODE class, to control the way the prior accuracy is computed if convergence is set. Default value is *false*.
### Internal
- Refactor library ArffFile to limit the number of samples with a parameter.
- Refactor tests libraries location to test/lib
- Refactor loadDataset function in tests.
## [1.0.5] 2024-04-20

View File

@ -66,14 +66,14 @@ add_git_submodule("lib/json")
# Subdirectories
# --------------
add_subdirectory(config)
add_subdirectory(lib/Files)
add_subdirectory(bayesnet)
# Testing
# -------
if (ENABLE_TESTING)
MESSAGE("Testing enabled")
add_git_submodule("lib/catch2")
MESSAGE("Testing enabled")
add_git_submodule("tests/lib/catch2")
add_subdirectory(tests/lib/Files)
include(CTest)
add_subdirectory(tests)
endif (ENABLE_TESTING)

View File

@ -7,7 +7,7 @@
[![Security Rating](https://sonarcloud.io/api/project_badges/measure?project=rmontanana_BayesNet&metric=security_rating)](https://sonarcloud.io/summary/new_code?id=rmontanana_BayesNet)
[![Reliability Rating](https://sonarcloud.io/api/project_badges/measure?project=rmontanana_BayesNet&metric=reliability_rating)](https://sonarcloud.io/summary/new_code?id=rmontanana_BayesNet)
![Gitea Last Commit](https://img.shields.io/gitea/last-commit/rmontanana/bayesnet?gitea_url=https://gitea.rmontanana.es:3000&logo=gitea)
[![Coverage Badge](https://img.shields.io/badge/Coverage-97,2%25-green)](html/index.html)
[![Coverage Badge](https://img.shields.io/badge/Coverage-97,1%25-green)](html/index.html)
Bayesian Network Classifiers using libtorch from scratch
@ -47,11 +47,6 @@ make test
```bash
make coverage
```
or
```bash
make viewcoverage
```

View File

@ -13,13 +13,14 @@
#include "bayesnet/feature_selection/FCBF.h"
#include "bayesnet/feature_selection/IWSS.h"
#include "BoostAODE.h"
#include "lib/log/loguru.cpp"
namespace bayesnet {
BoostAODE::BoostAODE(bool predict_voting) : Ensemble(predict_voting)
{
validHyperparameters = {
"maxModels", "bisection", "order", "convergence", "threshold",
"maxModels", "bisection", "order", "convergence", "convergence_best", "threshold",
"select_features", "maxTolerance", "predict_voting", "block_update"
};
@ -70,6 +71,10 @@ namespace bayesnet {
convergence = hyperparameters["convergence"];
hyperparameters.erase("convergence");
}
if (hyperparameters.contains("convergence_best")) {
convergence_best = hyperparameters["convergence_best"];
hyperparameters.erase("convergence_best");
}
if (hyperparameters.contains("bisection")) {
bisection = hyperparameters["bisection"];
hyperparameters.erase("bisection");
@ -262,6 +267,13 @@ namespace bayesnet {
}
void BoostAODE::trainModel(const torch::Tensor& weights)
{
//
// Logging setup
//
loguru::set_thread_name("BoostAODE");
loguru::g_stderr_verbosity = loguru::Verbosity_OFF;
loguru::add_file("boostAODE.log", loguru::Truncate, loguru::Verbosity_MAX);
// Algorithm based on the adaboost algorithm for classification
// as explained in Ensemble methods (Zhi-Hua Zhou, 2012)
fitted = true;
@ -304,8 +316,9 @@ namespace bayesnet {
{ return std::find(begin(featuresUsed), end(featuresUsed), x) != end(featuresUsed);}),
end(featureSelection)
);
int k = pow(2, tolerance);
int k = bisection ? pow(2, tolerance) : 1;
int counter = 0; // The model counter of the current pack
VLOG_SCOPE_F(1, "counter=%d k=%d featureSelection.size: %zu", counter, k, featureSelection.size());
while (counter++ < k && featureSelection.size() > 0) {
auto feature = featureSelection[0];
featureSelection.erase(featureSelection.begin());
@ -324,6 +337,7 @@ namespace bayesnet {
models.push_back(std::move(model));
significanceModels.push_back(alpha_t);
n_models++;
VLOG_SCOPE_F(2, "numItemsPack: %d n_models: %d featuresUsed: %zu", numItemsPack, n_models, featuresUsed.size());
}
if (block_update) {
std::tie(weights_, alpha_t, finished) = update_weights_block(k, y_train, weights_);
@ -337,20 +351,28 @@ namespace bayesnet {
improvement = accuracy - priorAccuracy;
}
if (improvement < convergence_threshold) {
VLOG_SCOPE_F(3, " (improvement<threshold) tolerance: %d numItemsPack: %d improvement: %f prior: %f current: %f", tolerance, numItemsPack, improvement, priorAccuracy, accuracy);
tolerance++;
} else {
VLOG_SCOPE_F(3, "* (improvement>=threshold) Reset. tolerance: %d numItemsPack: %d improvement: %f prior: %f current: %f", tolerance, numItemsPack, improvement, priorAccuracy, accuracy);
tolerance = 0; // Reset the counter if the model performs better
numItemsPack = 0;
}
// Keep the best accuracy until now as the prior accuracy
priorAccuracy = std::max(accuracy, priorAccuracy);
// priorAccuracy = accuracy;
if (convergence_best) {
// Keep the best accuracy until now as the prior accuracy
priorAccuracy = std::max(accuracy, priorAccuracy);
} else {
// Keep the last accuracy obtained as the prior accuracy
priorAccuracy = accuracy;
}
}
VLOG_SCOPE_F(1, "tolerance: %d featuresUsed.size: %zu features.size: %zu", tolerance, featuresUsed.size(), features.size());
finished = finished || tolerance > maxTolerance || featuresUsed.size() == features.size();
}
if (tolerance > maxTolerance) {
if (numItemsPack < n_models) {
notes.push_back("Convergence threshold reached & " + std::to_string(numItemsPack) + " models eliminated");
VLOG_SCOPE_F(4, "Convergence threshold reached & %d models eliminated of %d", numItemsPack, n_models);
for (int i = 0; i < numItemsPack; ++i) {
significanceModels.pop_back();
models.pop_back();
@ -358,6 +380,7 @@ namespace bayesnet {
}
} else {
notes.push_back("Convergence threshold reached & 0 models eliminated");
VLOG_SCOPE_F(4, "Convergence threshold reached & 0 models eliminated n_models=%d numItemsPack=%d", n_models, numItemsPack);
}
}
if (featuresUsed.size() != features.size()) {

View File

@ -39,6 +39,7 @@ namespace bayesnet {
int maxTolerance = 3;
std::string order_algorithm; // order to process the KBest features asc, desc, rand
bool convergence = true; //if true, stop when the model does not improve
bool convergence_best = false; // whether to keep the best accuracy up to the moment or the last accuracy as the prior accuracy
bool selectFeatures = false; // if true, use feature selection
std::string select_features_algorithm = Orders.DESC; // Selected feature selection algorithm
FeatureSelect* featureSelector = nullptr;

View File

@ -5,6 +5,7 @@
The hyperparameters defined in the algorithm are:
- ***bisection*** (*boolean*): If set to true allows the algorithm to add *k* models at once (as specified in the algorithm) to the ensemble. Default value: *true*.
- ***convergence_best*** (*boolean*): If set to *true*, the algorithm will take as *priorAccuracy* the best accuracy computed. If set to *false* it will take the last accuracy as *priorAccuracy*. Default value: *false*.
- ***order*** (*{"asc", "desc", "rand"}*): Sets the order (ascending/descending/random) in which dataset variables will be processed to choose the parents of the *SPODEs*. Default value: *"desc"*.

View File

@ -105,8 +105,7 @@
2. $numItemsPack \leftarrow 0$
10. If
$(Vars == \emptyset \lor tolerance>maxTolerance) \; finished \leftarrow True$
10. If $(Vars == \emptyset \lor tolerance>maxTolerance) \; finished \leftarrow True$
11. $lastAccuracy \leftarrow max(lastAccuracy, actualAccuracy)$

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryLo">50.0&nbsp;%</td>
@ -65,7 +65,7 @@
<tr>
<td class="coverFn"><a href="BaseClassifier.h.gcov.html#L19">_ZN8bayesnet14BaseClassifierD0Ev</a></td>
<td class="coverFnHi">273</td>
<td class="coverFnHi">241</td>
</tr>
@ -79,7 +79,7 @@
<tr>
<td class="coverFnAlias"><a href="BaseClassifier.h.gcov.html#L19">_ZN8bayesnet14BaseClassifierD2Ev</a></td>
<td class="coverFnAliasHi">273</td>
<td class="coverFnAliasHi">241</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryLo">50.0&nbsp;%</td>
@ -65,7 +65,7 @@
<tr>
<td class="coverFn"><a href="BaseClassifier.h.gcov.html#L19">_ZN8bayesnet14BaseClassifierD0Ev</a></td>
<td class="coverFnHi">273</td>
<td class="coverFnHi">241</td>
</tr>
@ -79,7 +79,7 @@
<tr>
<td class="coverFnAlias"><a href="BaseClassifier.h.gcov.html#L19">_ZN8bayesnet14BaseClassifierD2Ev</a></td>
<td class="coverFnAliasHi">273</td>
<td class="coverFnAliasHi">241</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryLo">50.0&nbsp;%</td>
@ -80,7 +80,7 @@
<span id="L18"><span class="lineNum"> 18</span> : virtual BaseClassifier&amp; fit(torch::Tensor&amp; X, torch::Tensor&amp; y, const std::vector&lt;std::string&gt;&amp; features, const std::string&amp; className, std::map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states) = 0;</span>
<span id="L19"><span class="lineNum"> 19</span> : virtual BaseClassifier&amp; fit(torch::Tensor&amp; dataset, const std::vector&lt;std::string&gt;&amp; features, const std::string&amp; className, std::map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states) = 0;</span>
<span id="L20"><span class="lineNum"> 20</span> : virtual BaseClassifier&amp; fit(torch::Tensor&amp; dataset, const std::vector&lt;std::string&gt;&amp; features, const std::string&amp; className, std::map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states, const torch::Tensor&amp; weights) = 0;</span>
<span id="L21"><span class="lineNum"> 21</span> <span class="tlaGNC tlaBgGNC"> 273 : virtual ~BaseClassifier() = default;</span></span>
<span id="L21"><span class="lineNum"> 21</span> <span class="tlaGNC tlaBgGNC"> 241 : virtual ~BaseClassifier() = default;</span></span>
<span id="L22"><span class="lineNum"> 22</span> : torch::Tensor virtual predict(torch::Tensor&amp; X) = 0;</span>
<span id="L23"><span class="lineNum"> 23</span> : std::vector&lt;int&gt; virtual predict(std::vector&lt;std::vector&lt;int &gt;&gt;&amp; X) = 0;</span>
<span id="L24"><span class="lineNum"> 24</span> : torch::Tensor virtual predict_proba(torch::Tensor&amp; X) = 0;</span>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,168 +65,168 @@
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L178">_ZN8bayesnet10Classifier17topological_orderB5cxx11Ev</a></td>
<td class="coverFnHi">1</td>
<td class="coverFnHi">11</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L182">_ZNK8bayesnet10Classifier8dump_cptB5cxx11Ev</a></td>
<td class="coverFnHi">1</td>
<td class="coverFnHi">11</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L142">_ZN8bayesnet10Classifier5scoreERSt6vectorIS1_IiSaIiEESaIS3_EERS3_</a></td>
<td class="coverFnHi">4</td>
<td class="coverFnHi">44</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L101">_ZN8bayesnet10Classifier7predictERSt6vectorIS1_IiSaIiEESaIS3_EE</a></td>
<td class="coverFnHi">4</td>
<td class="coverFnHi">44</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L170">_ZNK8bayesnet10Classifier17getNumberOfStatesEv</a></td>
<td class="coverFnHi">6</td>
<td class="coverFnHi">66</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L149">_ZNK8bayesnet10Classifier4showB5cxx11Ev</a></td>
<td class="coverFnHi">6</td>
<td class="coverFnHi">66</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L186">_ZN8bayesnet10Classifier18setHyperparametersERKN8nlohmann16json_abi_v3_11_310basic_jsonISt3mapSt6vectorNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEblmdSaNS2_14adl_serializerES5_IhSaIhEEvEE</a></td>
<td class="coverFnHi">19</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L47">_ZN8bayesnet10Classifier3fitERN2at6TensorES3_RKSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaISA_EERKSA_RSt3mapISA_S4_IiSaIiEESt4lessISA_ESaISt4pairISF_SJ_EEE</a></td>
<td class="coverFnHi">28</td>
<td class="coverFnHi">231</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L137">_ZN8bayesnet10Classifier5scoreERN2at6TensorES3_</a></td>
<td class="coverFnHi">28</td>
<td class="coverFnHi">308</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L47">_ZN8bayesnet10Classifier3fitERN2at6TensorES3_RKSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaISA_EERKSA_RSt3mapISA_S4_IiSaIiEESt4lessISA_ESaISt4pairISF_SJ_EEE</a></td>
<td class="coverFnHi">322</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L55">_ZN8bayesnet10Classifier3fitERSt6vectorIS1_IiSaIiEESaIS3_EERS3_RKS1_INSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaISD_EERKSD_RSt3mapISD_S3_St4lessISD_ESaISt4pairISI_S3_EEE</a></td>
<td class="coverFnHi">32</td>
<td class="coverFnHi">360</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L166">_ZNK8bayesnet10Classifier16getNumberOfEdgesEv</a></td>
<td class="coverFnHi">40</td>
<td class="coverFnHi">475</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L161">_ZNK8bayesnet10Classifier16getNumberOfNodesEv</a></td>
<td class="coverFnHi">40</td>
<td class="coverFnHi">475</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L122">_ZN8bayesnet10Classifier13predict_probaERSt6vectorIS1_IiSaIiEESaIS3_EE</a></td>
<td class="coverFnHi">67</td>
<td class="coverFnHi">766</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L174">_ZNK8bayesnet10Classifier17getClassNumStatesEv</a></td>
<td class="coverFnHi">76</td>
<td class="coverFnHi">877</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L28">_ZN8bayesnet10Classifier12buildDatasetERN2at6TensorE</a></td>
<td class="coverFnHi">77</td>
<td class="coverFnHi">888</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L66">_ZN8bayesnet10Classifier3fitERN2at6TensorERKSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaISA_EERKSA_RSt3mapISA_S4_IiSaIiEESt4lessISA_ESaISt4pairISF_SJ_EEE</a></td>
<td class="coverFnHi">99</td>
<td class="coverFnHi">1089</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L72">_ZN8bayesnet10Classifier3fitERN2at6TensorERKSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaISA_EERKSA_RSt3mapISA_S4_IiSaIiEESt4lessISA_ESaISt4pairISF_SJ_EEERKS2_</a></td>
<td class="coverFnHi">136</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L94">_ZN8bayesnet10Classifier7predictERN2at6TensorE</a></td>
<td class="coverFnHi">245</td>
<td class="coverFnHi">1686</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L42">_ZN8bayesnet10Classifier10trainModelERKN2at6TensorE</a></td>
<td class="coverFnHi">251</td>
<td class="coverFnHi">2951</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L153">_ZN8bayesnet10Classifier8addNodesEv</a></td>
<td class="coverFnHi">251</td>
<td class="coverFnHi">2951</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L94">_ZN8bayesnet10Classifier7predictERN2at6TensorE</a></td>
<td class="coverFnHi">3262</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L77">_ZN8bayesnet10Classifier18checkFitParametersEv</a></td>
<td class="coverFnHi">291</td>
<td class="coverFnHi">3413</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L12">_ZN8bayesnet10Classifier5buildERKSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaIS7_EERKS7_RSt3mapIS7_S1_IiSaIiEESt4lessIS7_ESaISt4pairISC_SG_EEERKN2at6TensorE</a></td>
<td class="coverFnHi">291</td>
<td class="coverFnHi">3413</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L115">_ZN8bayesnet10Classifier13predict_probaERN2at6TensorE</a></td>
<td class="coverFnHi">306</td>
<td class="coverFnHi">3562</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L10">_ZN8bayesnet10ClassifierC2ENS_7NetworkE</a></td>
<td class="coverFnHi">413</td>
<td class="coverFnHi">4750</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,168 +65,168 @@
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L42">_ZN8bayesnet10Classifier10trainModelERKN2at6TensorE</a></td>
<td class="coverFnHi">251</td>
<td class="coverFnHi">2951</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L28">_ZN8bayesnet10Classifier12buildDatasetERN2at6TensorE</a></td>
<td class="coverFnHi">77</td>
<td class="coverFnHi">888</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L115">_ZN8bayesnet10Classifier13predict_probaERN2at6TensorE</a></td>
<td class="coverFnHi">306</td>
<td class="coverFnHi">3562</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L122">_ZN8bayesnet10Classifier13predict_probaERSt6vectorIS1_IiSaIiEESaIS3_EE</a></td>
<td class="coverFnHi">67</td>
<td class="coverFnHi">766</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L178">_ZN8bayesnet10Classifier17topological_orderB5cxx11Ev</a></td>
<td class="coverFnHi">1</td>
<td class="coverFnHi">11</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L77">_ZN8bayesnet10Classifier18checkFitParametersEv</a></td>
<td class="coverFnHi">291</td>
<td class="coverFnHi">3413</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L186">_ZN8bayesnet10Classifier18setHyperparametersERKN8nlohmann16json_abi_v3_11_310basic_jsonISt3mapSt6vectorNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEblmdSaNS2_14adl_serializerES5_IhSaIhEEvEE</a></td>
<td class="coverFnHi">19</td>
<td class="coverFnHi">231</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L66">_ZN8bayesnet10Classifier3fitERN2at6TensorERKSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaISA_EERKSA_RSt3mapISA_S4_IiSaIiEESt4lessISA_ESaISt4pairISF_SJ_EEE</a></td>
<td class="coverFnHi">99</td>
<td class="coverFnHi">1089</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L72">_ZN8bayesnet10Classifier3fitERN2at6TensorERKSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaISA_EERKSA_RSt3mapISA_S4_IiSaIiEESt4lessISA_ESaISt4pairISF_SJ_EEERKS2_</a></td>
<td class="coverFnHi">136</td>
<td class="coverFnHi">1686</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L47">_ZN8bayesnet10Classifier3fitERN2at6TensorES3_RKSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaISA_EERKSA_RSt3mapISA_S4_IiSaIiEESt4lessISA_ESaISt4pairISF_SJ_EEE</a></td>
<td class="coverFnHi">28</td>
<td class="coverFnHi">322</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L55">_ZN8bayesnet10Classifier3fitERSt6vectorIS1_IiSaIiEESaIS3_EERS3_RKS1_INSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaISD_EERKSD_RSt3mapISD_S3_St4lessISD_ESaISt4pairISI_S3_EEE</a></td>
<td class="coverFnHi">32</td>
<td class="coverFnHi">360</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L12">_ZN8bayesnet10Classifier5buildERKSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaIS7_EERKS7_RSt3mapIS7_S1_IiSaIiEESt4lessIS7_ESaISt4pairISC_SG_EEERKN2at6TensorE</a></td>
<td class="coverFnHi">291</td>
<td class="coverFnHi">3413</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L137">_ZN8bayesnet10Classifier5scoreERN2at6TensorES3_</a></td>
<td class="coverFnHi">28</td>
<td class="coverFnHi">308</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L142">_ZN8bayesnet10Classifier5scoreERSt6vectorIS1_IiSaIiEESaIS3_EERS3_</a></td>
<td class="coverFnHi">4</td>
<td class="coverFnHi">44</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L94">_ZN8bayesnet10Classifier7predictERN2at6TensorE</a></td>
<td class="coverFnHi">245</td>
<td class="coverFnHi">3262</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L101">_ZN8bayesnet10Classifier7predictERSt6vectorIS1_IiSaIiEESaIS3_EE</a></td>
<td class="coverFnHi">4</td>
<td class="coverFnHi">44</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L153">_ZN8bayesnet10Classifier8addNodesEv</a></td>
<td class="coverFnHi">251</td>
<td class="coverFnHi">2951</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L10">_ZN8bayesnet10ClassifierC2ENS_7NetworkE</a></td>
<td class="coverFnHi">413</td>
<td class="coverFnHi">4750</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L166">_ZNK8bayesnet10Classifier16getNumberOfEdgesEv</a></td>
<td class="coverFnHi">40</td>
<td class="coverFnHi">475</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L161">_ZNK8bayesnet10Classifier16getNumberOfNodesEv</a></td>
<td class="coverFnHi">40</td>
<td class="coverFnHi">475</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L174">_ZNK8bayesnet10Classifier17getClassNumStatesEv</a></td>
<td class="coverFnHi">76</td>
<td class="coverFnHi">877</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L170">_ZNK8bayesnet10Classifier17getNumberOfStatesEv</a></td>
<td class="coverFnHi">6</td>
<td class="coverFnHi">66</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L149">_ZNK8bayesnet10Classifier4showB5cxx11Ev</a></td>
<td class="coverFnHi">6</td>
<td class="coverFnHi">66</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.cc.gcov.html#L182">_ZNK8bayesnet10Classifier8dump_cptB5cxx11Ev</a></td>
<td class="coverFnHi">1</td>
<td class="coverFnHi">11</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -71,188 +71,188 @@
<span id="L9"><span class="lineNum"> 9</span> : #include &quot;Classifier.h&quot;</span>
<span id="L10"><span class="lineNum"> 10</span> : </span>
<span id="L11"><span class="lineNum"> 11</span> : namespace bayesnet {</span>
<span id="L12"><span class="lineNum"> 12</span> <span class="tlaGNC tlaBgGNC"> 413 : Classifier::Classifier(Network model) : model(model), m(0), n(0), metrics(Metrics()), fitted(false) {}</span></span>
<span id="L12"><span class="lineNum"> 12</span> <span class="tlaGNC tlaBgGNC"> 4750 : Classifier::Classifier(Network model) : model(model), m(0), n(0), metrics(Metrics()), fitted(false) {}</span></span>
<span id="L13"><span class="lineNum"> 13</span> : const std::string CLASSIFIER_NOT_FITTED = &quot;Classifier has not been fitted&quot;;</span>
<span id="L14"><span class="lineNum"> 14</span> <span class="tlaGNC"> 291 : Classifier&amp; Classifier::build(const std::vector&lt;std::string&gt;&amp; features, const std::string&amp; className, std::map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states, const torch::Tensor&amp; weights)</span></span>
<span id="L14"><span class="lineNum"> 14</span> <span class="tlaGNC"> 3413 : Classifier&amp; Classifier::build(const std::vector&lt;std::string&gt;&amp; features, const std::string&amp; className, std::map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states, const torch::Tensor&amp; weights)</span></span>
<span id="L15"><span class="lineNum"> 15</span> : {</span>
<span id="L16"><span class="lineNum"> 16</span> <span class="tlaGNC"> 291 : this-&gt;features = features;</span></span>
<span id="L17"><span class="lineNum"> 17</span> <span class="tlaGNC"> 291 : this-&gt;className = className;</span></span>
<span id="L18"><span class="lineNum"> 18</span> <span class="tlaGNC"> 291 : this-&gt;states = states;</span></span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC"> 291 : m = dataset.size(1);</span></span>
<span id="L20"><span class="lineNum"> 20</span> <span class="tlaGNC"> 291 : n = features.size();</span></span>
<span id="L21"><span class="lineNum"> 21</span> <span class="tlaGNC"> 291 : checkFitParameters();</span></span>
<span id="L22"><span class="lineNum"> 22</span> <span class="tlaGNC"> 283 : auto n_classes = states.at(className).size();</span></span>
<span id="L23"><span class="lineNum"> 23</span> <span class="tlaGNC"> 283 : metrics = Metrics(dataset, features, className, n_classes);</span></span>
<span id="L24"><span class="lineNum"> 24</span> <span class="tlaGNC"> 283 : model.initialize();</span></span>
<span id="L25"><span class="lineNum"> 25</span> <span class="tlaGNC"> 283 : buildModel(weights);</span></span>
<span id="L26"><span class="lineNum"> 26</span> <span class="tlaGNC"> 283 : trainModel(weights);</span></span>
<span id="L27"><span class="lineNum"> 27</span> <span class="tlaGNC"> 279 : fitted = true;</span></span>
<span id="L28"><span class="lineNum"> 28</span> <span class="tlaGNC"> 279 : return *this;</span></span>
<span id="L16"><span class="lineNum"> 16</span> <span class="tlaGNC"> 3413 : this-&gt;features = features;</span></span>
<span id="L17"><span class="lineNum"> 17</span> <span class="tlaGNC"> 3413 : this-&gt;className = className;</span></span>
<span id="L18"><span class="lineNum"> 18</span> <span class="tlaGNC"> 3413 : this-&gt;states = states;</span></span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC"> 3413 : m = dataset.size(1);</span></span>
<span id="L20"><span class="lineNum"> 20</span> <span class="tlaGNC"> 3413 : n = features.size();</span></span>
<span id="L21"><span class="lineNum"> 21</span> <span class="tlaGNC"> 3413 : checkFitParameters();</span></span>
<span id="L22"><span class="lineNum"> 22</span> <span class="tlaGNC"> 3325 : auto n_classes = states.at(className).size();</span></span>
<span id="L23"><span class="lineNum"> 23</span> <span class="tlaGNC"> 3325 : metrics = Metrics(dataset, features, className, n_classes);</span></span>
<span id="L24"><span class="lineNum"> 24</span> <span class="tlaGNC"> 3325 : model.initialize();</span></span>
<span id="L25"><span class="lineNum"> 25</span> <span class="tlaGNC"> 3325 : buildModel(weights);</span></span>
<span id="L26"><span class="lineNum"> 26</span> <span class="tlaGNC"> 3325 : trainModel(weights);</span></span>
<span id="L27"><span class="lineNum"> 27</span> <span class="tlaGNC"> 3277 : fitted = true;</span></span>
<span id="L28"><span class="lineNum"> 28</span> <span class="tlaGNC"> 3277 : return *this;</span></span>
<span id="L29"><span class="lineNum"> 29</span> : }</span>
<span id="L30"><span class="lineNum"> 30</span> <span class="tlaGNC"> 77 : void Classifier::buildDataset(torch::Tensor&amp; ytmp)</span></span>
<span id="L30"><span class="lineNum"> 30</span> <span class="tlaGNC"> 888 : void Classifier::buildDataset(torch::Tensor&amp; ytmp)</span></span>
<span id="L31"><span class="lineNum"> 31</span> : {</span>
<span id="L32"><span class="lineNum"> 32</span> : try {</span>
<span id="L33"><span class="lineNum"> 33</span> <span class="tlaGNC"> 77 : auto yresized = torch::transpose(ytmp.view({ ytmp.size(0), 1 }), 0, 1);</span></span>
<span id="L34"><span class="lineNum"> 34</span> <span class="tlaGNC"> 239 : dataset = torch::cat({ dataset, yresized }, 0);</span></span>
<span id="L35"><span class="lineNum"> 35</span> <span class="tlaGNC"> 77 : }</span></span>
<span id="L36"><span class="lineNum"> 36</span> <span class="tlaGNC"> 4 : catch (const std::exception&amp; e) {</span></span>
<span id="L37"><span class="lineNum"> 37</span> <span class="tlaGNC"> 4 : std::stringstream oss;</span></span>
<span id="L38"><span class="lineNum"> 38</span> <span class="tlaGNC"> 4 : oss &lt;&lt; &quot;* Error in X and y dimensions *\n&quot;;</span></span>
<span id="L39"><span class="lineNum"> 39</span> <span class="tlaGNC"> 4 : oss &lt;&lt; &quot;X dimensions: &quot; &lt;&lt; dataset.sizes() &lt;&lt; &quot;\n&quot;;</span></span>
<span id="L40"><span class="lineNum"> 40</span> <span class="tlaGNC"> 4 : oss &lt;&lt; &quot;y dimensions: &quot; &lt;&lt; ytmp.sizes();</span></span>
<span id="L41"><span class="lineNum"> 41</span> <span class="tlaGNC"> 4 : throw std::runtime_error(oss.str());</span></span>
<span id="L42"><span class="lineNum"> 42</span> <span class="tlaGNC"> 8 : }</span></span>
<span id="L43"><span class="lineNum"> 43</span> <span class="tlaGNC"> 154 : }</span></span>
<span id="L44"><span class="lineNum"> 44</span> <span class="tlaGNC"> 251 : void Classifier::trainModel(const torch::Tensor&amp; weights)</span></span>
<span id="L33"><span class="lineNum"> 33</span> <span class="tlaGNC"> 888 : auto yresized = torch::transpose(ytmp.view({ ytmp.size(0), 1 }), 0, 1);</span></span>
<span id="L34"><span class="lineNum"> 34</span> <span class="tlaGNC"> 2752 : dataset = torch::cat({ dataset, yresized }, 0);</span></span>
<span id="L35"><span class="lineNum"> 35</span> <span class="tlaGNC"> 888 : }</span></span>
<span id="L36"><span class="lineNum"> 36</span> <span class="tlaGNC"> 44 : catch (const std::exception&amp; e) {</span></span>
<span id="L37"><span class="lineNum"> 37</span> <span class="tlaGNC"> 44 : std::stringstream oss;</span></span>
<span id="L38"><span class="lineNum"> 38</span> <span class="tlaGNC"> 44 : oss &lt;&lt; &quot;* Error in X and y dimensions *\n&quot;;</span></span>
<span id="L39"><span class="lineNum"> 39</span> <span class="tlaGNC"> 44 : oss &lt;&lt; &quot;X dimensions: &quot; &lt;&lt; dataset.sizes() &lt;&lt; &quot;\n&quot;;</span></span>
<span id="L40"><span class="lineNum"> 40</span> <span class="tlaGNC"> 44 : oss &lt;&lt; &quot;y dimensions: &quot; &lt;&lt; ytmp.sizes();</span></span>
<span id="L41"><span class="lineNum"> 41</span> <span class="tlaGNC"> 44 : throw std::runtime_error(oss.str());</span></span>
<span id="L42"><span class="lineNum"> 42</span> <span class="tlaGNC"> 88 : }</span></span>
<span id="L43"><span class="lineNum"> 43</span> <span class="tlaGNC"> 1776 : }</span></span>
<span id="L44"><span class="lineNum"> 44</span> <span class="tlaGNC"> 2951 : void Classifier::trainModel(const torch::Tensor&amp; weights)</span></span>
<span id="L45"><span class="lineNum"> 45</span> : {</span>
<span id="L46"><span class="lineNum"> 46</span> <span class="tlaGNC"> 251 : model.fit(dataset, weights, features, className, states);</span></span>
<span id="L47"><span class="lineNum"> 47</span> <span class="tlaGNC"> 251 : }</span></span>
<span id="L46"><span class="lineNum"> 46</span> <span class="tlaGNC"> 2951 : model.fit(dataset, weights, features, className, states);</span></span>
<span id="L47"><span class="lineNum"> 47</span> <span class="tlaGNC"> 2951 : }</span></span>
<span id="L48"><span class="lineNum"> 48</span> : // X is nxm where n is the number of features and m the number of samples</span>
<span id="L49"><span class="lineNum"> 49</span> <span class="tlaGNC"> 28 : Classifier&amp; Classifier::fit(torch::Tensor&amp; X, torch::Tensor&amp; y, const std::vector&lt;std::string&gt;&amp; features, const std::string&amp; className, std::map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states)</span></span>
<span id="L49"><span class="lineNum"> 49</span> <span class="tlaGNC"> 322 : Classifier&amp; Classifier::fit(torch::Tensor&amp; X, torch::Tensor&amp; y, const std::vector&lt;std::string&gt;&amp; features, const std::string&amp; className, std::map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states)</span></span>
<span id="L50"><span class="lineNum"> 50</span> : {</span>
<span id="L51"><span class="lineNum"> 51</span> <span class="tlaGNC"> 28 : dataset = X;</span></span>
<span id="L52"><span class="lineNum"> 52</span> <span class="tlaGNC"> 28 : buildDataset(y);</span></span>
<span id="L53"><span class="lineNum"> 53</span> <span class="tlaGNC"> 26 : const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble);</span></span>
<span id="L54"><span class="lineNum"> 54</span> <span class="tlaGNC"> 44 : return build(features, className, states, weights);</span></span>
<span id="L55"><span class="lineNum"> 55</span> <span class="tlaGNC"> 26 : }</span></span>
<span id="L51"><span class="lineNum"> 51</span> <span class="tlaGNC"> 322 : dataset = X;</span></span>
<span id="L52"><span class="lineNum"> 52</span> <span class="tlaGNC"> 322 : buildDataset(y);</span></span>
<span id="L53"><span class="lineNum"> 53</span> <span class="tlaGNC"> 300 : const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble);</span></span>
<span id="L54"><span class="lineNum"> 54</span> <span class="tlaGNC"> 512 : return build(features, className, states, weights);</span></span>
<span id="L55"><span class="lineNum"> 55</span> <span class="tlaGNC"> 300 : }</span></span>
<span id="L56"><span class="lineNum"> 56</span> : // X is nxm where n is the number of features and m the number of samples</span>
<span id="L57"><span class="lineNum"> 57</span> <span class="tlaGNC"> 32 : Classifier&amp; Classifier::fit(std::vector&lt;std::vector&lt;int&gt;&gt;&amp; X, std::vector&lt;int&gt;&amp; y, const std::vector&lt;std::string&gt;&amp; features, const std::string&amp; className, std::map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states)</span></span>
<span id="L57"><span class="lineNum"> 57</span> <span class="tlaGNC"> 360 : Classifier&amp; Classifier::fit(std::vector&lt;std::vector&lt;int&gt;&gt;&amp; X, std::vector&lt;int&gt;&amp; y, const std::vector&lt;std::string&gt;&amp; features, const std::string&amp; className, std::map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states)</span></span>
<span id="L58"><span class="lineNum"> 58</span> : {</span>
<span id="L59"><span class="lineNum"> 59</span> <span class="tlaGNC"> 32 : dataset = torch::zeros({ static_cast&lt;int&gt;(X.size()), static_cast&lt;int&gt;(X[0].size()) }, torch::kInt32);</span></span>
<span id="L60"><span class="lineNum"> 60</span> <span class="tlaGNC"> 643 : for (int i = 0; i &lt; X.size(); ++i) {</span></span>
<span id="L61"><span class="lineNum"> 61</span> <span class="tlaGNC"> 2444 : dataset.index_put_({ i, &quot;...&quot; }, torch::tensor(X[i], torch::kInt32));</span></span>
<span id="L59"><span class="lineNum"> 59</span> <span class="tlaGNC"> 360 : dataset = torch::zeros({ static_cast&lt;int&gt;(X.size()), static_cast&lt;int&gt;(X[0].size()) }, torch::kInt32);</span></span>
<span id="L60"><span class="lineNum"> 60</span> <span class="tlaGNC"> 5883 : for (int i = 0; i &lt; X.size(); ++i) {</span></span>
<span id="L61"><span class="lineNum"> 61</span> <span class="tlaGNC"> 22092 : dataset.index_put_({ i, &quot;...&quot; }, torch::tensor(X[i], torch::kInt32));</span></span>
<span id="L62"><span class="lineNum"> 62</span> : }</span>
<span id="L63"><span class="lineNum"> 63</span> <span class="tlaGNC"> 32 : auto ytmp = torch::tensor(y, torch::kInt32);</span></span>
<span id="L64"><span class="lineNum"> 64</span> <span class="tlaGNC"> 32 : buildDataset(ytmp);</span></span>
<span id="L65"><span class="lineNum"> 65</span> <span class="tlaGNC"> 30 : const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble);</span></span>
<span id="L66"><span class="lineNum"> 66</span> <span class="tlaGNC"> 56 : return build(features, className, states, weights);</span></span>
<span id="L67"><span class="lineNum"> 67</span> <span class="tlaGNC"> 647 : }</span></span>
<span id="L68"><span class="lineNum"> 68</span> <span class="tlaGNC"> 99 : Classifier&amp; Classifier::fit(torch::Tensor&amp; dataset, const std::vector&lt;std::string&gt;&amp; features, const std::string&amp; className, std::map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states)</span></span>
<span id="L63"><span class="lineNum"> 63</span> <span class="tlaGNC"> 360 : auto ytmp = torch::tensor(y, torch::kInt32);</span></span>
<span id="L64"><span class="lineNum"> 64</span> <span class="tlaGNC"> 360 : buildDataset(ytmp);</span></span>
<span id="L65"><span class="lineNum"> 65</span> <span class="tlaGNC"> 338 : const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble);</span></span>
<span id="L66"><span class="lineNum"> 66</span> <span class="tlaGNC"> 628 : return build(features, className, states, weights);</span></span>
<span id="L67"><span class="lineNum"> 67</span> <span class="tlaGNC"> 5931 : }</span></span>
<span id="L68"><span class="lineNum"> 68</span> <span class="tlaGNC"> 1089 : Classifier&amp; Classifier::fit(torch::Tensor&amp; dataset, const std::vector&lt;std::string&gt;&amp; features, const std::string&amp; className, std::map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states)</span></span>
<span id="L69"><span class="lineNum"> 69</span> : {</span>
<span id="L70"><span class="lineNum"> 70</span> <span class="tlaGNC"> 99 : this-&gt;dataset = dataset;</span></span>
<span id="L71"><span class="lineNum"> 71</span> <span class="tlaGNC"> 99 : const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble);</span></span>
<span id="L72"><span class="lineNum"> 72</span> <span class="tlaGNC"> 198 : return build(features, className, states, weights);</span></span>
<span id="L73"><span class="lineNum"> 73</span> <span class="tlaGNC"> 99 : }</span></span>
<span id="L74"><span class="lineNum"> 74</span> <span class="tlaGNC"> 136 : Classifier&amp; Classifier::fit(torch::Tensor&amp; dataset, const std::vector&lt;std::string&gt;&amp; features, const std::string&amp; className, std::map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states, const torch::Tensor&amp; weights)</span></span>
<span id="L70"><span class="lineNum"> 70</span> <span class="tlaGNC"> 1089 : this-&gt;dataset = dataset;</span></span>
<span id="L71"><span class="lineNum"> 71</span> <span class="tlaGNC"> 1089 : const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble);</span></span>
<span id="L72"><span class="lineNum"> 72</span> <span class="tlaGNC"> 2178 : return build(features, className, states, weights);</span></span>
<span id="L73"><span class="lineNum"> 73</span> <span class="tlaGNC"> 1089 : }</span></span>
<span id="L74"><span class="lineNum"> 74</span> <span class="tlaGNC"> 1686 : Classifier&amp; Classifier::fit(torch::Tensor&amp; dataset, const std::vector&lt;std::string&gt;&amp; features, const std::string&amp; className, std::map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states, const torch::Tensor&amp; weights)</span></span>
<span id="L75"><span class="lineNum"> 75</span> : {</span>
<span id="L76"><span class="lineNum"> 76</span> <span class="tlaGNC"> 136 : this-&gt;dataset = dataset;</span></span>
<span id="L77"><span class="lineNum"> 77</span> <span class="tlaGNC"> 136 : return build(features, className, states, weights);</span></span>
<span id="L76"><span class="lineNum"> 76</span> <span class="tlaGNC"> 1686 : this-&gt;dataset = dataset;</span></span>
<span id="L77"><span class="lineNum"> 77</span> <span class="tlaGNC"> 1686 : return build(features, className, states, weights);</span></span>
<span id="L78"><span class="lineNum"> 78</span> : }</span>
<span id="L79"><span class="lineNum"> 79</span> <span class="tlaGNC"> 291 : void Classifier::checkFitParameters()</span></span>
<span id="L79"><span class="lineNum"> 79</span> <span class="tlaGNC"> 3413 : void Classifier::checkFitParameters()</span></span>
<span id="L80"><span class="lineNum"> 80</span> : {</span>
<span id="L81"><span class="lineNum"> 81</span> <span class="tlaGNC"> 291 : if (torch::is_floating_point(dataset)) {</span></span>
<span id="L82"><span class="lineNum"> 82</span> <span class="tlaGNC"> 2 : throw std::invalid_argument(&quot;dataset (X, y) must be of type Integer&quot;);</span></span>
<span id="L81"><span class="lineNum"> 81</span> <span class="tlaGNC"> 3413 : if (torch::is_floating_point(dataset)) {</span></span>
<span id="L82"><span class="lineNum"> 82</span> <span class="tlaGNC"> 22 : throw std::invalid_argument(&quot;dataset (X, y) must be of type Integer&quot;);</span></span>
<span id="L83"><span class="lineNum"> 83</span> : }</span>
<span id="L84"><span class="lineNum"> 84</span> <span class="tlaGNC"> 289 : if (dataset.size(0) - 1 != features.size()) {</span></span>
<span id="L85"><span class="lineNum"> 85</span> <span class="tlaGNC"> 2 : throw std::invalid_argument(&quot;Classifier: X &quot; + std::to_string(dataset.size(0) - 1) + &quot; and features &quot; + std::to_string(features.size()) + &quot; must have the same number of features&quot;);</span></span>
<span id="L84"><span class="lineNum"> 84</span> <span class="tlaGNC"> 3391 : if (dataset.size(0) - 1 != features.size()) {</span></span>
<span id="L85"><span class="lineNum"> 85</span> <span class="tlaGNC"> 22 : throw std::invalid_argument(&quot;Classifier: X &quot; + std::to_string(dataset.size(0) - 1) + &quot; and features &quot; + std::to_string(features.size()) + &quot; must have the same number of features&quot;);</span></span>
<span id="L86"><span class="lineNum"> 86</span> : }</span>
<span id="L87"><span class="lineNum"> 87</span> <span class="tlaGNC"> 287 : if (states.find(className) == states.end()) {</span></span>
<span id="L88"><span class="lineNum"> 88</span> <span class="tlaGNC"> 2 : throw std::invalid_argument(&quot;class name not found in states&quot;);</span></span>
<span id="L87"><span class="lineNum"> 87</span> <span class="tlaGNC"> 3369 : if (states.find(className) == states.end()) {</span></span>
<span id="L88"><span class="lineNum"> 88</span> <span class="tlaGNC"> 22 : throw std::invalid_argument(&quot;class name not found in states&quot;);</span></span>
<span id="L89"><span class="lineNum"> 89</span> : }</span>
<span id="L90"><span class="lineNum"> 90</span> <span class="tlaGNC"> 9467 : for (auto feature : features) {</span></span>
<span id="L91"><span class="lineNum"> 91</span> <span class="tlaGNC"> 9184 : if (states.find(feature) == states.end()) {</span></span>
<span id="L92"><span class="lineNum"> 92</span> <span class="tlaGNC"> 2 : throw std::invalid_argument(&quot;feature [&quot; + feature + &quot;] not found in states&quot;);</span></span>
<span id="L90"><span class="lineNum"> 90</span> <span class="tlaGNC"> 124581 : for (auto feature : features) {</span></span>
<span id="L91"><span class="lineNum"> 91</span> <span class="tlaGNC"> 121256 : if (states.find(feature) == states.end()) {</span></span>
<span id="L92"><span class="lineNum"> 92</span> <span class="tlaGNC"> 22 : throw std::invalid_argument(&quot;feature [&quot; + feature + &quot;] not found in states&quot;);</span></span>
<span id="L93"><span class="lineNum"> 93</span> : }</span>
<span id="L94"><span class="lineNum"> 94</span> <span class="tlaGNC"> 9184 : }</span></span>
<span id="L95"><span class="lineNum"> 95</span> <span class="tlaGNC"> 283 : }</span></span>
<span id="L96"><span class="lineNum"> 96</span> <span class="tlaGNC"> 245 : torch::Tensor Classifier::predict(torch::Tensor&amp; X)</span></span>
<span id="L94"><span class="lineNum"> 94</span> <span class="tlaGNC"> 121256 : }</span></span>
<span id="L95"><span class="lineNum"> 95</span> <span class="tlaGNC"> 3325 : }</span></span>
<span id="L96"><span class="lineNum"> 96</span> <span class="tlaGNC"> 3262 : torch::Tensor Classifier::predict(torch::Tensor&amp; X)</span></span>
<span id="L97"><span class="lineNum"> 97</span> : {</span>
<span id="L98"><span class="lineNum"> 98</span> <span class="tlaGNC"> 245 : if (!fitted) {</span></span>
<span id="L99"><span class="lineNum"> 99</span> <span class="tlaGNC"> 4 : throw std::logic_error(CLASSIFIER_NOT_FITTED);</span></span>
<span id="L98"><span class="lineNum"> 98</span> <span class="tlaGNC"> 3262 : if (!fitted) {</span></span>
<span id="L99"><span class="lineNum"> 99</span> <span class="tlaGNC"> 44 : throw std::logic_error(CLASSIFIER_NOT_FITTED);</span></span>
<span id="L100"><span class="lineNum"> 100</span> : }</span>
<span id="L101"><span class="lineNum"> 101</span> <span class="tlaGNC"> 241 : return model.predict(X);</span></span>
<span id="L101"><span class="lineNum"> 101</span> <span class="tlaGNC"> 3218 : return model.predict(X);</span></span>
<span id="L102"><span class="lineNum"> 102</span> : }</span>
<span id="L103"><span class="lineNum"> 103</span> <span class="tlaGNC"> 4 : std::vector&lt;int&gt; Classifier::predict(std::vector&lt;std::vector&lt;int&gt;&gt;&amp; X)</span></span>
<span id="L103"><span class="lineNum"> 103</span> <span class="tlaGNC"> 44 : std::vector&lt;int&gt; Classifier::predict(std::vector&lt;std::vector&lt;int&gt;&gt;&amp; X)</span></span>
<span id="L104"><span class="lineNum"> 104</span> : {</span>
<span id="L105"><span class="lineNum"> 105</span> <span class="tlaGNC"> 4 : if (!fitted) {</span></span>
<span id="L106"><span class="lineNum"> 106</span> <span class="tlaGNC"> 2 : throw std::logic_error(CLASSIFIER_NOT_FITTED);</span></span>
<span id="L105"><span class="lineNum"> 105</span> <span class="tlaGNC"> 44 : if (!fitted) {</span></span>
<span id="L106"><span class="lineNum"> 106</span> <span class="tlaGNC"> 22 : throw std::logic_error(CLASSIFIER_NOT_FITTED);</span></span>
<span id="L107"><span class="lineNum"> 107</span> : }</span>
<span id="L108"><span class="lineNum"> 108</span> <span class="tlaGNC"> 2 : auto m_ = X[0].size();</span></span>
<span id="L109"><span class="lineNum"> 109</span> <span class="tlaGNC"> 2 : auto n_ = X.size();</span></span>
<span id="L110"><span class="lineNum"> 110</span> <span class="tlaGNC"> 2 : std::vector&lt;std::vector&lt;int&gt;&gt; Xd(n_, std::vector&lt;int&gt;(m_, 0));</span></span>
<span id="L111"><span class="lineNum"> 111</span> <span class="tlaGNC"> 10 : for (auto i = 0; i &lt; n_; i++) {</span></span>
<span id="L112"><span class="lineNum"> 112</span> <span class="tlaGNC"> 16 : Xd[i] = std::vector&lt;int&gt;(X[i].begin(), X[i].end());</span></span>
<span id="L108"><span class="lineNum"> 108</span> <span class="tlaGNC"> 22 : auto m_ = X[0].size();</span></span>
<span id="L109"><span class="lineNum"> 109</span> <span class="tlaGNC"> 22 : auto n_ = X.size();</span></span>
<span id="L110"><span class="lineNum"> 110</span> <span class="tlaGNC"> 22 : std::vector&lt;std::vector&lt;int&gt;&gt; Xd(n_, std::vector&lt;int&gt;(m_, 0));</span></span>
<span id="L111"><span class="lineNum"> 111</span> <span class="tlaGNC"> 110 : for (auto i = 0; i &lt; n_; i++) {</span></span>
<span id="L112"><span class="lineNum"> 112</span> <span class="tlaGNC"> 176 : Xd[i] = std::vector&lt;int&gt;(X[i].begin(), X[i].end());</span></span>
<span id="L113"><span class="lineNum"> 113</span> : }</span>
<span id="L114"><span class="lineNum"> 114</span> <span class="tlaGNC"> 2 : auto yp = model.predict(Xd);</span></span>
<span id="L115"><span class="lineNum"> 115</span> <span class="tlaGNC"> 4 : return yp;</span></span>
<span id="L116"><span class="lineNum"> 116</span> <span class="tlaGNC"> 2 : }</span></span>
<span id="L117"><span class="lineNum"> 117</span> <span class="tlaGNC"> 306 : torch::Tensor Classifier::predict_proba(torch::Tensor&amp; X)</span></span>
<span id="L114"><span class="lineNum"> 114</span> <span class="tlaGNC"> 22 : auto yp = model.predict(Xd);</span></span>
<span id="L115"><span class="lineNum"> 115</span> <span class="tlaGNC"> 44 : return yp;</span></span>
<span id="L116"><span class="lineNum"> 116</span> <span class="tlaGNC"> 22 : }</span></span>
<span id="L117"><span class="lineNum"> 117</span> <span class="tlaGNC"> 3562 : torch::Tensor Classifier::predict_proba(torch::Tensor&amp; X)</span></span>
<span id="L118"><span class="lineNum"> 118</span> : {</span>
<span id="L119"><span class="lineNum"> 119</span> <span class="tlaGNC"> 306 : if (!fitted) {</span></span>
<span id="L120"><span class="lineNum"> 120</span> <span class="tlaGNC"> 2 : throw std::logic_error(CLASSIFIER_NOT_FITTED);</span></span>
<span id="L119"><span class="lineNum"> 119</span> <span class="tlaGNC"> 3562 : if (!fitted) {</span></span>
<span id="L120"><span class="lineNum"> 120</span> <span class="tlaGNC"> 22 : throw std::logic_error(CLASSIFIER_NOT_FITTED);</span></span>
<span id="L121"><span class="lineNum"> 121</span> : }</span>
<span id="L122"><span class="lineNum"> 122</span> <span class="tlaGNC"> 304 : return model.predict_proba(X);</span></span>
<span id="L122"><span class="lineNum"> 122</span> <span class="tlaGNC"> 3540 : return model.predict_proba(X);</span></span>
<span id="L123"><span class="lineNum"> 123</span> : }</span>
<span id="L124"><span class="lineNum"> 124</span> <span class="tlaGNC"> 67 : std::vector&lt;std::vector&lt;double&gt;&gt; Classifier::predict_proba(std::vector&lt;std::vector&lt;int&gt;&gt;&amp; X)</span></span>
<span id="L124"><span class="lineNum"> 124</span> <span class="tlaGNC"> 766 : std::vector&lt;std::vector&lt;double&gt;&gt; Classifier::predict_proba(std::vector&lt;std::vector&lt;int&gt;&gt;&amp; X)</span></span>
<span id="L125"><span class="lineNum"> 125</span> : {</span>
<span id="L126"><span class="lineNum"> 126</span> <span class="tlaGNC"> 67 : if (!fitted) {</span></span>
<span id="L127"><span class="lineNum"> 127</span> <span class="tlaGNC"> 2 : throw std::logic_error(CLASSIFIER_NOT_FITTED);</span></span>
<span id="L126"><span class="lineNum"> 126</span> <span class="tlaGNC"> 766 : if (!fitted) {</span></span>
<span id="L127"><span class="lineNum"> 127</span> <span class="tlaGNC"> 22 : throw std::logic_error(CLASSIFIER_NOT_FITTED);</span></span>
<span id="L128"><span class="lineNum"> 128</span> : }</span>
<span id="L129"><span class="lineNum"> 129</span> <span class="tlaGNC"> 65 : auto m_ = X[0].size();</span></span>
<span id="L130"><span class="lineNum"> 130</span> <span class="tlaGNC"> 65 : auto n_ = X.size();</span></span>
<span id="L131"><span class="lineNum"> 131</span> <span class="tlaGNC"> 65 : std::vector&lt;std::vector&lt;int&gt;&gt; Xd(n_, std::vector&lt;int&gt;(m_, 0));</span></span>
<span id="L129"><span class="lineNum"> 129</span> <span class="tlaGNC"> 744 : auto m_ = X[0].size();</span></span>
<span id="L130"><span class="lineNum"> 130</span> <span class="tlaGNC"> 744 : auto n_ = X.size();</span></span>
<span id="L131"><span class="lineNum"> 131</span> <span class="tlaGNC"> 744 : std::vector&lt;std::vector&lt;int&gt;&gt; Xd(n_, std::vector&lt;int&gt;(m_, 0));</span></span>
<span id="L132"><span class="lineNum"> 132</span> : // Convert to nxm vector</span>
<span id="L133"><span class="lineNum"> 133</span> <span class="tlaGNC"> 974 : for (auto i = 0; i &lt; n_; i++) {</span></span>
<span id="L134"><span class="lineNum"> 134</span> <span class="tlaGNC"> 1818 : Xd[i] = std::vector&lt;int&gt;(X[i].begin(), X[i].end());</span></span>
<span id="L133"><span class="lineNum"> 133</span> <span class="tlaGNC"> 9722 : for (auto i = 0; i &lt; n_; i++) {</span></span>
<span id="L134"><span class="lineNum"> 134</span> <span class="tlaGNC"> 17956 : Xd[i] = std::vector&lt;int&gt;(X[i].begin(), X[i].end());</span></span>
<span id="L135"><span class="lineNum"> 135</span> : }</span>
<span id="L136"><span class="lineNum"> 136</span> <span class="tlaGNC"> 65 : auto yp = model.predict_proba(Xd);</span></span>
<span id="L137"><span class="lineNum"> 137</span> <span class="tlaGNC"> 130 : return yp;</span></span>
<span id="L138"><span class="lineNum"> 138</span> <span class="tlaGNC"> 65 : }</span></span>
<span id="L139"><span class="lineNum"> 139</span> <span class="tlaGNC"> 28 : float Classifier::score(torch::Tensor&amp; X, torch::Tensor&amp; y)</span></span>
<span id="L136"><span class="lineNum"> 136</span> <span class="tlaGNC"> 744 : auto yp = model.predict_proba(Xd);</span></span>
<span id="L137"><span class="lineNum"> 137</span> <span class="tlaGNC"> 1488 : return yp;</span></span>
<span id="L138"><span class="lineNum"> 138</span> <span class="tlaGNC"> 744 : }</span></span>
<span id="L139"><span class="lineNum"> 139</span> <span class="tlaGNC"> 308 : float Classifier::score(torch::Tensor&amp; X, torch::Tensor&amp; y)</span></span>
<span id="L140"><span class="lineNum"> 140</span> : {</span>
<span id="L141"><span class="lineNum"> 141</span> <span class="tlaGNC"> 28 : torch::Tensor y_pred = predict(X);</span></span>
<span id="L142"><span class="lineNum"> 142</span> <span class="tlaGNC"> 52 : return (y_pred == y).sum().item&lt;float&gt;() / y.size(0);</span></span>
<span id="L143"><span class="lineNum"> 143</span> <span class="tlaGNC"> 26 : }</span></span>
<span id="L144"><span class="lineNum"> 144</span> <span class="tlaGNC"> 4 : float Classifier::score(std::vector&lt;std::vector&lt;int&gt;&gt;&amp; X, std::vector&lt;int&gt;&amp; y)</span></span>
<span id="L141"><span class="lineNum"> 141</span> <span class="tlaGNC"> 308 : torch::Tensor y_pred = predict(X);</span></span>
<span id="L142"><span class="lineNum"> 142</span> <span class="tlaGNC"> 572 : return (y_pred == y).sum().item&lt;float&gt;() / y.size(0);</span></span>
<span id="L143"><span class="lineNum"> 143</span> <span class="tlaGNC"> 286 : }</span></span>
<span id="L144"><span class="lineNum"> 144</span> <span class="tlaGNC"> 44 : float Classifier::score(std::vector&lt;std::vector&lt;int&gt;&gt;&amp; X, std::vector&lt;int&gt;&amp; y)</span></span>
<span id="L145"><span class="lineNum"> 145</span> : {</span>
<span id="L146"><span class="lineNum"> 146</span> <span class="tlaGNC"> 4 : if (!fitted) {</span></span>
<span id="L147"><span class="lineNum"> 147</span> <span class="tlaGNC"> 2 : throw std::logic_error(CLASSIFIER_NOT_FITTED);</span></span>
<span id="L146"><span class="lineNum"> 146</span> <span class="tlaGNC"> 44 : if (!fitted) {</span></span>
<span id="L147"><span class="lineNum"> 147</span> <span class="tlaGNC"> 22 : throw std::logic_error(CLASSIFIER_NOT_FITTED);</span></span>
<span id="L148"><span class="lineNum"> 148</span> : }</span>
<span id="L149"><span class="lineNum"> 149</span> <span class="tlaGNC"> 2 : return model.score(X, y);</span></span>
<span id="L149"><span class="lineNum"> 149</span> <span class="tlaGNC"> 22 : return model.score(X, y);</span></span>
<span id="L150"><span class="lineNum"> 150</span> : }</span>
<span id="L151"><span class="lineNum"> 151</span> <span class="tlaGNC"> 6 : std::vector&lt;std::string&gt; Classifier::show() const</span></span>
<span id="L151"><span class="lineNum"> 151</span> <span class="tlaGNC"> 66 : std::vector&lt;std::string&gt; Classifier::show() const</span></span>
<span id="L152"><span class="lineNum"> 152</span> : {</span>
<span id="L153"><span class="lineNum"> 153</span> <span class="tlaGNC"> 6 : return model.show();</span></span>
<span id="L153"><span class="lineNum"> 153</span> <span class="tlaGNC"> 66 : return model.show();</span></span>
<span id="L154"><span class="lineNum"> 154</span> : }</span>
<span id="L155"><span class="lineNum"> 155</span> <span class="tlaGNC"> 251 : void Classifier::addNodes()</span></span>
<span id="L155"><span class="lineNum"> 155</span> <span class="tlaGNC"> 2951 : void Classifier::addNodes()</span></span>
<span id="L156"><span class="lineNum"> 156</span> : {</span>
<span id="L157"><span class="lineNum"> 157</span> : // Add all nodes to the network</span>
<span id="L158"><span class="lineNum"> 158</span> <span class="tlaGNC"> 8799 : for (const auto&amp; feature : features) {</span></span>
<span id="L159"><span class="lineNum"> 159</span> <span class="tlaGNC"> 8548 : model.addNode(feature);</span></span>
<span id="L158"><span class="lineNum"> 158</span> <span class="tlaGNC"> 116009 : for (const auto&amp; feature : features) {</span></span>
<span id="L159"><span class="lineNum"> 159</span> <span class="tlaGNC"> 113058 : model.addNode(feature);</span></span>
<span id="L160"><span class="lineNum"> 160</span> : }</span>
<span id="L161"><span class="lineNum"> 161</span> <span class="tlaGNC"> 251 : model.addNode(className);</span></span>
<span id="L162"><span class="lineNum"> 162</span> <span class="tlaGNC"> 251 : }</span></span>
<span id="L163"><span class="lineNum"> 163</span> <span class="tlaGNC"> 40 : int Classifier::getNumberOfNodes() const</span></span>
<span id="L161"><span class="lineNum"> 161</span> <span class="tlaGNC"> 2951 : model.addNode(className);</span></span>
<span id="L162"><span class="lineNum"> 162</span> <span class="tlaGNC"> 2951 : }</span></span>
<span id="L163"><span class="lineNum"> 163</span> <span class="tlaGNC"> 475 : int Classifier::getNumberOfNodes() const</span></span>
<span id="L164"><span class="lineNum"> 164</span> : {</span>
<span id="L165"><span class="lineNum"> 165</span> : // Features does not include class</span>
<span id="L166"><span class="lineNum"> 166</span> <span class="tlaGNC"> 40 : return fitted ? model.getFeatures().size() : 0;</span></span>
<span id="L166"><span class="lineNum"> 166</span> <span class="tlaGNC"> 475 : return fitted ? model.getFeatures().size() : 0;</span></span>
<span id="L167"><span class="lineNum"> 167</span> : }</span>
<span id="L168"><span class="lineNum"> 168</span> <span class="tlaGNC"> 40 : int Classifier::getNumberOfEdges() const</span></span>
<span id="L168"><span class="lineNum"> 168</span> <span class="tlaGNC"> 475 : int Classifier::getNumberOfEdges() const</span></span>
<span id="L169"><span class="lineNum"> 169</span> : {</span>
<span id="L170"><span class="lineNum"> 170</span> <span class="tlaGNC"> 40 : return fitted ? model.getNumEdges() : 0;</span></span>
<span id="L170"><span class="lineNum"> 170</span> <span class="tlaGNC"> 475 : return fitted ? model.getNumEdges() : 0;</span></span>
<span id="L171"><span class="lineNum"> 171</span> : }</span>
<span id="L172"><span class="lineNum"> 172</span> <span class="tlaGNC"> 6 : int Classifier::getNumberOfStates() const</span></span>
<span id="L172"><span class="lineNum"> 172</span> <span class="tlaGNC"> 66 : int Classifier::getNumberOfStates() const</span></span>
<span id="L173"><span class="lineNum"> 173</span> : {</span>
<span id="L174"><span class="lineNum"> 174</span> <span class="tlaGNC"> 6 : return fitted ? model.getStates() : 0;</span></span>
<span id="L174"><span class="lineNum"> 174</span> <span class="tlaGNC"> 66 : return fitted ? model.getStates() : 0;</span></span>
<span id="L175"><span class="lineNum"> 175</span> : }</span>
<span id="L176"><span class="lineNum"> 176</span> <span class="tlaGNC"> 76 : int Classifier::getClassNumStates() const</span></span>
<span id="L176"><span class="lineNum"> 176</span> <span class="tlaGNC"> 877 : int Classifier::getClassNumStates() const</span></span>
<span id="L177"><span class="lineNum"> 177</span> : {</span>
<span id="L178"><span class="lineNum"> 178</span> <span class="tlaGNC"> 76 : return fitted ? model.getClassNumStates() : 0;</span></span>
<span id="L178"><span class="lineNum"> 178</span> <span class="tlaGNC"> 877 : return fitted ? model.getClassNumStates() : 0;</span></span>
<span id="L179"><span class="lineNum"> 179</span> : }</span>
<span id="L180"><span class="lineNum"> 180</span> <span class="tlaGNC"> 1 : std::vector&lt;std::string&gt; Classifier::topological_order()</span></span>
<span id="L180"><span class="lineNum"> 180</span> <span class="tlaGNC"> 11 : std::vector&lt;std::string&gt; Classifier::topological_order()</span></span>
<span id="L181"><span class="lineNum"> 181</span> : {</span>
<span id="L182"><span class="lineNum"> 182</span> <span class="tlaGNC"> 1 : return model.topological_sort();</span></span>
<span id="L182"><span class="lineNum"> 182</span> <span class="tlaGNC"> 11 : return model.topological_sort();</span></span>
<span id="L183"><span class="lineNum"> 183</span> : }</span>
<span id="L184"><span class="lineNum"> 184</span> <span class="tlaGNC"> 1 : std::string Classifier::dump_cpt() const</span></span>
<span id="L184"><span class="lineNum"> 184</span> <span class="tlaGNC"> 11 : std::string Classifier::dump_cpt() const</span></span>
<span id="L185"><span class="lineNum"> 185</span> : {</span>
<span id="L186"><span class="lineNum"> 186</span> <span class="tlaGNC"> 1 : return model.dump_cpt();</span></span>
<span id="L186"><span class="lineNum"> 186</span> <span class="tlaGNC"> 11 : return model.dump_cpt();</span></span>
<span id="L187"><span class="lineNum"> 187</span> : }</span>
<span id="L188"><span class="lineNum"> 188</span> <span class="tlaGNC"> 19 : void Classifier::setHyperparameters(const nlohmann::json&amp; hyperparameters)</span></span>
<span id="L188"><span class="lineNum"> 188</span> <span class="tlaGNC"> 231 : void Classifier::setHyperparameters(const nlohmann::json&amp; hyperparameters)</span></span>
<span id="L189"><span class="lineNum"> 189</span> : {</span>
<span id="L190"><span class="lineNum"> 190</span> <span class="tlaGNC"> 19 : if (!hyperparameters.empty()) {</span></span>
<span id="L191"><span class="lineNum"> 191</span> <span class="tlaGNC"> 2 : throw std::invalid_argument(&quot;Invalid hyperparameters&quot; + hyperparameters.dump());</span></span>
<span id="L190"><span class="lineNum"> 190</span> <span class="tlaGNC"> 231 : if (!hyperparameters.empty()) {</span></span>
<span id="L191"><span class="lineNum"> 191</span> <span class="tlaGNC"> 22 : throw std::invalid_argument(&quot;Invalid hyperparameters&quot; + hyperparameters.dump());</span></span>
<span id="L192"><span class="lineNum"> 192</span> : }</span>
<span id="L193"><span class="lineNum"> 193</span> <span class="tlaGNC"> 17 : }</span></span>
<span id="L193"><span class="lineNum"> 193</span> <span class="tlaGNC"> 209 : }</span></span>
<span id="L194"><span class="lineNum"> 194</span> : }</span>
</pre>
</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryMed">80.0&nbsp;%</td>
@ -72,7 +72,7 @@
<tr>
<td class="coverFn"><a href="Classifier.h.gcov.html#L36">_ZNK8bayesnet10Classifier8getNotesB5cxx11Ev</a></td>
<td class="coverFnHi">20</td>
<td class="coverFnHi">12</td>
</tr>
@ -86,7 +86,7 @@
<tr>
<td class="coverFn"><a href="Classifier.h.gcov.html#L16">_ZN8bayesnet10ClassifierD0Ev</a></td>
<td class="coverFnHi">273</td>
<td class="coverFnHi">241</td>
</tr>
@ -100,7 +100,7 @@
<tr>
<td class="coverFnAlias"><a href="Classifier.h.gcov.html#L16">_ZN8bayesnet10ClassifierD2Ev</a></td>
<td class="coverFnAliasHi">273</td>
<td class="coverFnAliasHi">241</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryMed">80.0&nbsp;%</td>
@ -72,7 +72,7 @@
<tr>
<td class="coverFn"><a href="Classifier.h.gcov.html#L16">_ZN8bayesnet10ClassifierD0Ev</a></td>
<td class="coverFnHi">273</td>
<td class="coverFnHi">241</td>
</tr>
@ -86,14 +86,14 @@
<tr>
<td class="coverFnAlias"><a href="Classifier.h.gcov.html#L16">_ZN8bayesnet10ClassifierD2Ev</a></td>
<td class="coverFnAliasHi">273</td>
<td class="coverFnAliasHi">241</td>
</tr>
<tr>
<td class="coverFn"><a href="Classifier.h.gcov.html#L36">_ZNK8bayesnet10Classifier8getNotesB5cxx11Ev</a></td>
<td class="coverFnHi">20</td>
<td class="coverFnHi">12</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryMed">80.0&nbsp;%</td>
@ -77,7 +77,7 @@
<span id="L15"><span class="lineNum"> 15</span> : class Classifier : public BaseClassifier {</span>
<span id="L16"><span class="lineNum"> 16</span> : public:</span>
<span id="L17"><span class="lineNum"> 17</span> : Classifier(Network model);</span>
<span id="L18"><span class="lineNum"> 18</span> <span class="tlaGNC tlaBgGNC"> 273 : virtual ~Classifier() = default;</span></span>
<span id="L18"><span class="lineNum"> 18</span> <span class="tlaGNC tlaBgGNC"> 241 : virtual ~Classifier() = default;</span></span>
<span id="L19"><span class="lineNum"> 19</span> : Classifier&amp; fit(std::vector&lt;std::vector&lt;int&gt;&gt;&amp; X, std::vector&lt;int&gt;&amp; y, const std::vector&lt;std::string&gt;&amp; features, const std::string&amp; className, std::map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states) override;</span>
<span id="L20"><span class="lineNum"> 20</span> : Classifier&amp; fit(torch::Tensor&amp; X, torch::Tensor&amp; y, const std::vector&lt;std::string&gt;&amp; features, const std::string&amp; className, std::map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states) override;</span>
<span id="L21"><span class="lineNum"> 21</span> : Classifier&amp; fit(torch::Tensor&amp; dataset, const std::vector&lt;std::string&gt;&amp; features, const std::string&amp; className, std::map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states) override;</span>
@ -97,7 +97,7 @@
<span id="L35"><span class="lineNum"> 35</span> : float score(std::vector&lt;std::vector&lt;int&gt;&gt;&amp; X, std::vector&lt;int&gt;&amp; y) override;</span>
<span id="L36"><span class="lineNum"> 36</span> : std::vector&lt;std::string&gt; show() const override;</span>
<span id="L37"><span class="lineNum"> 37</span> : std::vector&lt;std::string&gt; topological_order() override;</span>
<span id="L38"><span class="lineNum"> 38</span> <span class="tlaGNC"> 20 : std::vector&lt;std::string&gt; getNotes() const override { return notes; }</span></span>
<span id="L38"><span class="lineNum"> 38</span> <span class="tlaGNC"> 12 : std::vector&lt;std::string&gt; getNotes() const override { return notes; }</span></span>
<span id="L39"><span class="lineNum"> 39</span> : std::string dump_cpt() const override;</span>
<span id="L40"><span class="lineNum"> 40</span> : void setHyperparameters(const nlohmann::json&amp; hyperparameters) override; //For classifiers that don't have hyperparameters</span>
<span id="L41"><span class="lineNum"> 41</span> : protected:</span>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,35 +65,35 @@
<tr>
<td class="coverFn"><a href="KDB.cc.gcov.html#L101">_ZNK8bayesnet3KDB5graphERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE</a></td>
<td class="coverFnHi">2</td>
<td class="coverFnHi">22</td>
</tr>
<tr>
<td class="coverFn"><a href="KDB.cc.gcov.html#L13">_ZN8bayesnet3KDB18setHyperparametersERKN8nlohmann16json_abi_v3_11_310basic_jsonISt3mapSt6vectorNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEblmdSaNS2_14adl_serializerES5_IhSaIhEEvEE</a></td>
<td class="coverFnHi">3</td>
<td class="coverFnHi">33</td>
</tr>
<tr>
<td class="coverFn"><a href="KDB.cc.gcov.html#L26">_ZN8bayesnet3KDB10buildModelERKN2at6TensorE</a></td>
<td class="coverFnHi">13</td>
<td class="coverFnHi">143</td>
</tr>
<tr>
<td class="coverFn"><a href="KDB.cc.gcov.html#L8">_ZN8bayesnet3KDBC2Eif</a></td>
<td class="coverFnHi">37</td>
<td class="coverFnHi">407</td>
</tr>
<tr>
<td class="coverFn"><a href="KDB.cc.gcov.html#L77">_ZN8bayesnet3KDB11add_m_edgesEiRSt6vectorIiSaIiEERN2at6TensorE</a></td>
<td class="coverFnHi">86</td>
<td class="coverFnHi">946</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,35 +65,35 @@
<tr>
<td class="coverFn"><a href="KDB.cc.gcov.html#L26">_ZN8bayesnet3KDB10buildModelERKN2at6TensorE</a></td>
<td class="coverFnHi">13</td>
<td class="coverFnHi">143</td>
</tr>
<tr>
<td class="coverFn"><a href="KDB.cc.gcov.html#L77">_ZN8bayesnet3KDB11add_m_edgesEiRSt6vectorIiSaIiEERN2at6TensorE</a></td>
<td class="coverFnHi">86</td>
<td class="coverFnHi">946</td>
</tr>
<tr>
<td class="coverFn"><a href="KDB.cc.gcov.html#L13">_ZN8bayesnet3KDB18setHyperparametersERKN8nlohmann16json_abi_v3_11_310basic_jsonISt3mapSt6vectorNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEblmdSaNS2_14adl_serializerES5_IhSaIhEEvEE</a></td>
<td class="coverFnHi">3</td>
<td class="coverFnHi">33</td>
</tr>
<tr>
<td class="coverFn"><a href="KDB.cc.gcov.html#L8">_ZN8bayesnet3KDBC2Eif</a></td>
<td class="coverFnHi">37</td>
<td class="coverFnHi">407</td>
</tr>
<tr>
<td class="coverFn"><a href="KDB.cc.gcov.html#L101">_ZNK8bayesnet3KDB5graphERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE</a></td>
<td class="coverFnHi">2</td>
<td class="coverFnHi">22</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -69,25 +69,25 @@
<span id="L7"><span class="lineNum"> 7</span> : #include &quot;KDB.h&quot;</span>
<span id="L8"><span class="lineNum"> 8</span> : </span>
<span id="L9"><span class="lineNum"> 9</span> : namespace bayesnet {</span>
<span id="L10"><span class="lineNum"> 10</span> <span class="tlaGNC tlaBgGNC"> 37 : KDB::KDB(int k, float theta) : Classifier(Network()), k(k), theta(theta)</span></span>
<span id="L10"><span class="lineNum"> 10</span> <span class="tlaGNC tlaBgGNC"> 407 : KDB::KDB(int k, float theta) : Classifier(Network()), k(k), theta(theta)</span></span>
<span id="L11"><span class="lineNum"> 11</span> : {</span>
<span id="L12"><span class="lineNum"> 12</span> <span class="tlaGNC"> 111 : validHyperparameters = { &quot;k&quot;, &quot;theta&quot; };</span></span>
<span id="L12"><span class="lineNum"> 12</span> <span class="tlaGNC"> 1221 : validHyperparameters = { &quot;k&quot;, &quot;theta&quot; };</span></span>
<span id="L13"><span class="lineNum"> 13</span> : </span>
<span id="L14"><span class="lineNum"> 14</span> <span class="tlaGNC"> 111 : }</span></span>
<span id="L15"><span class="lineNum"> 15</span> <span class="tlaGNC"> 3 : void KDB::setHyperparameters(const nlohmann::json&amp; hyperparameters_)</span></span>
<span id="L14"><span class="lineNum"> 14</span> <span class="tlaGNC"> 1221 : }</span></span>
<span id="L15"><span class="lineNum"> 15</span> <span class="tlaGNC"> 33 : void KDB::setHyperparameters(const nlohmann::json&amp; hyperparameters_)</span></span>
<span id="L16"><span class="lineNum"> 16</span> : {</span>
<span id="L17"><span class="lineNum"> 17</span> <span class="tlaGNC"> 3 : auto hyperparameters = hyperparameters_;</span></span>
<span id="L18"><span class="lineNum"> 18</span> <span class="tlaGNC"> 3 : if (hyperparameters.contains(&quot;k&quot;)) {</span></span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC"> 1 : k = hyperparameters[&quot;k&quot;];</span></span>
<span id="L20"><span class="lineNum"> 20</span> <span class="tlaGNC"> 1 : hyperparameters.erase(&quot;k&quot;);</span></span>
<span id="L17"><span class="lineNum"> 17</span> <span class="tlaGNC"> 33 : auto hyperparameters = hyperparameters_;</span></span>
<span id="L18"><span class="lineNum"> 18</span> <span class="tlaGNC"> 33 : if (hyperparameters.contains(&quot;k&quot;)) {</span></span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC"> 11 : k = hyperparameters[&quot;k&quot;];</span></span>
<span id="L20"><span class="lineNum"> 20</span> <span class="tlaGNC"> 11 : hyperparameters.erase(&quot;k&quot;);</span></span>
<span id="L21"><span class="lineNum"> 21</span> : }</span>
<span id="L22"><span class="lineNum"> 22</span> <span class="tlaGNC"> 3 : if (hyperparameters.contains(&quot;theta&quot;)) {</span></span>
<span id="L23"><span class="lineNum"> 23</span> <span class="tlaGNC"> 1 : theta = hyperparameters[&quot;theta&quot;];</span></span>
<span id="L24"><span class="lineNum"> 24</span> <span class="tlaGNC"> 1 : hyperparameters.erase(&quot;theta&quot;);</span></span>
<span id="L22"><span class="lineNum"> 22</span> <span class="tlaGNC"> 33 : if (hyperparameters.contains(&quot;theta&quot;)) {</span></span>
<span id="L23"><span class="lineNum"> 23</span> <span class="tlaGNC"> 11 : theta = hyperparameters[&quot;theta&quot;];</span></span>
<span id="L24"><span class="lineNum"> 24</span> <span class="tlaGNC"> 11 : hyperparameters.erase(&quot;theta&quot;);</span></span>
<span id="L25"><span class="lineNum"> 25</span> : }</span>
<span id="L26"><span class="lineNum"> 26</span> <span class="tlaGNC"> 3 : Classifier::setHyperparameters(hyperparameters);</span></span>
<span id="L27"><span class="lineNum"> 27</span> <span class="tlaGNC"> 3 : }</span></span>
<span id="L28"><span class="lineNum"> 28</span> <span class="tlaGNC"> 13 : void KDB::buildModel(const torch::Tensor&amp; weights)</span></span>
<span id="L26"><span class="lineNum"> 26</span> <span class="tlaGNC"> 33 : Classifier::setHyperparameters(hyperparameters);</span></span>
<span id="L27"><span class="lineNum"> 27</span> <span class="tlaGNC"> 33 : }</span></span>
<span id="L28"><span class="lineNum"> 28</span> <span class="tlaGNC"> 143 : void KDB::buildModel(const torch::Tensor&amp; weights)</span></span>
<span id="L29"><span class="lineNum"> 29</span> : {</span>
<span id="L30"><span class="lineNum"> 30</span> : /*</span>
<span id="L31"><span class="lineNum"> 31</span> : 1. For each feature Xi, compute mutual information, I(X;C),</span>
@ -110,66 +110,66 @@
<span id="L48"><span class="lineNum"> 48</span> : */</span>
<span id="L49"><span class="lineNum"> 49</span> : // 1. For each feature Xi, compute mutual information, I(X;C),</span>
<span id="L50"><span class="lineNum"> 50</span> : // where C is the class.</span>
<span id="L51"><span class="lineNum"> 51</span> <span class="tlaGNC"> 13 : addNodes();</span></span>
<span id="L52"><span class="lineNum"> 52</span> <span class="tlaGNC"> 39 : const torch::Tensor&amp; y = dataset.index({ -1, &quot;...&quot; });</span></span>
<span id="L53"><span class="lineNum"> 53</span> <span class="tlaGNC"> 13 : std::vector&lt;double&gt; mi;</span></span>
<span id="L54"><span class="lineNum"> 54</span> <span class="tlaGNC"> 99 : for (auto i = 0; i &lt; features.size(); i++) {</span></span>
<span id="L55"><span class="lineNum"> 55</span> <span class="tlaGNC"> 258 : torch::Tensor firstFeature = dataset.index({ i, &quot;...&quot; });</span></span>
<span id="L56"><span class="lineNum"> 56</span> <span class="tlaGNC"> 86 : mi.push_back(metrics.mutualInformation(firstFeature, y, weights));</span></span>
<span id="L57"><span class="lineNum"> 57</span> <span class="tlaGNC"> 86 : }</span></span>
<span id="L51"><span class="lineNum"> 51</span> <span class="tlaGNC"> 143 : addNodes();</span></span>
<span id="L52"><span class="lineNum"> 52</span> <span class="tlaGNC"> 429 : const torch::Tensor&amp; y = dataset.index({ -1, &quot;...&quot; });</span></span>
<span id="L53"><span class="lineNum"> 53</span> <span class="tlaGNC"> 143 : std::vector&lt;double&gt; mi;</span></span>
<span id="L54"><span class="lineNum"> 54</span> <span class="tlaGNC"> 1089 : for (auto i = 0; i &lt; features.size(); i++) {</span></span>
<span id="L55"><span class="lineNum"> 55</span> <span class="tlaGNC"> 2838 : torch::Tensor firstFeature = dataset.index({ i, &quot;...&quot; });</span></span>
<span id="L56"><span class="lineNum"> 56</span> <span class="tlaGNC"> 946 : mi.push_back(metrics.mutualInformation(firstFeature, y, weights));</span></span>
<span id="L57"><span class="lineNum"> 57</span> <span class="tlaGNC"> 946 : }</span></span>
<span id="L58"><span class="lineNum"> 58</span> : // 2. Compute class conditional mutual information I(Xi;XjIC), f or each</span>
<span id="L59"><span class="lineNum"> 59</span> <span class="tlaGNC"> 13 : auto conditionalEdgeWeights = metrics.conditionalEdge(weights);</span></span>
<span id="L59"><span class="lineNum"> 59</span> <span class="tlaGNC"> 143 : auto conditionalEdgeWeights = metrics.conditionalEdge(weights);</span></span>
<span id="L60"><span class="lineNum"> 60</span> : // 3. Let the used variable list, S, be empty.</span>
<span id="L61"><span class="lineNum"> 61</span> <span class="tlaGNC"> 13 : std::vector&lt;int&gt; S;</span></span>
<span id="L61"><span class="lineNum"> 61</span> <span class="tlaGNC"> 143 : std::vector&lt;int&gt; S;</span></span>
<span id="L62"><span class="lineNum"> 62</span> : // 4. Let the DAG network being constructed, BN, begin with a single</span>
<span id="L63"><span class="lineNum"> 63</span> : // class node, C.</span>
<span id="L64"><span class="lineNum"> 64</span> : // 5. Repeat until S includes all domain features</span>
<span id="L65"><span class="lineNum"> 65</span> : // 5.1. Select feature Xmax which is not in S and has the largest value</span>
<span id="L66"><span class="lineNum"> 66</span> : // I(Xmax;C).</span>
<span id="L67"><span class="lineNum"> 67</span> <span class="tlaGNC"> 13 : auto order = argsort(mi);</span></span>
<span id="L68"><span class="lineNum"> 68</span> <span class="tlaGNC"> 99 : for (auto idx : order) {</span></span>
<span id="L67"><span class="lineNum"> 67</span> <span class="tlaGNC"> 143 : auto order = argsort(mi);</span></span>
<span id="L68"><span class="lineNum"> 68</span> <span class="tlaGNC"> 1089 : for (auto idx : order) {</span></span>
<span id="L69"><span class="lineNum"> 69</span> : // 5.2. Add a node to BN representing Xmax.</span>
<span id="L70"><span class="lineNum"> 70</span> : // 5.3. Add an arc from C to Xmax in BN.</span>
<span id="L71"><span class="lineNum"> 71</span> <span class="tlaGNC"> 86 : model.addEdge(className, features[idx]);</span></span>
<span id="L71"><span class="lineNum"> 71</span> <span class="tlaGNC"> 946 : model.addEdge(className, features[idx]);</span></span>
<span id="L72"><span class="lineNum"> 72</span> : // 5.4. Add m = min(lSl,/c) arcs from m distinct features Xj in S with</span>
<span id="L73"><span class="lineNum"> 73</span> : // the highest value for I(Xmax;X,jC).</span>
<span id="L74"><span class="lineNum"> 74</span> <span class="tlaGNC"> 86 : add_m_edges(idx, S, conditionalEdgeWeights);</span></span>
<span id="L74"><span class="lineNum"> 74</span> <span class="tlaGNC"> 946 : add_m_edges(idx, S, conditionalEdgeWeights);</span></span>
<span id="L75"><span class="lineNum"> 75</span> : // 5.5. Add Xmax to S.</span>
<span id="L76"><span class="lineNum"> 76</span> <span class="tlaGNC"> 86 : S.push_back(idx);</span></span>
<span id="L76"><span class="lineNum"> 76</span> <span class="tlaGNC"> 946 : S.push_back(idx);</span></span>
<span id="L77"><span class="lineNum"> 77</span> : }</span>
<span id="L78"><span class="lineNum"> 78</span> <span class="tlaGNC"> 112 : }</span></span>
<span id="L79"><span class="lineNum"> 79</span> <span class="tlaGNC"> 86 : void KDB::add_m_edges(int idx, std::vector&lt;int&gt;&amp; S, torch::Tensor&amp; weights)</span></span>
<span id="L78"><span class="lineNum"> 78</span> <span class="tlaGNC"> 1232 : }</span></span>
<span id="L79"><span class="lineNum"> 79</span> <span class="tlaGNC"> 946 : void KDB::add_m_edges(int idx, std::vector&lt;int&gt;&amp; S, torch::Tensor&amp; weights)</span></span>
<span id="L80"><span class="lineNum"> 80</span> : {</span>
<span id="L81"><span class="lineNum"> 81</span> <span class="tlaGNC"> 86 : auto n_edges = std::min(k, static_cast&lt;int&gt;(S.size()));</span></span>
<span id="L82"><span class="lineNum"> 82</span> <span class="tlaGNC"> 86 : auto cond_w = clone(weights);</span></span>
<span id="L83"><span class="lineNum"> 83</span> <span class="tlaGNC"> 86 : bool exit_cond = k == 0;</span></span>
<span id="L84"><span class="lineNum"> 84</span> <span class="tlaGNC"> 86 : int num = 0;</span></span>
<span id="L85"><span class="lineNum"> 85</span> <span class="tlaGNC"> 251 : while (!exit_cond) {</span></span>
<span id="L86"><span class="lineNum"> 86</span> <span class="tlaGNC"> 660 : auto max_minfo = argmax(cond_w.index({ idx, &quot;...&quot; })).item&lt;int&gt;();</span></span>
<span id="L87"><span class="lineNum"> 87</span> <span class="tlaGNC"> 165 : auto belongs = find(S.begin(), S.end(), max_minfo) != S.end();</span></span>
<span id="L88"><span class="lineNum"> 88</span> <span class="tlaGNC"> 441 : if (belongs &amp;&amp; cond_w.index({ idx, max_minfo }).item&lt;float&gt;() &gt; theta) {</span></span>
<span id="L81"><span class="lineNum"> 81</span> <span class="tlaGNC"> 946 : auto n_edges = std::min(k, static_cast&lt;int&gt;(S.size()));</span></span>
<span id="L82"><span class="lineNum"> 82</span> <span class="tlaGNC"> 946 : auto cond_w = clone(weights);</span></span>
<span id="L83"><span class="lineNum"> 83</span> <span class="tlaGNC"> 946 : bool exit_cond = k == 0;</span></span>
<span id="L84"><span class="lineNum"> 84</span> <span class="tlaGNC"> 946 : int num = 0;</span></span>
<span id="L85"><span class="lineNum"> 85</span> <span class="tlaGNC"> 2761 : while (!exit_cond) {</span></span>
<span id="L86"><span class="lineNum"> 86</span> <span class="tlaGNC"> 7260 : auto max_minfo = argmax(cond_w.index({ idx, &quot;...&quot; })).item&lt;int&gt;();</span></span>
<span id="L87"><span class="lineNum"> 87</span> <span class="tlaGNC"> 1815 : auto belongs = find(S.begin(), S.end(), max_minfo) != S.end();</span></span>
<span id="L88"><span class="lineNum"> 88</span> <span class="tlaGNC"> 4851 : if (belongs &amp;&amp; cond_w.index({ idx, max_minfo }).item&lt;float&gt;() &gt; theta) {</span></span>
<span id="L89"><span class="lineNum"> 89</span> : try {</span>
<span id="L90"><span class="lineNum"> 90</span> <span class="tlaGNC"> 80 : model.addEdge(features[max_minfo], features[idx]);</span></span>
<span id="L91"><span class="lineNum"> 91</span> <span class="tlaGNC"> 80 : num++;</span></span>
<span id="L90"><span class="lineNum"> 90</span> <span class="tlaGNC"> 880 : model.addEdge(features[max_minfo], features[idx]);</span></span>
<span id="L91"><span class="lineNum"> 91</span> <span class="tlaGNC"> 880 : num++;</span></span>
<span id="L92"><span class="lineNum"> 92</span> : }</span>
<span id="L93"><span class="lineNum"> 93</span> <span class="tlaUNC tlaBgUNC"> 0 : catch (const std::invalid_argument&amp; e) {</span></span>
<span id="L94"><span class="lineNum"> 94</span> : // Loops are not allowed</span>
<span id="L95"><span class="lineNum"> 95</span> <span class="tlaUNC"> 0 : }</span></span>
<span id="L96"><span class="lineNum"> 96</span> : }</span>
<span id="L97"><span class="lineNum"> 97</span> <span class="tlaGNC tlaBgGNC"> 660 : cond_w.index_put_({ idx, max_minfo }, -1);</span></span>
<span id="L98"><span class="lineNum"> 98</span> <span class="tlaGNC"> 495 : auto candidates_mask = cond_w.index({ idx, &quot;...&quot; }).gt(theta);</span></span>
<span id="L99"><span class="lineNum"> 99</span> <span class="tlaGNC"> 165 : auto candidates = candidates_mask.nonzero();</span></span>
<span id="L100"><span class="lineNum"> 100</span> <span class="tlaGNC"> 165 : exit_cond = num == n_edges || candidates.size(0) == 0;</span></span>
<span id="L101"><span class="lineNum"> 101</span> <span class="tlaGNC"> 165 : }</span></span>
<span id="L102"><span class="lineNum"> 102</span> <span class="tlaGNC"> 673 : }</span></span>
<span id="L103"><span class="lineNum"> 103</span> <span class="tlaGNC"> 2 : std::vector&lt;std::string&gt; KDB::graph(const std::string&amp; title) const</span></span>
<span id="L97"><span class="lineNum"> 97</span> <span class="tlaGNC tlaBgGNC"> 7260 : cond_w.index_put_({ idx, max_minfo }, -1);</span></span>
<span id="L98"><span class="lineNum"> 98</span> <span class="tlaGNC"> 5445 : auto candidates_mask = cond_w.index({ idx, &quot;...&quot; }).gt(theta);</span></span>
<span id="L99"><span class="lineNum"> 99</span> <span class="tlaGNC"> 1815 : auto candidates = candidates_mask.nonzero();</span></span>
<span id="L100"><span class="lineNum"> 100</span> <span class="tlaGNC"> 1815 : exit_cond = num == n_edges || candidates.size(0) == 0;</span></span>
<span id="L101"><span class="lineNum"> 101</span> <span class="tlaGNC"> 1815 : }</span></span>
<span id="L102"><span class="lineNum"> 102</span> <span class="tlaGNC"> 7403 : }</span></span>
<span id="L103"><span class="lineNum"> 103</span> <span class="tlaGNC"> 22 : std::vector&lt;std::string&gt; KDB::graph(const std::string&amp; title) const</span></span>
<span id="L104"><span class="lineNum"> 104</span> : {</span>
<span id="L105"><span class="lineNum"> 105</span> <span class="tlaGNC"> 2 : std::string header{ title };</span></span>
<span id="L106"><span class="lineNum"> 106</span> <span class="tlaGNC"> 2 : if (title == &quot;KDB&quot;) {</span></span>
<span id="L107"><span class="lineNum"> 107</span> <span class="tlaGNC"> 2 : header += &quot; (k=&quot; + std::to_string(k) + &quot;, theta=&quot; + std::to_string(theta) + &quot;)&quot;;</span></span>
<span id="L105"><span class="lineNum"> 105</span> <span class="tlaGNC"> 22 : std::string header{ title };</span></span>
<span id="L106"><span class="lineNum"> 106</span> <span class="tlaGNC"> 22 : if (title == &quot;KDB&quot;) {</span></span>
<span id="L107"><span class="lineNum"> 107</span> <span class="tlaGNC"> 22 : header += &quot; (k=&quot; + std::to_string(k) + &quot;, theta=&quot; + std::to_string(theta) + &quot;)&quot;;</span></span>
<span id="L108"><span class="lineNum"> 108</span> : }</span>
<span id="L109"><span class="lineNum"> 109</span> <span class="tlaGNC"> 4 : return model.graph(header);</span></span>
<span id="L110"><span class="lineNum"> 110</span> <span class="tlaGNC"> 2 : }</span></span>
<span id="L109"><span class="lineNum"> 109</span> <span class="tlaGNC"> 44 : return model.graph(header);</span></span>
<span id="L110"><span class="lineNum"> 110</span> <span class="tlaGNC"> 22 : }</span></span>
<span id="L111"><span class="lineNum"> 111</span> : }</span>
</pre>
</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,28 +65,28 @@
<tr>
<td class="coverFn"><a href="KDBLd.cc.gcov.html#L29">_ZNK8bayesnet5KDBLd5graphERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE</a></td>
<td class="coverFnHi">1</td>
<td class="coverFnHi">11</td>
</tr>
<tr>
<td class="coverFn"><a href="KDBLd.cc.gcov.html#L24">_ZN8bayesnet5KDBLd7predictERN2at6TensorE</a></td>
<td class="coverFnHi">4</td>
<td class="coverFnHi">44</td>
</tr>
<tr>
<td class="coverFn"><a href="KDBLd.cc.gcov.html#L9">_ZN8bayesnet5KDBLd3fitERN2at6TensorES3_RKSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaISA_EERKSA_RSt3mapISA_S4_IiSaIiEESt4lessISA_ESaISt4pairISF_SJ_EEE</a></td>
<td class="coverFnHi">5</td>
<td class="coverFnHi">55</td>
</tr>
<tr>
<td class="coverFn"><a href="KDBLd.cc.gcov.html#L8">_ZN8bayesnet5KDBLdC2Ei</a></td>
<td class="coverFnHi">17</td>
<td class="coverFnHi">187</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,28 +65,28 @@
<tr>
<td class="coverFn"><a href="KDBLd.cc.gcov.html#L9">_ZN8bayesnet5KDBLd3fitERN2at6TensorES3_RKSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaISA_EERKSA_RSt3mapISA_S4_IiSaIiEESt4lessISA_ESaISt4pairISF_SJ_EEE</a></td>
<td class="coverFnHi">5</td>
<td class="coverFnHi">55</td>
</tr>
<tr>
<td class="coverFn"><a href="KDBLd.cc.gcov.html#L24">_ZN8bayesnet5KDBLd7predictERN2at6TensorE</a></td>
<td class="coverFnHi">4</td>
<td class="coverFnHi">44</td>
</tr>
<tr>
<td class="coverFn"><a href="KDBLd.cc.gcov.html#L8">_ZN8bayesnet5KDBLdC2Ei</a></td>
<td class="coverFnHi">17</td>
<td class="coverFnHi">187</td>
</tr>
<tr>
<td class="coverFn"><a href="KDBLd.cc.gcov.html#L29">_ZNK8bayesnet5KDBLd5graphERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE</a></td>
<td class="coverFnHi">1</td>
<td class="coverFnHi">11</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -69,30 +69,30 @@
<span id="L7"><span class="lineNum"> 7</span> : #include &quot;KDBLd.h&quot;</span>
<span id="L8"><span class="lineNum"> 8</span> : </span>
<span id="L9"><span class="lineNum"> 9</span> : namespace bayesnet {</span>
<span id="L10"><span class="lineNum"> 10</span> <span class="tlaGNC tlaBgGNC"> 17 : KDBLd::KDBLd(int k) : KDB(k), Proposal(dataset, features, className) {}</span></span>
<span id="L11"><span class="lineNum"> 11</span> <span class="tlaGNC"> 5 : KDBLd&amp; KDBLd::fit(torch::Tensor&amp; X_, torch::Tensor&amp; y_, const std::vector&lt;std::string&gt;&amp; features_, const std::string&amp; className_, map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states_)</span></span>
<span id="L10"><span class="lineNum"> 10</span> <span class="tlaGNC tlaBgGNC"> 187 : KDBLd::KDBLd(int k) : KDB(k), Proposal(dataset, features, className) {}</span></span>
<span id="L11"><span class="lineNum"> 11</span> <span class="tlaGNC"> 55 : KDBLd&amp; KDBLd::fit(torch::Tensor&amp; X_, torch::Tensor&amp; y_, const std::vector&lt;std::string&gt;&amp; features_, const std::string&amp; className_, map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states_)</span></span>
<span id="L12"><span class="lineNum"> 12</span> : {</span>
<span id="L13"><span class="lineNum"> 13</span> <span class="tlaGNC"> 5 : checkInput(X_, y_);</span></span>
<span id="L14"><span class="lineNum"> 14</span> <span class="tlaGNC"> 5 : features = features_;</span></span>
<span id="L15"><span class="lineNum"> 15</span> <span class="tlaGNC"> 5 : className = className_;</span></span>
<span id="L16"><span class="lineNum"> 16</span> <span class="tlaGNC"> 5 : Xf = X_;</span></span>
<span id="L17"><span class="lineNum"> 17</span> <span class="tlaGNC"> 5 : y = y_;</span></span>
<span id="L13"><span class="lineNum"> 13</span> <span class="tlaGNC"> 55 : checkInput(X_, y_);</span></span>
<span id="L14"><span class="lineNum"> 14</span> <span class="tlaGNC"> 55 : features = features_;</span></span>
<span id="L15"><span class="lineNum"> 15</span> <span class="tlaGNC"> 55 : className = className_;</span></span>
<span id="L16"><span class="lineNum"> 16</span> <span class="tlaGNC"> 55 : Xf = X_;</span></span>
<span id="L17"><span class="lineNum"> 17</span> <span class="tlaGNC"> 55 : y = y_;</span></span>
<span id="L18"><span class="lineNum"> 18</span> : // Fills std::vectors Xv &amp; yv with the data from tensors X_ (discretized) &amp; y</span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC"> 5 : states = fit_local_discretization(y);</span></span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC"> 55 : states = fit_local_discretization(y);</span></span>
<span id="L20"><span class="lineNum"> 20</span> : // We have discretized the input data</span>
<span id="L21"><span class="lineNum"> 21</span> : // 1st we need to fit the model to build the normal KDB structure, KDB::fit initializes the base Bayesian network</span>
<span id="L22"><span class="lineNum"> 22</span> <span class="tlaGNC"> 5 : KDB::fit(dataset, features, className, states);</span></span>
<span id="L23"><span class="lineNum"> 23</span> <span class="tlaGNC"> 5 : states = localDiscretizationProposal(states, model);</span></span>
<span id="L24"><span class="lineNum"> 24</span> <span class="tlaGNC"> 5 : return *this;</span></span>
<span id="L22"><span class="lineNum"> 22</span> <span class="tlaGNC"> 55 : KDB::fit(dataset, features, className, states);</span></span>
<span id="L23"><span class="lineNum"> 23</span> <span class="tlaGNC"> 55 : states = localDiscretizationProposal(states, model);</span></span>
<span id="L24"><span class="lineNum"> 24</span> <span class="tlaGNC"> 55 : return *this;</span></span>
<span id="L25"><span class="lineNum"> 25</span> : }</span>
<span id="L26"><span class="lineNum"> 26</span> <span class="tlaGNC"> 4 : torch::Tensor KDBLd::predict(torch::Tensor&amp; X)</span></span>
<span id="L26"><span class="lineNum"> 26</span> <span class="tlaGNC"> 44 : torch::Tensor KDBLd::predict(torch::Tensor&amp; X)</span></span>
<span id="L27"><span class="lineNum"> 27</span> : {</span>
<span id="L28"><span class="lineNum"> 28</span> <span class="tlaGNC"> 4 : auto Xt = prepareX(X);</span></span>
<span id="L29"><span class="lineNum"> 29</span> <span class="tlaGNC"> 8 : return KDB::predict(Xt);</span></span>
<span id="L30"><span class="lineNum"> 30</span> <span class="tlaGNC"> 4 : }</span></span>
<span id="L31"><span class="lineNum"> 31</span> <span class="tlaGNC"> 1 : std::vector&lt;std::string&gt; KDBLd::graph(const std::string&amp; name) const</span></span>
<span id="L28"><span class="lineNum"> 28</span> <span class="tlaGNC"> 44 : auto Xt = prepareX(X);</span></span>
<span id="L29"><span class="lineNum"> 29</span> <span class="tlaGNC"> 88 : return KDB::predict(Xt);</span></span>
<span id="L30"><span class="lineNum"> 30</span> <span class="tlaGNC"> 44 : }</span></span>
<span id="L31"><span class="lineNum"> 31</span> <span class="tlaGNC"> 11 : std::vector&lt;std::string&gt; KDBLd::graph(const std::string&amp; name) const</span></span>
<span id="L32"><span class="lineNum"> 32</span> : {</span>
<span id="L33"><span class="lineNum"> 33</span> <span class="tlaGNC"> 1 : return KDB::graph(name);</span></span>
<span id="L33"><span class="lineNum"> 33</span> <span class="tlaGNC"> 11 : return KDB::graph(name);</span></span>
<span id="L34"><span class="lineNum"> 34</span> : }</span>
<span id="L35"><span class="lineNum"> 35</span> : }</span>
</pre>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryMed">88.9&nbsp;%</td>
@ -65,14 +65,14 @@
<tr>
<td class="coverFn"><a href="Proposal.cc.gcov.html#L104">_ZN8bayesnet8Proposal8prepareXERN2at6TensorE</a></td>
<td class="coverFnHi">42</td>
<td class="coverFnHi">462</td>
</tr>
<tr>
<td class="coverFn"><a href="Proposal.cc.gcov.html#L10">_ZN8bayesnet8ProposalD0Ev</a></td>
<td class="coverFnHi">50</td>
<td class="coverFnHi">550</td>
</tr>
@ -86,49 +86,49 @@
<tr>
<td class="coverFnAlias"><a href="Proposal.cc.gcov.html#L10">_ZN8bayesnet8ProposalD2Ev</a></td>
<td class="coverFnAliasHi">50</td>
<td class="coverFnAliasHi">550</td>
</tr>
<tr>
<td class="coverFn"><a href="Proposal.cc.gcov.html#L25">_ZN8bayesnet8Proposal27localDiscretizationProposalERKSt3mapINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESt6vectorIiSaIiEESt4lessIS7_ESaISt4pairIKS7_SA_EEERNS_7NetworkE</a></td>
<td class="coverFnHi">53</td>
<td class="coverFnHi">583</td>
</tr>
<tr>
<td class="coverFn"><a href="Proposal.cc.gcov.html#L16">_ZN8bayesnet8Proposal10checkInputERKN2at6TensorES4_</a></td>
<td class="coverFnHi">57</td>
<td class="coverFnHi">627</td>
</tr>
<tr>
<td class="coverFn"><a href="Proposal.cc.gcov.html#L77">_ZN8bayesnet8Proposal24fit_local_discretizationB5cxx11ERKN2at6TensorE</a></td>
<td class="coverFnHi">58</td>
<td class="coverFnHi">638</td>
</tr>
<tr>
<td class="coverFn"><a href="Proposal.cc.gcov.html#L9">_ZN8bayesnet8ProposalC2ERN2at6TensorERSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaISA_EERSA_</a></td>
<td class="coverFnHi">106</td>
<td class="coverFnHi">1166</td>
</tr>
<tr>
<td class="coverFn"><a href="Proposal.cc.gcov.html#L47">_ZZN8bayesnet8Proposal27localDiscretizationProposalERKSt3mapINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESt6vectorIiSaIiEESt4lessIS7_ESaISt4pairIKS7_SA_EEERNS_7NetworkEENKUlRKT_E0_clIS7_EEDaSO_</a></td>
<td class="coverFnHi">343</td>
<td class="coverFnHi">3773</td>
</tr>
<tr>
<td class="coverFn"><a href="Proposal.cc.gcov.html#L41">_ZZN8bayesnet8Proposal27localDiscretizationProposalERKSt3mapINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESt6vectorIiSaIiEESt4lessIS7_ESaISt4pairIKS7_SA_EEERNS_7NetworkEENKUlRKT_E_clIPNS_4NodeEEEDaSO_</a></td>
<td class="coverFnHi">674</td>
<td class="coverFnHi">7414</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryMed">88.9&nbsp;%</td>
@ -65,42 +65,42 @@
<tr>
<td class="coverFn"><a href="Proposal.cc.gcov.html#L16">_ZN8bayesnet8Proposal10checkInputERKN2at6TensorES4_</a></td>
<td class="coverFnHi">57</td>
<td class="coverFnHi">627</td>
</tr>
<tr>
<td class="coverFn"><a href="Proposal.cc.gcov.html#L77">_ZN8bayesnet8Proposal24fit_local_discretizationB5cxx11ERKN2at6TensorE</a></td>
<td class="coverFnHi">58</td>
<td class="coverFnHi">638</td>
</tr>
<tr>
<td class="coverFn"><a href="Proposal.cc.gcov.html#L25">_ZN8bayesnet8Proposal27localDiscretizationProposalERKSt3mapINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESt6vectorIiSaIiEESt4lessIS7_ESaISt4pairIKS7_SA_EEERNS_7NetworkE</a></td>
<td class="coverFnHi">53</td>
<td class="coverFnHi">583</td>
</tr>
<tr>
<td class="coverFn"><a href="Proposal.cc.gcov.html#L104">_ZN8bayesnet8Proposal8prepareXERN2at6TensorE</a></td>
<td class="coverFnHi">42</td>
<td class="coverFnHi">462</td>
</tr>
<tr>
<td class="coverFn"><a href="Proposal.cc.gcov.html#L9">_ZN8bayesnet8ProposalC2ERN2at6TensorERSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaISA_EERSA_</a></td>
<td class="coverFnHi">106</td>
<td class="coverFnHi">1166</td>
</tr>
<tr>
<td class="coverFn"><a href="Proposal.cc.gcov.html#L10">_ZN8bayesnet8ProposalD0Ev</a></td>
<td class="coverFnHi">50</td>
<td class="coverFnHi">550</td>
</tr>
@ -114,21 +114,21 @@
<tr>
<td class="coverFnAlias"><a href="Proposal.cc.gcov.html#L10">_ZN8bayesnet8ProposalD2Ev</a></td>
<td class="coverFnAliasHi">50</td>
<td class="coverFnAliasHi">550</td>
</tr>
<tr>
<td class="coverFn"><a href="Proposal.cc.gcov.html#L47">_ZZN8bayesnet8Proposal27localDiscretizationProposalERKSt3mapINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESt6vectorIiSaIiEESt4lessIS7_ESaISt4pairIKS7_SA_EEERNS_7NetworkEENKUlRKT_E0_clIS7_EEDaSO_</a></td>
<td class="coverFnHi">343</td>
<td class="coverFnHi">3773</td>
</tr>
<tr>
<td class="coverFn"><a href="Proposal.cc.gcov.html#L41">_ZZN8bayesnet8Proposal27localDiscretizationProposalERKSt3mapINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESt6vectorIiSaIiEESt4lessIS7_ESaISt4pairIKS7_SA_EEERNS_7NetworkEENKUlRKT_E_clIPNS_4NodeEEEDaSO_</a></td>
<td class="coverFnHi">674</td>
<td class="coverFnHi">7414</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryMed">88.9&nbsp;%</td>
@ -70,111 +70,111 @@
<span id="L8"><span class="lineNum"> 8</span> : #include &quot;Proposal.h&quot;</span>
<span id="L9"><span class="lineNum"> 9</span> : </span>
<span id="L10"><span class="lineNum"> 10</span> : namespace bayesnet {</span>
<span id="L11"><span class="lineNum"> 11</span> <span class="tlaGNC tlaBgGNC"> 106 : Proposal::Proposal(torch::Tensor&amp; dataset_, std::vector&lt;std::string&gt;&amp; features_, std::string&amp; className_) : pDataset(dataset_), pFeatures(features_), pClassName(className_) {}</span></span>
<span id="L12"><span class="lineNum"> 12</span> <span class="tlaGNC"> 50 : Proposal::~Proposal()</span></span>
<span id="L11"><span class="lineNum"> 11</span> <span class="tlaGNC tlaBgGNC"> 1166 : Proposal::Proposal(torch::Tensor&amp; dataset_, std::vector&lt;std::string&gt;&amp; features_, std::string&amp; className_) : pDataset(dataset_), pFeatures(features_), pClassName(className_) {}</span></span>
<span id="L12"><span class="lineNum"> 12</span> <span class="tlaGNC"> 550 : Proposal::~Proposal()</span></span>
<span id="L13"><span class="lineNum"> 13</span> : {</span>
<span id="L14"><span class="lineNum"> 14</span> <span class="tlaGNC"> 474 : for (auto&amp; [key, value] : discretizers) {</span></span>
<span id="L15"><span class="lineNum"> 15</span> <span class="tlaGNC"> 424 : delete value;</span></span>
<span id="L14"><span class="lineNum"> 14</span> <span class="tlaGNC"> 5214 : for (auto&amp; [key, value] : discretizers) {</span></span>
<span id="L15"><span class="lineNum"> 15</span> <span class="tlaGNC"> 4664 : delete value;</span></span>
<span id="L16"><span class="lineNum"> 16</span> : }</span>
<span id="L17"><span class="lineNum"> 17</span> <span class="tlaGNC"> 50 : }</span></span>
<span id="L18"><span class="lineNum"> 18</span> <span class="tlaGNC"> 57 : void Proposal::checkInput(const torch::Tensor&amp; X, const torch::Tensor&amp; y)</span></span>
<span id="L17"><span class="lineNum"> 17</span> <span class="tlaGNC"> 550 : }</span></span>
<span id="L18"><span class="lineNum"> 18</span> <span class="tlaGNC"> 627 : void Proposal::checkInput(const torch::Tensor&amp; X, const torch::Tensor&amp; y)</span></span>
<span id="L19"><span class="lineNum"> 19</span> : {</span>
<span id="L20"><span class="lineNum"> 20</span> <span class="tlaGNC"> 57 : if (!torch::is_floating_point(X)) {</span></span>
<span id="L20"><span class="lineNum"> 20</span> <span class="tlaGNC"> 627 : if (!torch::is_floating_point(X)) {</span></span>
<span id="L21"><span class="lineNum"> 21</span> <span class="tlaUNC tlaBgUNC"> 0 : throw std::invalid_argument(&quot;X must be a floating point tensor&quot;);</span></span>
<span id="L22"><span class="lineNum"> 22</span> : }</span>
<span id="L23"><span class="lineNum"> 23</span> <span class="tlaGNC tlaBgGNC"> 57 : if (torch::is_floating_point(y)) {</span></span>
<span id="L23"><span class="lineNum"> 23</span> <span class="tlaGNC tlaBgGNC"> 627 : if (torch::is_floating_point(y)) {</span></span>
<span id="L24"><span class="lineNum"> 24</span> <span class="tlaUNC tlaBgUNC"> 0 : throw std::invalid_argument(&quot;y must be an integer tensor&quot;);</span></span>
<span id="L25"><span class="lineNum"> 25</span> : }</span>
<span id="L26"><span class="lineNum"> 26</span> <span class="tlaGNC tlaBgGNC"> 57 : }</span></span>
<span id="L27"><span class="lineNum"> 27</span> <span class="tlaGNC"> 53 : map&lt;std::string, std::vector&lt;int&gt;&gt; Proposal::localDiscretizationProposal(const map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; oldStates, Network&amp; model)</span></span>
<span id="L26"><span class="lineNum"> 26</span> <span class="tlaGNC tlaBgGNC"> 627 : }</span></span>
<span id="L27"><span class="lineNum"> 27</span> <span class="tlaGNC"> 583 : map&lt;std::string, std::vector&lt;int&gt;&gt; Proposal::localDiscretizationProposal(const map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; oldStates, Network&amp; model)</span></span>
<span id="L28"><span class="lineNum"> 28</span> : {</span>
<span id="L29"><span class="lineNum"> 29</span> : // order of local discretization is important. no good 0, 1, 2...</span>
<span id="L30"><span class="lineNum"> 30</span> : // although we rediscretize features after the local discretization of every feature</span>
<span id="L31"><span class="lineNum"> 31</span> <span class="tlaGNC"> 53 : auto order = model.topological_sort();</span></span>
<span id="L32"><span class="lineNum"> 32</span> <span class="tlaGNC"> 53 : auto&amp; nodes = model.getNodes();</span></span>
<span id="L33"><span class="lineNum"> 33</span> <span class="tlaGNC"> 53 : map&lt;std::string, std::vector&lt;int&gt;&gt; states = oldStates;</span></span>
<span id="L34"><span class="lineNum"> 34</span> <span class="tlaGNC"> 53 : std::vector&lt;int&gt; indicesToReDiscretize;</span></span>
<span id="L35"><span class="lineNum"> 35</span> <span class="tlaGNC"> 53 : bool upgrade = false; // Flag to check if we need to upgrade the model</span></span>
<span id="L36"><span class="lineNum"> 36</span> <span class="tlaGNC"> 444 : for (auto feature : order) {</span></span>
<span id="L37"><span class="lineNum"> 37</span> <span class="tlaGNC"> 391 : auto nodeParents = nodes[feature]-&gt;getParents();</span></span>
<span id="L38"><span class="lineNum"> 38</span> <span class="tlaGNC"> 391 : if (nodeParents.size() &lt; 2) continue; // Only has class as parent</span></span>
<span id="L39"><span class="lineNum"> 39</span> <span class="tlaGNC"> 331 : upgrade = true;</span></span>
<span id="L40"><span class="lineNum"> 40</span> <span class="tlaGNC"> 331 : int index = find(pFeatures.begin(), pFeatures.end(), feature) - pFeatures.begin();</span></span>
<span id="L41"><span class="lineNum"> 41</span> <span class="tlaGNC"> 331 : indicesToReDiscretize.push_back(index); // We need to re-discretize this feature</span></span>
<span id="L42"><span class="lineNum"> 42</span> <span class="tlaGNC"> 331 : std::vector&lt;std::string&gt; parents;</span></span>
<span id="L43"><span class="lineNum"> 43</span> <span class="tlaGNC"> 1005 : transform(nodeParents.begin(), nodeParents.end(), back_inserter(parents), [](const auto&amp; p) { return p-&gt;getName(); });</span></span>
<span id="L31"><span class="lineNum"> 31</span> <span class="tlaGNC"> 583 : auto order = model.topological_sort();</span></span>
<span id="L32"><span class="lineNum"> 32</span> <span class="tlaGNC"> 583 : auto&amp; nodes = model.getNodes();</span></span>
<span id="L33"><span class="lineNum"> 33</span> <span class="tlaGNC"> 583 : map&lt;std::string, std::vector&lt;int&gt;&gt; states = oldStates;</span></span>
<span id="L34"><span class="lineNum"> 34</span> <span class="tlaGNC"> 583 : std::vector&lt;int&gt; indicesToReDiscretize;</span></span>
<span id="L35"><span class="lineNum"> 35</span> <span class="tlaGNC"> 583 : bool upgrade = false; // Flag to check if we need to upgrade the model</span></span>
<span id="L36"><span class="lineNum"> 36</span> <span class="tlaGNC"> 4884 : for (auto feature : order) {</span></span>
<span id="L37"><span class="lineNum"> 37</span> <span class="tlaGNC"> 4301 : auto nodeParents = nodes[feature]-&gt;getParents();</span></span>
<span id="L38"><span class="lineNum"> 38</span> <span class="tlaGNC"> 4301 : if (nodeParents.size() &lt; 2) continue; // Only has class as parent</span></span>
<span id="L39"><span class="lineNum"> 39</span> <span class="tlaGNC"> 3641 : upgrade = true;</span></span>
<span id="L40"><span class="lineNum"> 40</span> <span class="tlaGNC"> 3641 : int index = find(pFeatures.begin(), pFeatures.end(), feature) - pFeatures.begin();</span></span>
<span id="L41"><span class="lineNum"> 41</span> <span class="tlaGNC"> 3641 : indicesToReDiscretize.push_back(index); // We need to re-discretize this feature</span></span>
<span id="L42"><span class="lineNum"> 42</span> <span class="tlaGNC"> 3641 : std::vector&lt;std::string&gt; parents;</span></span>
<span id="L43"><span class="lineNum"> 43</span> <span class="tlaGNC"> 11055 : transform(nodeParents.begin(), nodeParents.end(), back_inserter(parents), [](const auto&amp; p) { return p-&gt;getName(); });</span></span>
<span id="L44"><span class="lineNum"> 44</span> : // Remove class as parent as it will be added later</span>
<span id="L45"><span class="lineNum"> 45</span> <span class="tlaGNC"> 331 : parents.erase(remove(parents.begin(), parents.end(), pClassName), parents.end());</span></span>
<span id="L45"><span class="lineNum"> 45</span> <span class="tlaGNC"> 3641 : parents.erase(remove(parents.begin(), parents.end(), pClassName), parents.end());</span></span>
<span id="L46"><span class="lineNum"> 46</span> : // Get the indices of the parents</span>
<span id="L47"><span class="lineNum"> 47</span> <span class="tlaGNC"> 331 : std::vector&lt;int&gt; indices;</span></span>
<span id="L48"><span class="lineNum"> 48</span> <span class="tlaGNC"> 331 : indices.push_back(-1); // Add class index</span></span>
<span id="L49"><span class="lineNum"> 49</span> <span class="tlaGNC"> 674 : transform(parents.begin(), parents.end(), back_inserter(indices), [&amp;](const auto&amp; p) {return find(pFeatures.begin(), pFeatures.end(), p) - pFeatures.begin(); });</span></span>
<span id="L47"><span class="lineNum"> 47</span> <span class="tlaGNC"> 3641 : std::vector&lt;int&gt; indices;</span></span>
<span id="L48"><span class="lineNum"> 48</span> <span class="tlaGNC"> 3641 : indices.push_back(-1); // Add class index</span></span>
<span id="L49"><span class="lineNum"> 49</span> <span class="tlaGNC"> 7414 : transform(parents.begin(), parents.end(), back_inserter(indices), [&amp;](const auto&amp; p) {return find(pFeatures.begin(), pFeatures.end(), p) - pFeatures.begin(); });</span></span>
<span id="L50"><span class="lineNum"> 50</span> : // Now we fit the discretizer of the feature, conditioned on its parents and the class i.e. discretizer.fit(X[index], X[indices] + y)</span>
<span id="L51"><span class="lineNum"> 51</span> <span class="tlaGNC"> 331 : std::vector&lt;std::string&gt; yJoinParents(Xf.size(1));</span></span>
<span id="L52"><span class="lineNum"> 52</span> <span class="tlaGNC"> 1005 : for (auto idx : indices) {</span></span>
<span id="L53"><span class="lineNum"> 53</span> <span class="tlaGNC"> 239660 : for (int i = 0; i &lt; Xf.size(1); ++i) {</span></span>
<span id="L54"><span class="lineNum"> 54</span> <span class="tlaGNC"> 716958 : yJoinParents[i] += to_string(pDataset.index({ idx, i }).item&lt;int&gt;());</span></span>
<span id="L51"><span class="lineNum"> 51</span> <span class="tlaGNC"> 3641 : std::vector&lt;std::string&gt; yJoinParents(Xf.size(1));</span></span>
<span id="L52"><span class="lineNum"> 52</span> <span class="tlaGNC"> 11055 : for (auto idx : indices) {</span></span>
<span id="L53"><span class="lineNum"> 53</span> <span class="tlaGNC"> 2636260 : for (int i = 0; i &lt; Xf.size(1); ++i) {</span></span>
<span id="L54"><span class="lineNum"> 54</span> <span class="tlaGNC"> 7886538 : yJoinParents[i] += to_string(pDataset.index({ idx, i }).item&lt;int&gt;());</span></span>
<span id="L55"><span class="lineNum"> 55</span> : }</span>
<span id="L56"><span class="lineNum"> 56</span> : }</span>
<span id="L57"><span class="lineNum"> 57</span> <span class="tlaGNC"> 331 : auto arff = ArffFiles();</span></span>
<span id="L58"><span class="lineNum"> 58</span> <span class="tlaGNC"> 331 : auto yxv = arff.factorize(yJoinParents);</span></span>
<span id="L59"><span class="lineNum"> 59</span> <span class="tlaGNC"> 662 : auto xvf_ptr = Xf.index({ index }).data_ptr&lt;float&gt;();</span></span>
<span id="L60"><span class="lineNum"> 60</span> <span class="tlaGNC"> 331 : auto xvf = std::vector&lt;mdlp::precision_t&gt;(xvf_ptr, xvf_ptr + Xf.size(1));</span></span>
<span id="L61"><span class="lineNum"> 61</span> <span class="tlaGNC"> 331 : discretizers[feature]-&gt;fit(xvf, yxv);</span></span>
<span id="L62"><span class="lineNum"> 62</span> <span class="tlaGNC"> 451 : }</span></span>
<span id="L63"><span class="lineNum"> 63</span> <span class="tlaGNC"> 53 : if (upgrade) {</span></span>
<span id="L57"><span class="lineNum"> 57</span> <span class="tlaGNC"> 3641 : auto arff = ArffFiles();</span></span>
<span id="L58"><span class="lineNum"> 58</span> <span class="tlaGNC"> 3641 : auto yxv = arff.factorize(yJoinParents);</span></span>
<span id="L59"><span class="lineNum"> 59</span> <span class="tlaGNC"> 7282 : auto xvf_ptr = Xf.index({ index }).data_ptr&lt;float&gt;();</span></span>
<span id="L60"><span class="lineNum"> 60</span> <span class="tlaGNC"> 3641 : auto xvf = std::vector&lt;mdlp::precision_t&gt;(xvf_ptr, xvf_ptr + Xf.size(1));</span></span>
<span id="L61"><span class="lineNum"> 61</span> <span class="tlaGNC"> 3641 : discretizers[feature]-&gt;fit(xvf, yxv);</span></span>
<span id="L62"><span class="lineNum"> 62</span> <span class="tlaGNC"> 4961 : }</span></span>
<span id="L63"><span class="lineNum"> 63</span> <span class="tlaGNC"> 583 : if (upgrade) {</span></span>
<span id="L64"><span class="lineNum"> 64</span> : // Discretize again X (only the affected indices) with the new fitted discretizers</span>
<span id="L65"><span class="lineNum"> 65</span> <span class="tlaGNC"> 384 : for (auto index : indicesToReDiscretize) {</span></span>
<span id="L66"><span class="lineNum"> 66</span> <span class="tlaGNC"> 662 : auto Xt_ptr = Xf.index({ index }).data_ptr&lt;float&gt;();</span></span>
<span id="L67"><span class="lineNum"> 67</span> <span class="tlaGNC"> 331 : auto Xt = std::vector&lt;float&gt;(Xt_ptr, Xt_ptr + Xf.size(1));</span></span>
<span id="L68"><span class="lineNum"> 68</span> <span class="tlaGNC"> 1324 : pDataset.index_put_({ index, &quot;...&quot; }, torch::tensor(discretizers[pFeatures[index]]-&gt;transform(Xt)));</span></span>
<span id="L69"><span class="lineNum"> 69</span> <span class="tlaGNC"> 331 : auto xStates = std::vector&lt;int&gt;(discretizers[pFeatures[index]]-&gt;getCutPoints().size() + 1);</span></span>
<span id="L70"><span class="lineNum"> 70</span> <span class="tlaGNC"> 331 : iota(xStates.begin(), xStates.end(), 0);</span></span>
<span id="L65"><span class="lineNum"> 65</span> <span class="tlaGNC"> 4224 : for (auto index : indicesToReDiscretize) {</span></span>
<span id="L66"><span class="lineNum"> 66</span> <span class="tlaGNC"> 7282 : auto Xt_ptr = Xf.index({ index }).data_ptr&lt;float&gt;();</span></span>
<span id="L67"><span class="lineNum"> 67</span> <span class="tlaGNC"> 3641 : auto Xt = std::vector&lt;float&gt;(Xt_ptr, Xt_ptr + Xf.size(1));</span></span>
<span id="L68"><span class="lineNum"> 68</span> <span class="tlaGNC"> 14564 : pDataset.index_put_({ index, &quot;...&quot; }, torch::tensor(discretizers[pFeatures[index]]-&gt;transform(Xt)));</span></span>
<span id="L69"><span class="lineNum"> 69</span> <span class="tlaGNC"> 3641 : auto xStates = std::vector&lt;int&gt;(discretizers[pFeatures[index]]-&gt;getCutPoints().size() + 1);</span></span>
<span id="L70"><span class="lineNum"> 70</span> <span class="tlaGNC"> 3641 : iota(xStates.begin(), xStates.end(), 0);</span></span>
<span id="L71"><span class="lineNum"> 71</span> : //Update new states of the feature/node</span>
<span id="L72"><span class="lineNum"> 72</span> <span class="tlaGNC"> 331 : states[pFeatures[index]] = xStates;</span></span>
<span id="L73"><span class="lineNum"> 73</span> <span class="tlaGNC"> 331 : }</span></span>
<span id="L74"><span class="lineNum"> 74</span> <span class="tlaGNC"> 53 : const torch::Tensor weights = torch::full({ pDataset.size(1) }, 1.0 / pDataset.size(1), torch::kDouble);</span></span>
<span id="L75"><span class="lineNum"> 75</span> <span class="tlaGNC"> 53 : model.fit(pDataset, weights, pFeatures, pClassName, states);</span></span>
<span id="L76"><span class="lineNum"> 76</span> <span class="tlaGNC"> 53 : }</span></span>
<span id="L77"><span class="lineNum"> 77</span> <span class="tlaGNC"> 106 : return states;</span></span>
<span id="L78"><span class="lineNum"> 78</span> <span class="tlaGNC"> 240032 : }</span></span>
<span id="L79"><span class="lineNum"> 79</span> <span class="tlaGNC"> 58 : map&lt;std::string, std::vector&lt;int&gt;&gt; Proposal::fit_local_discretization(const torch::Tensor&amp; y)</span></span>
<span id="L72"><span class="lineNum"> 72</span> <span class="tlaGNC"> 3641 : states[pFeatures[index]] = xStates;</span></span>
<span id="L73"><span class="lineNum"> 73</span> <span class="tlaGNC"> 3641 : }</span></span>
<span id="L74"><span class="lineNum"> 74</span> <span class="tlaGNC"> 583 : const torch::Tensor weights = torch::full({ pDataset.size(1) }, 1.0 / pDataset.size(1), torch::kDouble);</span></span>
<span id="L75"><span class="lineNum"> 75</span> <span class="tlaGNC"> 583 : model.fit(pDataset, weights, pFeatures, pClassName, states);</span></span>
<span id="L76"><span class="lineNum"> 76</span> <span class="tlaGNC"> 583 : }</span></span>
<span id="L77"><span class="lineNum"> 77</span> <span class="tlaGNC"> 1166 : return states;</span></span>
<span id="L78"><span class="lineNum"> 78</span> <span class="tlaGNC"> 2640352 : }</span></span>
<span id="L79"><span class="lineNum"> 79</span> <span class="tlaGNC"> 638 : map&lt;std::string, std::vector&lt;int&gt;&gt; Proposal::fit_local_discretization(const torch::Tensor&amp; y)</span></span>
<span id="L80"><span class="lineNum"> 80</span> : {</span>
<span id="L81"><span class="lineNum"> 81</span> : // Discretize the continuous input data and build pDataset (Classifier::dataset)</span>
<span id="L82"><span class="lineNum"> 82</span> <span class="tlaGNC"> 58 : int m = Xf.size(1);</span></span>
<span id="L83"><span class="lineNum"> 83</span> <span class="tlaGNC"> 58 : int n = Xf.size(0);</span></span>
<span id="L84"><span class="lineNum"> 84</span> <span class="tlaGNC"> 58 : map&lt;std::string, std::vector&lt;int&gt;&gt; states;</span></span>
<span id="L85"><span class="lineNum"> 85</span> <span class="tlaGNC"> 58 : pDataset = torch::zeros({ n + 1, m }, torch::kInt32);</span></span>
<span id="L86"><span class="lineNum"> 86</span> <span class="tlaGNC"> 58 : auto yv = std::vector&lt;int&gt;(y.data_ptr&lt;int&gt;(), y.data_ptr&lt;int&gt;() + y.size(0));</span></span>
<span id="L82"><span class="lineNum"> 82</span> <span class="tlaGNC"> 638 : int m = Xf.size(1);</span></span>
<span id="L83"><span class="lineNum"> 83</span> <span class="tlaGNC"> 638 : int n = Xf.size(0);</span></span>
<span id="L84"><span class="lineNum"> 84</span> <span class="tlaGNC"> 638 : map&lt;std::string, std::vector&lt;int&gt;&gt; states;</span></span>
<span id="L85"><span class="lineNum"> 85</span> <span class="tlaGNC"> 638 : pDataset = torch::zeros({ n + 1, m }, torch::kInt32);</span></span>
<span id="L86"><span class="lineNum"> 86</span> <span class="tlaGNC"> 638 : auto yv = std::vector&lt;int&gt;(y.data_ptr&lt;int&gt;(), y.data_ptr&lt;int&gt;() + y.size(0));</span></span>
<span id="L87"><span class="lineNum"> 87</span> : // discretize input data by feature(row)</span>
<span id="L88"><span class="lineNum"> 88</span> <span class="tlaGNC"> 486 : for (auto i = 0; i &lt; pFeatures.size(); ++i) {</span></span>
<span id="L89"><span class="lineNum"> 89</span> <span class="tlaGNC"> 428 : auto* discretizer = new mdlp::CPPFImdlp();</span></span>
<span id="L90"><span class="lineNum"> 90</span> <span class="tlaGNC"> 856 : auto Xt_ptr = Xf.index({ i }).data_ptr&lt;float&gt;();</span></span>
<span id="L91"><span class="lineNum"> 91</span> <span class="tlaGNC"> 428 : auto Xt = std::vector&lt;float&gt;(Xt_ptr, Xt_ptr + Xf.size(1));</span></span>
<span id="L92"><span class="lineNum"> 92</span> <span class="tlaGNC"> 428 : discretizer-&gt;fit(Xt, yv);</span></span>
<span id="L93"><span class="lineNum"> 93</span> <span class="tlaGNC"> 1712 : pDataset.index_put_({ i, &quot;...&quot; }, torch::tensor(discretizer-&gt;transform(Xt)));</span></span>
<span id="L94"><span class="lineNum"> 94</span> <span class="tlaGNC"> 428 : auto xStates = std::vector&lt;int&gt;(discretizer-&gt;getCutPoints().size() + 1);</span></span>
<span id="L95"><span class="lineNum"> 95</span> <span class="tlaGNC"> 428 : iota(xStates.begin(), xStates.end(), 0);</span></span>
<span id="L96"><span class="lineNum"> 96</span> <span class="tlaGNC"> 428 : states[pFeatures[i]] = xStates;</span></span>
<span id="L97"><span class="lineNum"> 97</span> <span class="tlaGNC"> 428 : discretizers[pFeatures[i]] = discretizer;</span></span>
<span id="L98"><span class="lineNum"> 98</span> <span class="tlaGNC"> 428 : }</span></span>
<span id="L99"><span class="lineNum"> 99</span> <span class="tlaGNC"> 58 : int n_classes = torch::max(y).item&lt;int&gt;() + 1;</span></span>
<span id="L100"><span class="lineNum"> 100</span> <span class="tlaGNC"> 58 : auto yStates = std::vector&lt;int&gt;(n_classes);</span></span>
<span id="L101"><span class="lineNum"> 101</span> <span class="tlaGNC"> 58 : iota(yStates.begin(), yStates.end(), 0);</span></span>
<span id="L102"><span class="lineNum"> 102</span> <span class="tlaGNC"> 58 : states[pClassName] = yStates;</span></span>
<span id="L103"><span class="lineNum"> 103</span> <span class="tlaGNC"> 174 : pDataset.index_put_({ n, &quot;...&quot; }, y);</span></span>
<span id="L104"><span class="lineNum"> 104</span> <span class="tlaGNC"> 116 : return states;</span></span>
<span id="L105"><span class="lineNum"> 105</span> <span class="tlaGNC"> 972 : }</span></span>
<span id="L106"><span class="lineNum"> 106</span> <span class="tlaGNC"> 42 : torch::Tensor Proposal::prepareX(torch::Tensor&amp; X)</span></span>
<span id="L88"><span class="lineNum"> 88</span> <span class="tlaGNC"> 5346 : for (auto i = 0; i &lt; pFeatures.size(); ++i) {</span></span>
<span id="L89"><span class="lineNum"> 89</span> <span class="tlaGNC"> 4708 : auto* discretizer = new mdlp::CPPFImdlp();</span></span>
<span id="L90"><span class="lineNum"> 90</span> <span class="tlaGNC"> 9416 : auto Xt_ptr = Xf.index({ i }).data_ptr&lt;float&gt;();</span></span>
<span id="L91"><span class="lineNum"> 91</span> <span class="tlaGNC"> 4708 : auto Xt = std::vector&lt;float&gt;(Xt_ptr, Xt_ptr + Xf.size(1));</span></span>
<span id="L92"><span class="lineNum"> 92</span> <span class="tlaGNC"> 4708 : discretizer-&gt;fit(Xt, yv);</span></span>
<span id="L93"><span class="lineNum"> 93</span> <span class="tlaGNC"> 18832 : pDataset.index_put_({ i, &quot;...&quot; }, torch::tensor(discretizer-&gt;transform(Xt)));</span></span>
<span id="L94"><span class="lineNum"> 94</span> <span class="tlaGNC"> 4708 : auto xStates = std::vector&lt;int&gt;(discretizer-&gt;getCutPoints().size() + 1);</span></span>
<span id="L95"><span class="lineNum"> 95</span> <span class="tlaGNC"> 4708 : iota(xStates.begin(), xStates.end(), 0);</span></span>
<span id="L96"><span class="lineNum"> 96</span> <span class="tlaGNC"> 4708 : states[pFeatures[i]] = xStates;</span></span>
<span id="L97"><span class="lineNum"> 97</span> <span class="tlaGNC"> 4708 : discretizers[pFeatures[i]] = discretizer;</span></span>
<span id="L98"><span class="lineNum"> 98</span> <span class="tlaGNC"> 4708 : }</span></span>
<span id="L99"><span class="lineNum"> 99</span> <span class="tlaGNC"> 638 : int n_classes = torch::max(y).item&lt;int&gt;() + 1;</span></span>
<span id="L100"><span class="lineNum"> 100</span> <span class="tlaGNC"> 638 : auto yStates = std::vector&lt;int&gt;(n_classes);</span></span>
<span id="L101"><span class="lineNum"> 101</span> <span class="tlaGNC"> 638 : iota(yStates.begin(), yStates.end(), 0);</span></span>
<span id="L102"><span class="lineNum"> 102</span> <span class="tlaGNC"> 638 : states[pClassName] = yStates;</span></span>
<span id="L103"><span class="lineNum"> 103</span> <span class="tlaGNC"> 1914 : pDataset.index_put_({ n, &quot;...&quot; }, y);</span></span>
<span id="L104"><span class="lineNum"> 104</span> <span class="tlaGNC"> 1276 : return states;</span></span>
<span id="L105"><span class="lineNum"> 105</span> <span class="tlaGNC"> 10692 : }</span></span>
<span id="L106"><span class="lineNum"> 106</span> <span class="tlaGNC"> 462 : torch::Tensor Proposal::prepareX(torch::Tensor&amp; X)</span></span>
<span id="L107"><span class="lineNum"> 107</span> : {</span>
<span id="L108"><span class="lineNum"> 108</span> <span class="tlaGNC"> 42 : auto Xtd = torch::zeros_like(X, torch::kInt32);</span></span>
<span id="L109"><span class="lineNum"> 109</span> <span class="tlaGNC"> 344 : for (int i = 0; i &lt; X.size(0); ++i) {</span></span>
<span id="L110"><span class="lineNum"> 110</span> <span class="tlaGNC"> 302 : auto Xt = std::vector&lt;float&gt;(X[i].data_ptr&lt;float&gt;(), X[i].data_ptr&lt;float&gt;() + X.size(1));</span></span>
<span id="L111"><span class="lineNum"> 111</span> <span class="tlaGNC"> 302 : auto Xd = discretizers[pFeatures[i]]-&gt;transform(Xt);</span></span>
<span id="L112"><span class="lineNum"> 112</span> <span class="tlaGNC"> 906 : Xtd.index_put_({ i }, torch::tensor(Xd, torch::kInt32));</span></span>
<span id="L113"><span class="lineNum"> 113</span> <span class="tlaGNC"> 302 : }</span></span>
<span id="L114"><span class="lineNum"> 114</span> <span class="tlaGNC"> 42 : return Xtd;</span></span>
<span id="L115"><span class="lineNum"> 115</span> <span class="tlaGNC"> 302 : }</span></span>
<span id="L108"><span class="lineNum"> 108</span> <span class="tlaGNC"> 462 : auto Xtd = torch::zeros_like(X, torch::kInt32);</span></span>
<span id="L109"><span class="lineNum"> 109</span> <span class="tlaGNC"> 3784 : for (int i = 0; i &lt; X.size(0); ++i) {</span></span>
<span id="L110"><span class="lineNum"> 110</span> <span class="tlaGNC"> 3322 : auto Xt = std::vector&lt;float&gt;(X[i].data_ptr&lt;float&gt;(), X[i].data_ptr&lt;float&gt;() + X.size(1));</span></span>
<span id="L111"><span class="lineNum"> 111</span> <span class="tlaGNC"> 3322 : auto Xd = discretizers[pFeatures[i]]-&gt;transform(Xt);</span></span>
<span id="L112"><span class="lineNum"> 112</span> <span class="tlaGNC"> 9966 : Xtd.index_put_({ i }, torch::tensor(Xd, torch::kInt32));</span></span>
<span id="L113"><span class="lineNum"> 113</span> <span class="tlaGNC"> 3322 : }</span></span>
<span id="L114"><span class="lineNum"> 114</span> <span class="tlaGNC"> 462 : return Xtd;</span></span>
<span id="L115"><span class="lineNum"> 115</span> <span class="tlaGNC"> 3322 : }</span></span>
<span id="L116"><span class="lineNum"> 116</span> : }</span>
</pre>
</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,21 +65,21 @@
<tr>
<td class="coverFn"><a href="SPODE.cc.gcov.html#L24">_ZNK8bayesnet5SPODE5graphERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE</a></td>
<td class="coverFnHi">17</td>
<td class="coverFnHi">187</td>
</tr>
<tr>
<td class="coverFn"><a href="SPODE.cc.gcov.html#L11">_ZN8bayesnet5SPODE10buildModelERKN2at6TensorE</a></td>
<td class="coverFnHi">225</td>
<td class="coverFnHi">2665</td>
</tr>
<tr>
<td class="coverFn"><a href="SPODE.cc.gcov.html#L9">_ZN8bayesnet5SPODEC2Ei</a></td>
<td class="coverFnHi">252</td>
<td class="coverFnHi">2962</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,21 +65,21 @@
<tr>
<td class="coverFn"><a href="SPODE.cc.gcov.html#L11">_ZN8bayesnet5SPODE10buildModelERKN2at6TensorE</a></td>
<td class="coverFnHi">225</td>
<td class="coverFnHi">2665</td>
</tr>
<tr>
<td class="coverFn"><a href="SPODE.cc.gcov.html#L9">_ZN8bayesnet5SPODEC2Ei</a></td>
<td class="coverFnHi">252</td>
<td class="coverFnHi">2962</td>
</tr>
<tr>
<td class="coverFn"><a href="SPODE.cc.gcov.html#L24">_ZNK8bayesnet5SPODE5graphERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE</a></td>
<td class="coverFnHi">17</td>
<td class="coverFnHi">187</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -70,24 +70,24 @@
<span id="L8"><span class="lineNum"> 8</span> : </span>
<span id="L9"><span class="lineNum"> 9</span> : namespace bayesnet {</span>
<span id="L10"><span class="lineNum"> 10</span> : </span>
<span id="L11"><span class="lineNum"> 11</span> <span class="tlaGNC tlaBgGNC"> 252 : SPODE::SPODE(int root) : Classifier(Network()), root(root) {}</span></span>
<span id="L11"><span class="lineNum"> 11</span> <span class="tlaGNC tlaBgGNC"> 2962 : SPODE::SPODE(int root) : Classifier(Network()), root(root) {}</span></span>
<span id="L12"><span class="lineNum"> 12</span> : </span>
<span id="L13"><span class="lineNum"> 13</span> <span class="tlaGNC"> 225 : void SPODE::buildModel(const torch::Tensor&amp; weights)</span></span>
<span id="L13"><span class="lineNum"> 13</span> <span class="tlaGNC"> 2665 : void SPODE::buildModel(const torch::Tensor&amp; weights)</span></span>
<span id="L14"><span class="lineNum"> 14</span> : {</span>
<span id="L15"><span class="lineNum"> 15</span> : // 0. Add all nodes to the model</span>
<span id="L16"><span class="lineNum"> 16</span> <span class="tlaGNC"> 225 : addNodes();</span></span>
<span id="L16"><span class="lineNum"> 16</span> <span class="tlaGNC"> 2665 : addNodes();</span></span>
<span id="L17"><span class="lineNum"> 17</span> : // 1. Add edges from the class node to all other nodes</span>
<span id="L18"><span class="lineNum"> 18</span> : // 2. Add edges from the root node to all other nodes</span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC"> 8611 : for (int i = 0; i &lt; static_cast&lt;int&gt;(features.size()); ++i) {</span></span>
<span id="L20"><span class="lineNum"> 20</span> <span class="tlaGNC"> 8386 : model.addEdge(className, features[i]);</span></span>
<span id="L21"><span class="lineNum"> 21</span> <span class="tlaGNC"> 8386 : if (i != root) {</span></span>
<span id="L22"><span class="lineNum"> 22</span> <span class="tlaGNC"> 8161 : model.addEdge(features[root], features[i]);</span></span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC"> 113941 : for (int i = 0; i &lt; static_cast&lt;int&gt;(features.size()); ++i) {</span></span>
<span id="L20"><span class="lineNum"> 20</span> <span class="tlaGNC"> 111276 : model.addEdge(className, features[i]);</span></span>
<span id="L21"><span class="lineNum"> 21</span> <span class="tlaGNC"> 111276 : if (i != root) {</span></span>
<span id="L22"><span class="lineNum"> 22</span> <span class="tlaGNC"> 108611 : model.addEdge(features[root], features[i]);</span></span>
<span id="L23"><span class="lineNum"> 23</span> : }</span>
<span id="L24"><span class="lineNum"> 24</span> : }</span>
<span id="L25"><span class="lineNum"> 25</span> <span class="tlaGNC"> 225 : }</span></span>
<span id="L26"><span class="lineNum"> 26</span> <span class="tlaGNC"> 17 : std::vector&lt;std::string&gt; SPODE::graph(const std::string&amp; name) const</span></span>
<span id="L25"><span class="lineNum"> 25</span> <span class="tlaGNC"> 2665 : }</span></span>
<span id="L26"><span class="lineNum"> 26</span> <span class="tlaGNC"> 187 : std::vector&lt;std::string&gt; SPODE::graph(const std::string&amp; name) const</span></span>
<span id="L27"><span class="lineNum"> 27</span> : {</span>
<span id="L28"><span class="lineNum"> 28</span> <span class="tlaGNC"> 17 : return model.graph(name);</span></span>
<span id="L28"><span class="lineNum"> 28</span> <span class="tlaGNC"> 187 : return model.graph(name);</span></span>
<span id="L29"><span class="lineNum"> 29</span> : }</span>
<span id="L30"><span class="lineNum"> 30</span> : </span>
<span id="L31"><span class="lineNum"> 31</span> : }</span>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,21 +65,21 @@
<tr>
<td class="coverFn"><a href="SPODE.h.gcov.html#L17">_ZN8bayesnet5SPODED0Ev</a></td>
<td class="coverFnHi">401</td>
<td class="coverFnHi">337</td>
</tr>
<tr>
<td class="coverFnAlias"><a href="SPODE.h.gcov.html#L17">_ZN8bayesnet5SPODED0Ev</a></td>
<td class="coverFnAliasHi">180</td>
<td class="coverFnAliasHi">148</td>
</tr>
<tr>
<td class="coverFnAlias"><a href="SPODE.h.gcov.html#L17">_ZN8bayesnet5SPODED2Ev</a></td>
<td class="coverFnAliasHi">221</td>
<td class="coverFnAliasHi">189</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,21 +65,21 @@
<tr>
<td class="coverFn"><a href="SPODE.h.gcov.html#L17">_ZN8bayesnet5SPODED0Ev</a></td>
<td class="coverFnHi">401</td>
<td class="coverFnHi">337</td>
</tr>
<tr>
<td class="coverFnAlias"><a href="SPODE.h.gcov.html#L17">_ZN8bayesnet5SPODED0Ev</a></td>
<td class="coverFnAliasHi">180</td>
<td class="coverFnAliasHi">148</td>
</tr>
<tr>
<td class="coverFnAlias"><a href="SPODE.h.gcov.html#L17">_ZN8bayesnet5SPODED2Ev</a></td>
<td class="coverFnAliasHi">221</td>
<td class="coverFnAliasHi">189</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -78,7 +78,7 @@
<span id="L16"><span class="lineNum"> 16</span> : void buildModel(const torch::Tensor&amp; weights) override;</span>
<span id="L17"><span class="lineNum"> 17</span> : public:</span>
<span id="L18"><span class="lineNum"> 18</span> : explicit SPODE(int root);</span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC tlaBgGNC"> 401 : virtual ~SPODE() = default;</span></span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC tlaBgGNC"> 337 : virtual ~SPODE() = default;</span></span>
<span id="L20"><span class="lineNum"> 20</span> : std::vector&lt;std::string&gt; graph(const std::string&amp; name = &quot;SPODE&quot;) const override;</span>
<span id="L21"><span class="lineNum"> 21</span> : };</span>
<span id="L22"><span class="lineNum"> 22</span> : }</span>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,42 +65,42 @@
<tr>
<td class="coverFn"><a href="SPODELd.cc.gcov.html#L17">_ZN8bayesnet7SPODELd3fitERN2at6TensorERKSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaISA_EERKSA_RSt3mapISA_S4_IiSaIiEESt4lessISA_ESaISt4pairISF_SJ_EEE</a></td>
<td class="coverFnHi">2</td>
<td class="coverFnHi">22</td>
</tr>
<tr>
<td class="coverFn"><a href="SPODELd.cc.gcov.html#L44">_ZNK8bayesnet7SPODELd5graphERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE</a></td>
<td class="coverFnHi">9</td>
<td class="coverFnHi">99</td>
</tr>
<tr>
<td class="coverFn"><a href="SPODELd.cc.gcov.html#L39">_ZN8bayesnet7SPODELd7predictERN2at6TensorE</a></td>
<td class="coverFnHi">34</td>
<td class="coverFnHi">374</td>
</tr>
<tr>
<td class="coverFn"><a href="SPODELd.cc.gcov.html#L9">_ZN8bayesnet7SPODELd3fitERN2at6TensorES3_RKSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaISA_EERKSA_RSt3mapISA_S4_IiSaIiEESt4lessISA_ESaISt4pairISF_SJ_EEE</a></td>
<td class="coverFnHi">42</td>
<td class="coverFnHi">462</td>
</tr>
<tr>
<td class="coverFn"><a href="SPODELd.cc.gcov.html#L27">_ZN8bayesnet7SPODELd9commonFitERKSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaIS7_EERKS7_RSt3mapIS7_S1_IiSaIiEESt4lessIS7_ESaISt4pairISC_SG_EEE</a></td>
<td class="coverFnHi">43</td>
<td class="coverFnHi">473</td>
</tr>
<tr>
<td class="coverFn"><a href="SPODELd.cc.gcov.html#L8">_ZN8bayesnet7SPODELdC2Ei</a></td>
<td class="coverFnHi">55</td>
<td class="coverFnHi">605</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,42 +65,42 @@
<tr>
<td class="coverFn"><a href="SPODELd.cc.gcov.html#L17">_ZN8bayesnet7SPODELd3fitERN2at6TensorERKSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaISA_EERKSA_RSt3mapISA_S4_IiSaIiEESt4lessISA_ESaISt4pairISF_SJ_EEE</a></td>
<td class="coverFnHi">2</td>
<td class="coverFnHi">22</td>
</tr>
<tr>
<td class="coverFn"><a href="SPODELd.cc.gcov.html#L9">_ZN8bayesnet7SPODELd3fitERN2at6TensorES3_RKSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaISA_EERKSA_RSt3mapISA_S4_IiSaIiEESt4lessISA_ESaISt4pairISF_SJ_EEE</a></td>
<td class="coverFnHi">42</td>
<td class="coverFnHi">462</td>
</tr>
<tr>
<td class="coverFn"><a href="SPODELd.cc.gcov.html#L39">_ZN8bayesnet7SPODELd7predictERN2at6TensorE</a></td>
<td class="coverFnHi">34</td>
<td class="coverFnHi">374</td>
</tr>
<tr>
<td class="coverFn"><a href="SPODELd.cc.gcov.html#L27">_ZN8bayesnet7SPODELd9commonFitERKSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaIS7_EERKS7_RSt3mapIS7_S1_IiSaIiEESt4lessIS7_ESaISt4pairISC_SG_EEE</a></td>
<td class="coverFnHi">43</td>
<td class="coverFnHi">473</td>
</tr>
<tr>
<td class="coverFn"><a href="SPODELd.cc.gcov.html#L8">_ZN8bayesnet7SPODELdC2Ei</a></td>
<td class="coverFnHi">55</td>
<td class="coverFnHi">605</td>
</tr>
<tr>
<td class="coverFn"><a href="SPODELd.cc.gcov.html#L44">_ZNK8bayesnet7SPODELd5graphERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE</a></td>
<td class="coverFnHi">9</td>
<td class="coverFnHi">99</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -69,45 +69,45 @@
<span id="L7"><span class="lineNum"> 7</span> : #include &quot;SPODELd.h&quot;</span>
<span id="L8"><span class="lineNum"> 8</span> : </span>
<span id="L9"><span class="lineNum"> 9</span> : namespace bayesnet {</span>
<span id="L10"><span class="lineNum"> 10</span> <span class="tlaGNC tlaBgGNC"> 55 : SPODELd::SPODELd(int root) : SPODE(root), Proposal(dataset, features, className) {}</span></span>
<span id="L11"><span class="lineNum"> 11</span> <span class="tlaGNC"> 42 : SPODELd&amp; SPODELd::fit(torch::Tensor&amp; X_, torch::Tensor&amp; y_, const std::vector&lt;std::string&gt;&amp; features_, const std::string&amp; className_, map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states_)</span></span>
<span id="L10"><span class="lineNum"> 10</span> <span class="tlaGNC tlaBgGNC"> 605 : SPODELd::SPODELd(int root) : SPODE(root), Proposal(dataset, features, className) {}</span></span>
<span id="L11"><span class="lineNum"> 11</span> <span class="tlaGNC"> 462 : SPODELd&amp; SPODELd::fit(torch::Tensor&amp; X_, torch::Tensor&amp; y_, const std::vector&lt;std::string&gt;&amp; features_, const std::string&amp; className_, map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states_)</span></span>
<span id="L12"><span class="lineNum"> 12</span> : {</span>
<span id="L13"><span class="lineNum"> 13</span> <span class="tlaGNC"> 42 : checkInput(X_, y_);</span></span>
<span id="L14"><span class="lineNum"> 14</span> <span class="tlaGNC"> 42 : Xf = X_;</span></span>
<span id="L15"><span class="lineNum"> 15</span> <span class="tlaGNC"> 42 : y = y_;</span></span>
<span id="L16"><span class="lineNum"> 16</span> <span class="tlaGNC"> 42 : return commonFit(features_, className_, states_);</span></span>
<span id="L13"><span class="lineNum"> 13</span> <span class="tlaGNC"> 462 : checkInput(X_, y_);</span></span>
<span id="L14"><span class="lineNum"> 14</span> <span class="tlaGNC"> 462 : Xf = X_;</span></span>
<span id="L15"><span class="lineNum"> 15</span> <span class="tlaGNC"> 462 : y = y_;</span></span>
<span id="L16"><span class="lineNum"> 16</span> <span class="tlaGNC"> 462 : return commonFit(features_, className_, states_);</span></span>
<span id="L17"><span class="lineNum"> 17</span> : }</span>
<span id="L18"><span class="lineNum"> 18</span> : </span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC"> 2 : SPODELd&amp; SPODELd::fit(torch::Tensor&amp; dataset, const std::vector&lt;std::string&gt;&amp; features_, const std::string&amp; className_, map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states_)</span></span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC"> 22 : SPODELd&amp; SPODELd::fit(torch::Tensor&amp; dataset, const std::vector&lt;std::string&gt;&amp; features_, const std::string&amp; className_, map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states_)</span></span>
<span id="L20"><span class="lineNum"> 20</span> : {</span>
<span id="L21"><span class="lineNum"> 21</span> <span class="tlaGNC"> 2 : if (!torch::is_floating_point(dataset)) {</span></span>
<span id="L22"><span class="lineNum"> 22</span> <span class="tlaGNC"> 1 : throw std::runtime_error(&quot;Dataset must be a floating point tensor&quot;);</span></span>
<span id="L21"><span class="lineNum"> 21</span> <span class="tlaGNC"> 22 : if (!torch::is_floating_point(dataset)) {</span></span>
<span id="L22"><span class="lineNum"> 22</span> <span class="tlaGNC"> 11 : throw std::runtime_error(&quot;Dataset must be a floating point tensor&quot;);</span></span>
<span id="L23"><span class="lineNum"> 23</span> : }</span>
<span id="L24"><span class="lineNum"> 24</span> <span class="tlaGNC"> 4 : Xf = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), &quot;...&quot; }).clone();</span></span>
<span id="L25"><span class="lineNum"> 25</span> <span class="tlaGNC"> 3 : y = dataset.index({ -1, &quot;...&quot; }).clone().to(torch::kInt32);</span></span>
<span id="L26"><span class="lineNum"> 26</span> <span class="tlaGNC"> 1 : return commonFit(features_, className_, states_);</span></span>
<span id="L27"><span class="lineNum"> 27</span> <span class="tlaGNC"> 3 : }</span></span>
<span id="L24"><span class="lineNum"> 24</span> <span class="tlaGNC"> 44 : Xf = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), &quot;...&quot; }).clone();</span></span>
<span id="L25"><span class="lineNum"> 25</span> <span class="tlaGNC"> 33 : y = dataset.index({ -1, &quot;...&quot; }).clone().to(torch::kInt32);</span></span>
<span id="L26"><span class="lineNum"> 26</span> <span class="tlaGNC"> 11 : return commonFit(features_, className_, states_);</span></span>
<span id="L27"><span class="lineNum"> 27</span> <span class="tlaGNC"> 33 : }</span></span>
<span id="L28"><span class="lineNum"> 28</span> : </span>
<span id="L29"><span class="lineNum"> 29</span> <span class="tlaGNC"> 43 : SPODELd&amp; SPODELd::commonFit(const std::vector&lt;std::string&gt;&amp; features_, const std::string&amp; className_, map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states_)</span></span>
<span id="L29"><span class="lineNum"> 29</span> <span class="tlaGNC"> 473 : SPODELd&amp; SPODELd::commonFit(const std::vector&lt;std::string&gt;&amp; features_, const std::string&amp; className_, map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states_)</span></span>
<span id="L30"><span class="lineNum"> 30</span> : {</span>
<span id="L31"><span class="lineNum"> 31</span> <span class="tlaGNC"> 43 : features = features_;</span></span>
<span id="L32"><span class="lineNum"> 32</span> <span class="tlaGNC"> 43 : className = className_;</span></span>
<span id="L31"><span class="lineNum"> 31</span> <span class="tlaGNC"> 473 : features = features_;</span></span>
<span id="L32"><span class="lineNum"> 32</span> <span class="tlaGNC"> 473 : className = className_;</span></span>
<span id="L33"><span class="lineNum"> 33</span> : // Fills std::vectors Xv &amp; yv with the data from tensors X_ (discretized) &amp; y</span>
<span id="L34"><span class="lineNum"> 34</span> <span class="tlaGNC"> 43 : states = fit_local_discretization(y);</span></span>
<span id="L34"><span class="lineNum"> 34</span> <span class="tlaGNC"> 473 : states = fit_local_discretization(y);</span></span>
<span id="L35"><span class="lineNum"> 35</span> : // We have discretized the input data</span>
<span id="L36"><span class="lineNum"> 36</span> : // 1st we need to fit the model to build the normal SPODE structure, SPODE::fit initializes the base Bayesian network</span>
<span id="L37"><span class="lineNum"> 37</span> <span class="tlaGNC"> 43 : SPODE::fit(dataset, features, className, states);</span></span>
<span id="L38"><span class="lineNum"> 38</span> <span class="tlaGNC"> 43 : states = localDiscretizationProposal(states, model);</span></span>
<span id="L39"><span class="lineNum"> 39</span> <span class="tlaGNC"> 43 : return *this;</span></span>
<span id="L37"><span class="lineNum"> 37</span> <span class="tlaGNC"> 473 : SPODE::fit(dataset, features, className, states);</span></span>
<span id="L38"><span class="lineNum"> 38</span> <span class="tlaGNC"> 473 : states = localDiscretizationProposal(states, model);</span></span>
<span id="L39"><span class="lineNum"> 39</span> <span class="tlaGNC"> 473 : return *this;</span></span>
<span id="L40"><span class="lineNum"> 40</span> : }</span>
<span id="L41"><span class="lineNum"> 41</span> <span class="tlaGNC"> 34 : torch::Tensor SPODELd::predict(torch::Tensor&amp; X)</span></span>
<span id="L41"><span class="lineNum"> 41</span> <span class="tlaGNC"> 374 : torch::Tensor SPODELd::predict(torch::Tensor&amp; X)</span></span>
<span id="L42"><span class="lineNum"> 42</span> : {</span>
<span id="L43"><span class="lineNum"> 43</span> <span class="tlaGNC"> 34 : auto Xt = prepareX(X);</span></span>
<span id="L44"><span class="lineNum"> 44</span> <span class="tlaGNC"> 68 : return SPODE::predict(Xt);</span></span>
<span id="L45"><span class="lineNum"> 45</span> <span class="tlaGNC"> 34 : }</span></span>
<span id="L46"><span class="lineNum"> 46</span> <span class="tlaGNC"> 9 : std::vector&lt;std::string&gt; SPODELd::graph(const std::string&amp; name) const</span></span>
<span id="L43"><span class="lineNum"> 43</span> <span class="tlaGNC"> 374 : auto Xt = prepareX(X);</span></span>
<span id="L44"><span class="lineNum"> 44</span> <span class="tlaGNC"> 748 : return SPODE::predict(Xt);</span></span>
<span id="L45"><span class="lineNum"> 45</span> <span class="tlaGNC"> 374 : }</span></span>
<span id="L46"><span class="lineNum"> 46</span> <span class="tlaGNC"> 99 : std::vector&lt;std::string&gt; SPODELd::graph(const std::string&amp; name) const</span></span>
<span id="L47"><span class="lineNum"> 47</span> : {</span>
<span id="L48"><span class="lineNum"> 48</span> <span class="tlaGNC"> 9 : return SPODE::graph(name);</span></span>
<span id="L48"><span class="lineNum"> 48</span> <span class="tlaGNC"> 99 : return SPODE::graph(name);</span></span>
<span id="L49"><span class="lineNum"> 49</span> : }</span>
<span id="L50"><span class="lineNum"> 50</span> : }</span>
</pre>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,28 +65,28 @@
<tr>
<td class="coverFn"><a href="TAN.cc.gcov.html#L39">_ZNK8bayesnet3TAN5graphERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE</a></td>
<td class="coverFnHi">2</td>
<td class="coverFnHi">22</td>
</tr>
<tr>
<td class="coverFn"><a href="TAN.cc.gcov.html#L10">_ZN8bayesnet3TAN10buildModelERKN2at6TensorE</a></td>
<td class="coverFnHi">13</td>
<td class="coverFnHi">143</td>
</tr>
<tr>
<td class="coverFn"><a href="TAN.cc.gcov.html#L8">_ZN8bayesnet3TANC2Ev</a></td>
<td class="coverFnHi">47</td>
<td class="coverFnHi">517</td>
</tr>
<tr>
<td class="coverFn"><a href="TAN.cc.gcov.html#L23">_ZZN8bayesnet3TAN10buildModelERKN2at6TensorEENKUlRKT_RKT0_E_clISt4pairIifESE_EEDaS7_SA_</a></td>
<td class="coverFnHi">162</td>
<td class="coverFnHi">1782</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,28 +65,28 @@
<tr>
<td class="coverFn"><a href="TAN.cc.gcov.html#L10">_ZN8bayesnet3TAN10buildModelERKN2at6TensorE</a></td>
<td class="coverFnHi">13</td>
<td class="coverFnHi">143</td>
</tr>
<tr>
<td class="coverFn"><a href="TAN.cc.gcov.html#L8">_ZN8bayesnet3TANC2Ev</a></td>
<td class="coverFnHi">47</td>
<td class="coverFnHi">517</td>
</tr>
<tr>
<td class="coverFn"><a href="TAN.cc.gcov.html#L39">_ZNK8bayesnet3TAN5graphERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE</a></td>
<td class="coverFnHi">2</td>
<td class="coverFnHi">22</td>
</tr>
<tr>
<td class="coverFn"><a href="TAN.cc.gcov.html#L23">_ZZN8bayesnet3TAN10buildModelERKN2at6TensorEENKUlRKT_RKT0_E_clISt4pairIifESE_EEDaS7_SA_</a></td>
<td class="coverFnHi">162</td>
<td class="coverFnHi">1782</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -69,40 +69,40 @@
<span id="L7"><span class="lineNum"> 7</span> : #include &quot;TAN.h&quot;</span>
<span id="L8"><span class="lineNum"> 8</span> : </span>
<span id="L9"><span class="lineNum"> 9</span> : namespace bayesnet {</span>
<span id="L10"><span class="lineNum"> 10</span> <span class="tlaGNC tlaBgGNC"> 47 : TAN::TAN() : Classifier(Network()) {}</span></span>
<span id="L10"><span class="lineNum"> 10</span> <span class="tlaGNC tlaBgGNC"> 517 : TAN::TAN() : Classifier(Network()) {}</span></span>
<span id="L11"><span class="lineNum"> 11</span> : </span>
<span id="L12"><span class="lineNum"> 12</span> <span class="tlaGNC"> 13 : void TAN::buildModel(const torch::Tensor&amp; weights)</span></span>
<span id="L12"><span class="lineNum"> 12</span> <span class="tlaGNC"> 143 : void TAN::buildModel(const torch::Tensor&amp; weights)</span></span>
<span id="L13"><span class="lineNum"> 13</span> : {</span>
<span id="L14"><span class="lineNum"> 14</span> : // 0. Add all nodes to the model</span>
<span id="L15"><span class="lineNum"> 15</span> <span class="tlaGNC"> 13 : addNodes();</span></span>
<span id="L15"><span class="lineNum"> 15</span> <span class="tlaGNC"> 143 : addNodes();</span></span>
<span id="L16"><span class="lineNum"> 16</span> : // 1. Compute mutual information between each feature and the class and set the root node</span>
<span id="L17"><span class="lineNum"> 17</span> : // as the highest mutual information with the class</span>
<span id="L18"><span class="lineNum"> 18</span> <span class="tlaGNC"> 13 : auto mi = std::vector &lt;std::pair&lt;int, float &gt;&gt;();</span></span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC"> 39 : torch::Tensor class_dataset = dataset.index({ -1, &quot;...&quot; });</span></span>
<span id="L20"><span class="lineNum"> 20</span> <span class="tlaGNC"> 89 : for (int i = 0; i &lt; static_cast&lt;int&gt;(features.size()); ++i) {</span></span>
<span id="L21"><span class="lineNum"> 21</span> <span class="tlaGNC"> 228 : torch::Tensor feature_dataset = dataset.index({ i, &quot;...&quot; });</span></span>
<span id="L22"><span class="lineNum"> 22</span> <span class="tlaGNC"> 76 : auto mi_value = metrics.mutualInformation(class_dataset, feature_dataset, weights);</span></span>
<span id="L23"><span class="lineNum"> 23</span> <span class="tlaGNC"> 76 : mi.push_back({ i, mi_value });</span></span>
<span id="L24"><span class="lineNum"> 24</span> <span class="tlaGNC"> 76 : }</span></span>
<span id="L25"><span class="lineNum"> 25</span> <span class="tlaGNC"> 175 : sort(mi.begin(), mi.end(), [](const auto&amp; left, const auto&amp; right) {return left.second &lt; right.second;});</span></span>
<span id="L26"><span class="lineNum"> 26</span> <span class="tlaGNC"> 13 : auto root = mi[mi.size() - 1].first;</span></span>
<span id="L18"><span class="lineNum"> 18</span> <span class="tlaGNC"> 143 : auto mi = std::vector &lt;std::pair&lt;int, float &gt;&gt;();</span></span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC"> 429 : torch::Tensor class_dataset = dataset.index({ -1, &quot;...&quot; });</span></span>
<span id="L20"><span class="lineNum"> 20</span> <span class="tlaGNC"> 979 : for (int i = 0; i &lt; static_cast&lt;int&gt;(features.size()); ++i) {</span></span>
<span id="L21"><span class="lineNum"> 21</span> <span class="tlaGNC"> 2508 : torch::Tensor feature_dataset = dataset.index({ i, &quot;...&quot; });</span></span>
<span id="L22"><span class="lineNum"> 22</span> <span class="tlaGNC"> 836 : auto mi_value = metrics.mutualInformation(class_dataset, feature_dataset, weights);</span></span>
<span id="L23"><span class="lineNum"> 23</span> <span class="tlaGNC"> 836 : mi.push_back({ i, mi_value });</span></span>
<span id="L24"><span class="lineNum"> 24</span> <span class="tlaGNC"> 836 : }</span></span>
<span id="L25"><span class="lineNum"> 25</span> <span class="tlaGNC"> 1925 : sort(mi.begin(), mi.end(), [](const auto&amp; left, const auto&amp; right) {return left.second &lt; right.second;});</span></span>
<span id="L26"><span class="lineNum"> 26</span> <span class="tlaGNC"> 143 : auto root = mi[mi.size() - 1].first;</span></span>
<span id="L27"><span class="lineNum"> 27</span> : // 2. Compute mutual information between each feature and the class</span>
<span id="L28"><span class="lineNum"> 28</span> <span class="tlaGNC"> 13 : auto weights_matrix = metrics.conditionalEdge(weights);</span></span>
<span id="L28"><span class="lineNum"> 28</span> <span class="tlaGNC"> 143 : auto weights_matrix = metrics.conditionalEdge(weights);</span></span>
<span id="L29"><span class="lineNum"> 29</span> : // 3. Compute the maximum spanning tree</span>
<span id="L30"><span class="lineNum"> 30</span> <span class="tlaGNC"> 13 : auto mst = metrics.maximumSpanningTree(features, weights_matrix, root);</span></span>
<span id="L30"><span class="lineNum"> 30</span> <span class="tlaGNC"> 143 : auto mst = metrics.maximumSpanningTree(features, weights_matrix, root);</span></span>
<span id="L31"><span class="lineNum"> 31</span> : // 4. Add edges from the maximum spanning tree to the model</span>
<span id="L32"><span class="lineNum"> 32</span> <span class="tlaGNC"> 76 : for (auto i = 0; i &lt; mst.size(); ++i) {</span></span>
<span id="L33"><span class="lineNum"> 33</span> <span class="tlaGNC"> 63 : auto [from, to] = mst[i];</span></span>
<span id="L34"><span class="lineNum"> 34</span> <span class="tlaGNC"> 63 : model.addEdge(features[from], features[to]);</span></span>
<span id="L32"><span class="lineNum"> 32</span> <span class="tlaGNC"> 836 : for (auto i = 0; i &lt; mst.size(); ++i) {</span></span>
<span id="L33"><span class="lineNum"> 33</span> <span class="tlaGNC"> 693 : auto [from, to] = mst[i];</span></span>
<span id="L34"><span class="lineNum"> 34</span> <span class="tlaGNC"> 693 : model.addEdge(features[from], features[to]);</span></span>
<span id="L35"><span class="lineNum"> 35</span> : }</span>
<span id="L36"><span class="lineNum"> 36</span> : // 5. Add edges from the class to all features</span>
<span id="L37"><span class="lineNum"> 37</span> <span class="tlaGNC"> 89 : for (auto feature : features) {</span></span>
<span id="L38"><span class="lineNum"> 38</span> <span class="tlaGNC"> 76 : model.addEdge(className, feature);</span></span>
<span id="L39"><span class="lineNum"> 39</span> <span class="tlaGNC"> 76 : }</span></span>
<span id="L40"><span class="lineNum"> 40</span> <span class="tlaGNC"> 102 : }</span></span>
<span id="L41"><span class="lineNum"> 41</span> <span class="tlaGNC"> 2 : std::vector&lt;std::string&gt; TAN::graph(const std::string&amp; title) const</span></span>
<span id="L37"><span class="lineNum"> 37</span> <span class="tlaGNC"> 979 : for (auto feature : features) {</span></span>
<span id="L38"><span class="lineNum"> 38</span> <span class="tlaGNC"> 836 : model.addEdge(className, feature);</span></span>
<span id="L39"><span class="lineNum"> 39</span> <span class="tlaGNC"> 836 : }</span></span>
<span id="L40"><span class="lineNum"> 40</span> <span class="tlaGNC"> 1122 : }</span></span>
<span id="L41"><span class="lineNum"> 41</span> <span class="tlaGNC"> 22 : std::vector&lt;std::string&gt; TAN::graph(const std::string&amp; title) const</span></span>
<span id="L42"><span class="lineNum"> 42</span> : {</span>
<span id="L43"><span class="lineNum"> 43</span> <span class="tlaGNC"> 2 : return model.graph(title);</span></span>
<span id="L43"><span class="lineNum"> 43</span> <span class="tlaGNC"> 22 : return model.graph(title);</span></span>
<span id="L44"><span class="lineNum"> 44</span> : }</span>
<span id="L45"><span class="lineNum"> 45</span> : }</span>
</pre>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,28 +65,28 @@
<tr>
<td class="coverFn"><a href="TANLd.cc.gcov.html#L30">_ZNK8bayesnet5TANLd5graphERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE</a></td>
<td class="coverFnHi">1</td>
<td class="coverFnHi">11</td>
</tr>
<tr>
<td class="coverFn"><a href="TANLd.cc.gcov.html#L25">_ZN8bayesnet5TANLd7predictERN2at6TensorE</a></td>
<td class="coverFnHi">4</td>
<td class="coverFnHi">44</td>
</tr>
<tr>
<td class="coverFn"><a href="TANLd.cc.gcov.html#L9">_ZN8bayesnet5TANLd3fitERN2at6TensorES3_RKSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaISA_EERKSA_RSt3mapISA_S4_IiSaIiEESt4lessISA_ESaISt4pairISF_SJ_EEE</a></td>
<td class="coverFnHi">5</td>
<td class="coverFnHi">55</td>
</tr>
<tr>
<td class="coverFn"><a href="TANLd.cc.gcov.html#L8">_ZN8bayesnet5TANLdC2Ev</a></td>
<td class="coverFnHi">17</td>
<td class="coverFnHi">187</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,28 +65,28 @@
<tr>
<td class="coverFn"><a href="TANLd.cc.gcov.html#L9">_ZN8bayesnet5TANLd3fitERN2at6TensorES3_RKSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaISA_EERKSA_RSt3mapISA_S4_IiSaIiEESt4lessISA_ESaISt4pairISF_SJ_EEE</a></td>
<td class="coverFnHi">5</td>
<td class="coverFnHi">55</td>
</tr>
<tr>
<td class="coverFn"><a href="TANLd.cc.gcov.html#L25">_ZN8bayesnet5TANLd7predictERN2at6TensorE</a></td>
<td class="coverFnHi">4</td>
<td class="coverFnHi">44</td>
</tr>
<tr>
<td class="coverFn"><a href="TANLd.cc.gcov.html#L8">_ZN8bayesnet5TANLdC2Ev</a></td>
<td class="coverFnHi">17</td>
<td class="coverFnHi">187</td>
</tr>
<tr>
<td class="coverFn"><a href="TANLd.cc.gcov.html#L30">_ZNK8bayesnet5TANLd5graphERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE</a></td>
<td class="coverFnHi">1</td>
<td class="coverFnHi">11</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -69,31 +69,31 @@
<span id="L7"><span class="lineNum"> 7</span> : #include &quot;TANLd.h&quot;</span>
<span id="L8"><span class="lineNum"> 8</span> : </span>
<span id="L9"><span class="lineNum"> 9</span> : namespace bayesnet {</span>
<span id="L10"><span class="lineNum"> 10</span> <span class="tlaGNC tlaBgGNC"> 17 : TANLd::TANLd() : TAN(), Proposal(dataset, features, className) {}</span></span>
<span id="L11"><span class="lineNum"> 11</span> <span class="tlaGNC"> 5 : TANLd&amp; TANLd::fit(torch::Tensor&amp; X_, torch::Tensor&amp; y_, const std::vector&lt;std::string&gt;&amp; features_, const std::string&amp; className_, map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states_)</span></span>
<span id="L10"><span class="lineNum"> 10</span> <span class="tlaGNC tlaBgGNC"> 187 : TANLd::TANLd() : TAN(), Proposal(dataset, features, className) {}</span></span>
<span id="L11"><span class="lineNum"> 11</span> <span class="tlaGNC"> 55 : TANLd&amp; TANLd::fit(torch::Tensor&amp; X_, torch::Tensor&amp; y_, const std::vector&lt;std::string&gt;&amp; features_, const std::string&amp; className_, map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states_)</span></span>
<span id="L12"><span class="lineNum"> 12</span> : {</span>
<span id="L13"><span class="lineNum"> 13</span> <span class="tlaGNC"> 5 : checkInput(X_, y_);</span></span>
<span id="L14"><span class="lineNum"> 14</span> <span class="tlaGNC"> 5 : features = features_;</span></span>
<span id="L15"><span class="lineNum"> 15</span> <span class="tlaGNC"> 5 : className = className_;</span></span>
<span id="L16"><span class="lineNum"> 16</span> <span class="tlaGNC"> 5 : Xf = X_;</span></span>
<span id="L17"><span class="lineNum"> 17</span> <span class="tlaGNC"> 5 : y = y_;</span></span>
<span id="L13"><span class="lineNum"> 13</span> <span class="tlaGNC"> 55 : checkInput(X_, y_);</span></span>
<span id="L14"><span class="lineNum"> 14</span> <span class="tlaGNC"> 55 : features = features_;</span></span>
<span id="L15"><span class="lineNum"> 15</span> <span class="tlaGNC"> 55 : className = className_;</span></span>
<span id="L16"><span class="lineNum"> 16</span> <span class="tlaGNC"> 55 : Xf = X_;</span></span>
<span id="L17"><span class="lineNum"> 17</span> <span class="tlaGNC"> 55 : y = y_;</span></span>
<span id="L18"><span class="lineNum"> 18</span> : // Fills std::vectors Xv &amp; yv with the data from tensors X_ (discretized) &amp; y</span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC"> 5 : states = fit_local_discretization(y);</span></span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC"> 55 : states = fit_local_discretization(y);</span></span>
<span id="L20"><span class="lineNum"> 20</span> : // We have discretized the input data</span>
<span id="L21"><span class="lineNum"> 21</span> : // 1st we need to fit the model to build the normal TAN structure, TAN::fit initializes the base Bayesian network</span>
<span id="L22"><span class="lineNum"> 22</span> <span class="tlaGNC"> 5 : TAN::fit(dataset, features, className, states);</span></span>
<span id="L23"><span class="lineNum"> 23</span> <span class="tlaGNC"> 5 : states = localDiscretizationProposal(states, model);</span></span>
<span id="L24"><span class="lineNum"> 24</span> <span class="tlaGNC"> 5 : return *this;</span></span>
<span id="L22"><span class="lineNum"> 22</span> <span class="tlaGNC"> 55 : TAN::fit(dataset, features, className, states);</span></span>
<span id="L23"><span class="lineNum"> 23</span> <span class="tlaGNC"> 55 : states = localDiscretizationProposal(states, model);</span></span>
<span id="L24"><span class="lineNum"> 24</span> <span class="tlaGNC"> 55 : return *this;</span></span>
<span id="L25"><span class="lineNum"> 25</span> : </span>
<span id="L26"><span class="lineNum"> 26</span> : }</span>
<span id="L27"><span class="lineNum"> 27</span> <span class="tlaGNC"> 4 : torch::Tensor TANLd::predict(torch::Tensor&amp; X)</span></span>
<span id="L27"><span class="lineNum"> 27</span> <span class="tlaGNC"> 44 : torch::Tensor TANLd::predict(torch::Tensor&amp; X)</span></span>
<span id="L28"><span class="lineNum"> 28</span> : {</span>
<span id="L29"><span class="lineNum"> 29</span> <span class="tlaGNC"> 4 : auto Xt = prepareX(X);</span></span>
<span id="L30"><span class="lineNum"> 30</span> <span class="tlaGNC"> 8 : return TAN::predict(Xt);</span></span>
<span id="L31"><span class="lineNum"> 31</span> <span class="tlaGNC"> 4 : }</span></span>
<span id="L32"><span class="lineNum"> 32</span> <span class="tlaGNC"> 1 : std::vector&lt;std::string&gt; TANLd::graph(const std::string&amp; name) const</span></span>
<span id="L29"><span class="lineNum"> 29</span> <span class="tlaGNC"> 44 : auto Xt = prepareX(X);</span></span>
<span id="L30"><span class="lineNum"> 30</span> <span class="tlaGNC"> 88 : return TAN::predict(Xt);</span></span>
<span id="L31"><span class="lineNum"> 31</span> <span class="tlaGNC"> 44 : }</span></span>
<span id="L32"><span class="lineNum"> 32</span> <span class="tlaGNC"> 11 : std::vector&lt;std::string&gt; TANLd::graph(const std::string&amp; name) const</span></span>
<span id="L33"><span class="lineNum"> 33</span> : {</span>
<span id="L34"><span class="lineNum"> 34</span> <span class="tlaGNC"> 1 : return TAN::graph(name);</span></span>
<span id="L34"><span class="lineNum"> 34</span> <span class="tlaGNC"> 11 : return TAN::graph(name);</span></span>
<span id="L35"><span class="lineNum"> 35</span> : }</span>
<span id="L36"><span class="lineNum"> 36</span> : }</span>
</pre>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">97.4&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">97.4&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">97.4&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,28 +65,28 @@
<tr>
<td class="coverFn"><a href="AODE.cc.gcov.html#L13">_ZN8bayesnet4AODE18setHyperparametersERKN8nlohmann16json_abi_v3_11_310basic_jsonISt3mapSt6vectorNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEblmdSaNS2_14adl_serializerES5_IhSaIhEEvEE</a></td>
<td class="coverFnHi">1</td>
<td class="coverFnHi">11</td>
</tr>
<tr>
<td class="coverFn"><a href="AODE.cc.gcov.html#L32">_ZNK8bayesnet4AODE5graphERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE</a></td>
<td class="coverFnHi">1</td>
<td class="coverFnHi">11</td>
</tr>
<tr>
<td class="coverFn"><a href="AODE.cc.gcov.html#L22">_ZN8bayesnet4AODE10buildModelERKN2at6TensorE</a></td>
<td class="coverFnHi">6</td>
<td class="coverFnHi">66</td>
</tr>
<tr>
<td class="coverFn"><a href="AODE.cc.gcov.html#L8">_ZN8bayesnet4AODEC2Eb</a></td>
<td class="coverFnHi">19</td>
<td class="coverFnHi">209</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,28 +65,28 @@
<tr>
<td class="coverFn"><a href="AODE.cc.gcov.html#L22">_ZN8bayesnet4AODE10buildModelERKN2at6TensorE</a></td>
<td class="coverFnHi">6</td>
<td class="coverFnHi">66</td>
</tr>
<tr>
<td class="coverFn"><a href="AODE.cc.gcov.html#L13">_ZN8bayesnet4AODE18setHyperparametersERKN8nlohmann16json_abi_v3_11_310basic_jsonISt3mapSt6vectorNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEblmdSaNS2_14adl_serializerES5_IhSaIhEEvEE</a></td>
<td class="coverFnHi">1</td>
<td class="coverFnHi">11</td>
</tr>
<tr>
<td class="coverFn"><a href="AODE.cc.gcov.html#L8">_ZN8bayesnet4AODEC2Eb</a></td>
<td class="coverFnHi">19</td>
<td class="coverFnHi">209</td>
</tr>
<tr>
<td class="coverFn"><a href="AODE.cc.gcov.html#L32">_ZNK8bayesnet4AODE5graphERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE</a></td>
<td class="coverFnHi">1</td>
<td class="coverFnHi">11</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -69,33 +69,33 @@
<span id="L7"><span class="lineNum"> 7</span> : #include &quot;AODE.h&quot;</span>
<span id="L8"><span class="lineNum"> 8</span> : </span>
<span id="L9"><span class="lineNum"> 9</span> : namespace bayesnet {</span>
<span id="L10"><span class="lineNum"> 10</span> <span class="tlaGNC tlaBgGNC"> 19 : AODE::AODE(bool predict_voting) : Ensemble(predict_voting)</span></span>
<span id="L10"><span class="lineNum"> 10</span> <span class="tlaGNC tlaBgGNC"> 209 : AODE::AODE(bool predict_voting) : Ensemble(predict_voting)</span></span>
<span id="L11"><span class="lineNum"> 11</span> : {</span>
<span id="L12"><span class="lineNum"> 12</span> <span class="tlaGNC"> 38 : validHyperparameters = { &quot;predict_voting&quot; };</span></span>
<span id="L12"><span class="lineNum"> 12</span> <span class="tlaGNC"> 418 : validHyperparameters = { &quot;predict_voting&quot; };</span></span>
<span id="L13"><span class="lineNum"> 13</span> : </span>
<span id="L14"><span class="lineNum"> 14</span> <span class="tlaGNC"> 57 : }</span></span>
<span id="L15"><span class="lineNum"> 15</span> <span class="tlaGNC"> 1 : void AODE::setHyperparameters(const nlohmann::json&amp; hyperparameters_)</span></span>
<span id="L14"><span class="lineNum"> 14</span> <span class="tlaGNC"> 627 : }</span></span>
<span id="L15"><span class="lineNum"> 15</span> <span class="tlaGNC"> 11 : void AODE::setHyperparameters(const nlohmann::json&amp; hyperparameters_)</span></span>
<span id="L16"><span class="lineNum"> 16</span> : {</span>
<span id="L17"><span class="lineNum"> 17</span> <span class="tlaGNC"> 1 : auto hyperparameters = hyperparameters_;</span></span>
<span id="L18"><span class="lineNum"> 18</span> <span class="tlaGNC"> 1 : if (hyperparameters.contains(&quot;predict_voting&quot;)) {</span></span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC"> 1 : predict_voting = hyperparameters[&quot;predict_voting&quot;];</span></span>
<span id="L20"><span class="lineNum"> 20</span> <span class="tlaGNC"> 1 : hyperparameters.erase(&quot;predict_voting&quot;);</span></span>
<span id="L17"><span class="lineNum"> 17</span> <span class="tlaGNC"> 11 : auto hyperparameters = hyperparameters_;</span></span>
<span id="L18"><span class="lineNum"> 18</span> <span class="tlaGNC"> 11 : if (hyperparameters.contains(&quot;predict_voting&quot;)) {</span></span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC"> 11 : predict_voting = hyperparameters[&quot;predict_voting&quot;];</span></span>
<span id="L20"><span class="lineNum"> 20</span> <span class="tlaGNC"> 11 : hyperparameters.erase(&quot;predict_voting&quot;);</span></span>
<span id="L21"><span class="lineNum"> 21</span> : }</span>
<span id="L22"><span class="lineNum"> 22</span> <span class="tlaGNC"> 1 : Classifier::setHyperparameters(hyperparameters);</span></span>
<span id="L23"><span class="lineNum"> 23</span> <span class="tlaGNC"> 1 : }</span></span>
<span id="L24"><span class="lineNum"> 24</span> <span class="tlaGNC"> 6 : void AODE::buildModel(const torch::Tensor&amp; weights)</span></span>
<span id="L22"><span class="lineNum"> 22</span> <span class="tlaGNC"> 11 : Classifier::setHyperparameters(hyperparameters);</span></span>
<span id="L23"><span class="lineNum"> 23</span> <span class="tlaGNC"> 11 : }</span></span>
<span id="L24"><span class="lineNum"> 24</span> <span class="tlaGNC"> 66 : void AODE::buildModel(const torch::Tensor&amp; weights)</span></span>
<span id="L25"><span class="lineNum"> 25</span> : {</span>
<span id="L26"><span class="lineNum"> 26</span> <span class="tlaGNC"> 6 : models.clear();</span></span>
<span id="L27"><span class="lineNum"> 27</span> <span class="tlaGNC"> 6 : significanceModels.clear();</span></span>
<span id="L28"><span class="lineNum"> 28</span> <span class="tlaGNC"> 47 : for (int i = 0; i &lt; features.size(); ++i) {</span></span>
<span id="L29"><span class="lineNum"> 29</span> <span class="tlaGNC"> 41 : models.push_back(std::make_unique&lt;SPODE&gt;(i));</span></span>
<span id="L26"><span class="lineNum"> 26</span> <span class="tlaGNC"> 66 : models.clear();</span></span>
<span id="L27"><span class="lineNum"> 27</span> <span class="tlaGNC"> 66 : significanceModels.clear();</span></span>
<span id="L28"><span class="lineNum"> 28</span> <span class="tlaGNC"> 517 : for (int i = 0; i &lt; features.size(); ++i) {</span></span>
<span id="L29"><span class="lineNum"> 29</span> <span class="tlaGNC"> 451 : models.push_back(std::make_unique&lt;SPODE&gt;(i));</span></span>
<span id="L30"><span class="lineNum"> 30</span> : }</span>
<span id="L31"><span class="lineNum"> 31</span> <span class="tlaGNC"> 6 : n_models = models.size();</span></span>
<span id="L32"><span class="lineNum"> 32</span> <span class="tlaGNC"> 6 : significanceModels = std::vector&lt;double&gt;(n_models, 1.0);</span></span>
<span id="L33"><span class="lineNum"> 33</span> <span class="tlaGNC"> 6 : }</span></span>
<span id="L34"><span class="lineNum"> 34</span> <span class="tlaGNC"> 1 : std::vector&lt;std::string&gt; AODE::graph(const std::string&amp; title) const</span></span>
<span id="L31"><span class="lineNum"> 31</span> <span class="tlaGNC"> 66 : n_models = models.size();</span></span>
<span id="L32"><span class="lineNum"> 32</span> <span class="tlaGNC"> 66 : significanceModels = std::vector&lt;double&gt;(n_models, 1.0);</span></span>
<span id="L33"><span class="lineNum"> 33</span> <span class="tlaGNC"> 66 : }</span></span>
<span id="L34"><span class="lineNum"> 34</span> <span class="tlaGNC"> 11 : std::vector&lt;std::string&gt; AODE::graph(const std::string&amp; title) const</span></span>
<span id="L35"><span class="lineNum"> 35</span> : {</span>
<span id="L36"><span class="lineNum"> 36</span> <span class="tlaGNC"> 1 : return Ensemble::graph(title);</span></span>
<span id="L36"><span class="lineNum"> 36</span> <span class="tlaGNC"> 11 : return Ensemble::graph(title);</span></span>
<span id="L37"><span class="lineNum"> 37</span> : }</span>
<span id="L38"><span class="lineNum"> 38</span> : }</span>
</pre>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,35 +65,35 @@
<tr>
<td class="coverFn"><a href="AODELd.cc.gcov.html#L41">_ZNK8bayesnet6AODELd5graphERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE</a></td>
<td class="coverFnHi">1</td>
<td class="coverFnHi">11</td>
</tr>
<tr>
<td class="coverFn"><a href="AODELd.cc.gcov.html#L26">_ZN8bayesnet6AODELd10buildModelERKN2at6TensorE</a></td>
<td class="coverFnHi">5</td>
<td class="coverFnHi">55</td>
</tr>
<tr>
<td class="coverFn"><a href="AODELd.cc.gcov.html#L35">_ZN8bayesnet6AODELd10trainModelERKN2at6TensorE</a></td>
<td class="coverFnHi">5</td>
<td class="coverFnHi">55</td>
</tr>
<tr>
<td class="coverFn"><a href="AODELd.cc.gcov.html#L11">_ZN8bayesnet6AODELd3fitERN2at6TensorES3_RKSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaISA_EERKSA_RSt3mapISA_S4_IiSaIiEESt4lessISA_ESaISt4pairISF_SJ_EEE</a></td>
<td class="coverFnHi">5</td>
<td class="coverFnHi">55</td>
</tr>
<tr>
<td class="coverFn"><a href="AODELd.cc.gcov.html#L8">_ZN8bayesnet6AODELdC2Eb</a></td>
<td class="coverFnHi">17</td>
<td class="coverFnHi">187</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,35 +65,35 @@
<tr>
<td class="coverFn"><a href="AODELd.cc.gcov.html#L26">_ZN8bayesnet6AODELd10buildModelERKN2at6TensorE</a></td>
<td class="coverFnHi">5</td>
<td class="coverFnHi">55</td>
</tr>
<tr>
<td class="coverFn"><a href="AODELd.cc.gcov.html#L35">_ZN8bayesnet6AODELd10trainModelERKN2at6TensorE</a></td>
<td class="coverFnHi">5</td>
<td class="coverFnHi">55</td>
</tr>
<tr>
<td class="coverFn"><a href="AODELd.cc.gcov.html#L11">_ZN8bayesnet6AODELd3fitERN2at6TensorES3_RKSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaISA_EERKSA_RSt3mapISA_S4_IiSaIiEESt4lessISA_ESaISt4pairISF_SJ_EEE</a></td>
<td class="coverFnHi">5</td>
<td class="coverFnHi">55</td>
</tr>
<tr>
<td class="coverFn"><a href="AODELd.cc.gcov.html#L8">_ZN8bayesnet6AODELdC2Eb</a></td>
<td class="coverFnHi">17</td>
<td class="coverFnHi">187</td>
</tr>
<tr>
<td class="coverFn"><a href="AODELd.cc.gcov.html#L41">_ZNK8bayesnet6AODELd5graphERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE</a></td>
<td class="coverFnHi">1</td>
<td class="coverFnHi">11</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -69,42 +69,42 @@
<span id="L7"><span class="lineNum"> 7</span> : #include &quot;AODELd.h&quot;</span>
<span id="L8"><span class="lineNum"> 8</span> : </span>
<span id="L9"><span class="lineNum"> 9</span> : namespace bayesnet {</span>
<span id="L10"><span class="lineNum"> 10</span> <span class="tlaGNC tlaBgGNC"> 17 : AODELd::AODELd(bool predict_voting) : Ensemble(predict_voting), Proposal(dataset, features, className)</span></span>
<span id="L10"><span class="lineNum"> 10</span> <span class="tlaGNC tlaBgGNC"> 187 : AODELd::AODELd(bool predict_voting) : Ensemble(predict_voting), Proposal(dataset, features, className)</span></span>
<span id="L11"><span class="lineNum"> 11</span> : {</span>
<span id="L12"><span class="lineNum"> 12</span> <span class="tlaGNC"> 17 : }</span></span>
<span id="L13"><span class="lineNum"> 13</span> <span class="tlaGNC"> 5 : AODELd&amp; AODELd::fit(torch::Tensor&amp; X_, torch::Tensor&amp; y_, const std::vector&lt;std::string&gt;&amp; features_, const std::string&amp; className_, map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states_)</span></span>
<span id="L12"><span class="lineNum"> 12</span> <span class="tlaGNC"> 187 : }</span></span>
<span id="L13"><span class="lineNum"> 13</span> <span class="tlaGNC"> 55 : AODELd&amp; AODELd::fit(torch::Tensor&amp; X_, torch::Tensor&amp; y_, const std::vector&lt;std::string&gt;&amp; features_, const std::string&amp; className_, map&lt;std::string, std::vector&lt;int&gt;&gt;&amp; states_)</span></span>
<span id="L14"><span class="lineNum"> 14</span> : {</span>
<span id="L15"><span class="lineNum"> 15</span> <span class="tlaGNC"> 5 : checkInput(X_, y_);</span></span>
<span id="L16"><span class="lineNum"> 16</span> <span class="tlaGNC"> 5 : features = features_;</span></span>
<span id="L17"><span class="lineNum"> 17</span> <span class="tlaGNC"> 5 : className = className_;</span></span>
<span id="L18"><span class="lineNum"> 18</span> <span class="tlaGNC"> 5 : Xf = X_;</span></span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC"> 5 : y = y_;</span></span>
<span id="L15"><span class="lineNum"> 15</span> <span class="tlaGNC"> 55 : checkInput(X_, y_);</span></span>
<span id="L16"><span class="lineNum"> 16</span> <span class="tlaGNC"> 55 : features = features_;</span></span>
<span id="L17"><span class="lineNum"> 17</span> <span class="tlaGNC"> 55 : className = className_;</span></span>
<span id="L18"><span class="lineNum"> 18</span> <span class="tlaGNC"> 55 : Xf = X_;</span></span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC"> 55 : y = y_;</span></span>
<span id="L20"><span class="lineNum"> 20</span> : // Fills std::vectors Xv &amp; yv with the data from tensors X_ (discretized) &amp; y</span>
<span id="L21"><span class="lineNum"> 21</span> <span class="tlaGNC"> 5 : states = fit_local_discretization(y);</span></span>
<span id="L21"><span class="lineNum"> 21</span> <span class="tlaGNC"> 55 : states = fit_local_discretization(y);</span></span>
<span id="L22"><span class="lineNum"> 22</span> : // We have discretized the input data</span>
<span id="L23"><span class="lineNum"> 23</span> : // 1st we need to fit the model to build the normal TAN structure, TAN::fit initializes the base Bayesian network</span>
<span id="L24"><span class="lineNum"> 24</span> <span class="tlaGNC"> 5 : Ensemble::fit(dataset, features, className, states);</span></span>
<span id="L25"><span class="lineNum"> 25</span> <span class="tlaGNC"> 5 : return *this;</span></span>
<span id="L24"><span class="lineNum"> 24</span> <span class="tlaGNC"> 55 : Ensemble::fit(dataset, features, className, states);</span></span>
<span id="L25"><span class="lineNum"> 25</span> <span class="tlaGNC"> 55 : return *this;</span></span>
<span id="L26"><span class="lineNum"> 26</span> : </span>
<span id="L27"><span class="lineNum"> 27</span> : }</span>
<span id="L28"><span class="lineNum"> 28</span> <span class="tlaGNC"> 5 : void AODELd::buildModel(const torch::Tensor&amp; weights)</span></span>
<span id="L28"><span class="lineNum"> 28</span> <span class="tlaGNC"> 55 : void AODELd::buildModel(const torch::Tensor&amp; weights)</span></span>
<span id="L29"><span class="lineNum"> 29</span> : {</span>
<span id="L30"><span class="lineNum"> 30</span> <span class="tlaGNC"> 5 : models.clear();</span></span>
<span id="L31"><span class="lineNum"> 31</span> <span class="tlaGNC"> 42 : for (int i = 0; i &lt; features.size(); ++i) {</span></span>
<span id="L32"><span class="lineNum"> 32</span> <span class="tlaGNC"> 37 : models.push_back(std::make_unique&lt;SPODELd&gt;(i));</span></span>
<span id="L30"><span class="lineNum"> 30</span> <span class="tlaGNC"> 55 : models.clear();</span></span>
<span id="L31"><span class="lineNum"> 31</span> <span class="tlaGNC"> 462 : for (int i = 0; i &lt; features.size(); ++i) {</span></span>
<span id="L32"><span class="lineNum"> 32</span> <span class="tlaGNC"> 407 : models.push_back(std::make_unique&lt;SPODELd&gt;(i));</span></span>
<span id="L33"><span class="lineNum"> 33</span> : }</span>
<span id="L34"><span class="lineNum"> 34</span> <span class="tlaGNC"> 5 : n_models = models.size();</span></span>
<span id="L35"><span class="lineNum"> 35</span> <span class="tlaGNC"> 5 : significanceModels = std::vector&lt;double&gt;(n_models, 1.0);</span></span>
<span id="L36"><span class="lineNum"> 36</span> <span class="tlaGNC"> 5 : }</span></span>
<span id="L37"><span class="lineNum"> 37</span> <span class="tlaGNC"> 5 : void AODELd::trainModel(const torch::Tensor&amp; weights)</span></span>
<span id="L34"><span class="lineNum"> 34</span> <span class="tlaGNC"> 55 : n_models = models.size();</span></span>
<span id="L35"><span class="lineNum"> 35</span> <span class="tlaGNC"> 55 : significanceModels = std::vector&lt;double&gt;(n_models, 1.0);</span></span>
<span id="L36"><span class="lineNum"> 36</span> <span class="tlaGNC"> 55 : }</span></span>
<span id="L37"><span class="lineNum"> 37</span> <span class="tlaGNC"> 55 : void AODELd::trainModel(const torch::Tensor&amp; weights)</span></span>
<span id="L38"><span class="lineNum"> 38</span> : {</span>
<span id="L39"><span class="lineNum"> 39</span> <span class="tlaGNC"> 42 : for (const auto&amp; model : models) {</span></span>
<span id="L40"><span class="lineNum"> 40</span> <span class="tlaGNC"> 37 : model-&gt;fit(Xf, y, features, className, states);</span></span>
<span id="L39"><span class="lineNum"> 39</span> <span class="tlaGNC"> 462 : for (const auto&amp; model : models) {</span></span>
<span id="L40"><span class="lineNum"> 40</span> <span class="tlaGNC"> 407 : model-&gt;fit(Xf, y, features, className, states);</span></span>
<span id="L41"><span class="lineNum"> 41</span> : }</span>
<span id="L42"><span class="lineNum"> 42</span> <span class="tlaGNC"> 5 : }</span></span>
<span id="L43"><span class="lineNum"> 43</span> <span class="tlaGNC"> 1 : std::vector&lt;std::string&gt; AODELd::graph(const std::string&amp; name) const</span></span>
<span id="L42"><span class="lineNum"> 42</span> <span class="tlaGNC"> 55 : }</span></span>
<span id="L43"><span class="lineNum"> 43</span> <span class="tlaGNC"> 11 : std::vector&lt;std::string&gt; AODELd::graph(const std::string&amp; name) const</span></span>
<span id="L44"><span class="lineNum"> 44</span> : {</span>
<span id="L45"><span class="lineNum"> 45</span> <span class="tlaGNC"> 1 : return Ensemble::graph(name);</span></span>
<span id="L45"><span class="lineNum"> 45</span> <span class="tlaGNC"> 11 : return Ensemble::graph(name);</span></span>
<span id="L46"><span class="lineNum"> 46</span> : }</span>
<span id="L47"><span class="lineNum"> 47</span> : }</span>
</pre>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -31,13 +31,13 @@
<td class="headerValue">coverage.info</td>
<td></td>
<td class="headerItem">Lines:</td>
<td class="headerCovTableEntryHi">99.1&nbsp;%</td>
<td class="headerCovTableEntry">218</td>
<td class="headerCovTableEntry">216</td>
<td class="headerCovTableEntryHi">98.3&nbsp;%</td>
<td class="headerCovTableEntry">237</td>
<td class="headerCovTableEntry">233</td>
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -63,65 +63,65 @@
</tr>
<tr>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L367">_ZNK8bayesnet9BoostAODE5graphERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE</a></td>
<td class="coverFnHi">1</td>
</tr>
<tr>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L132">_ZN8bayesnet9BoostAODE20update_weights_blockEiRN2at6TensorES3_</a></td>
<td class="coverFnHi">5</td>
</tr>
<tr>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L228">_ZN8bayesnet9BoostAODE16initializeModelsEv</a></td>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L390">_ZNK8bayesnet9BoostAODE5graphERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE</a></td>
<td class="coverFnHi">8</td>
</tr>
<tr>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L56">_ZN8bayesnet9BoostAODE18setHyperparametersERKN8nlohmann16json_abi_v3_11_310basic_jsonISt3mapSt6vectorNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEblmdSaNS2_14adl_serializerES5_IhSaIhEEvEE</a></td>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L137">_ZN8bayesnet9BoostAODE20update_weights_blockEiRN2at6TensorES3_</a></td>
<td class="coverFnHi">20</td>
<td class="coverFnHi">40</td>
</tr>
<tr>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L25">_ZN8bayesnet9BoostAODE10buildModelERKN2at6TensorE</a></td>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L233">_ZN8bayesnet9BoostAODE16initializeModelsEv</a></td>
<td class="coverFnHi">21</td>
<td class="coverFnHi">76</td>
</tr>
<tr>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L261">_ZN8bayesnet9BoostAODE10trainModelERKN2at6TensorE</a></td>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L57">_ZN8bayesnet9BoostAODE18setHyperparametersERKN8nlohmann16json_abi_v3_11_310basic_jsonISt3mapSt6vectorNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEblmdSaNS2_14adl_serializerES5_IhSaIhEEvEE</a></td>
<td class="coverFnHi">21</td>
<td class="coverFnHi">187</td>
</tr>
<tr>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L17">_ZN8bayesnet9BoostAODEC2Eb</a></td>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L26">_ZN8bayesnet9BoostAODE10buildModelERKN2at6TensorE</a></td>
<td class="coverFnHi">41</td>
<td class="coverFnHi">190</td>
</tr>
<tr>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L105">_ZN8bayesnet14update_weightsERN2at6TensorES2_S2_</a></td>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L266">_ZN8bayesnet9BoostAODE10trainModelERKN2at6TensorE</a></td>
<td class="coverFnHi">108</td>
<td class="coverFnHi">190</td>
</tr>
<tr>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L301">_ZZN8bayesnet9BoostAODE10trainModelERKN2at6TensorEENKUlT_E_clIiEEDaS5_</a></td>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L18">_ZN8bayesnet9BoostAODEC2Eb</a></td>
<td class="coverFnHi">2691</td>
<td class="coverFnHi">345</td>
</tr>
<tr>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L110">_ZN8bayesnet14update_weightsERN2at6TensorES2_S2_</a></td>
<td class="coverFnHi">1025</td>
</tr>
<tr>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L313">_ZZN8bayesnet9BoostAODE10trainModelERKN2at6TensorEENKUlT_E_clIiEEDaS5_</a></td>
<td class="coverFnHi">27637</td>
</tr>

View File

@ -31,13 +31,13 @@
<td class="headerValue">coverage.info</td>
<td></td>
<td class="headerItem">Lines:</td>
<td class="headerCovTableEntryHi">99.1&nbsp;%</td>
<td class="headerCovTableEntry">218</td>
<td class="headerCovTableEntry">216</td>
<td class="headerCovTableEntryHi">98.3&nbsp;%</td>
<td class="headerCovTableEntry">237</td>
<td class="headerCovTableEntry">233</td>
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -63,65 +63,65 @@
</tr>
<tr>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L105">_ZN8bayesnet14update_weightsERN2at6TensorES2_S2_</a></td>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L110">_ZN8bayesnet14update_weightsERN2at6TensorES2_S2_</a></td>
<td class="coverFnHi">108</td>
<td class="coverFnHi">1025</td>
</tr>
<tr>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L25">_ZN8bayesnet9BoostAODE10buildModelERKN2at6TensorE</a></td>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L26">_ZN8bayesnet9BoostAODE10buildModelERKN2at6TensorE</a></td>
<td class="coverFnHi">21</td>
<td class="coverFnHi">190</td>
</tr>
<tr>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L261">_ZN8bayesnet9BoostAODE10trainModelERKN2at6TensorE</a></td>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L266">_ZN8bayesnet9BoostAODE10trainModelERKN2at6TensorE</a></td>
<td class="coverFnHi">21</td>
<td class="coverFnHi">190</td>
</tr>
<tr>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L228">_ZN8bayesnet9BoostAODE16initializeModelsEv</a></td>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L233">_ZN8bayesnet9BoostAODE16initializeModelsEv</a></td>
<td class="coverFnHi">76</td>
</tr>
<tr>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L57">_ZN8bayesnet9BoostAODE18setHyperparametersERKN8nlohmann16json_abi_v3_11_310basic_jsonISt3mapSt6vectorNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEblmdSaNS2_14adl_serializerES5_IhSaIhEEvEE</a></td>
<td class="coverFnHi">187</td>
</tr>
<tr>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L137">_ZN8bayesnet9BoostAODE20update_weights_blockEiRN2at6TensorES3_</a></td>
<td class="coverFnHi">40</td>
</tr>
<tr>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L18">_ZN8bayesnet9BoostAODEC2Eb</a></td>
<td class="coverFnHi">345</td>
</tr>
<tr>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L390">_ZNK8bayesnet9BoostAODE5graphERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE</a></td>
<td class="coverFnHi">8</td>
</tr>
<tr>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L56">_ZN8bayesnet9BoostAODE18setHyperparametersERKN8nlohmann16json_abi_v3_11_310basic_jsonISt3mapSt6vectorNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEblmdSaNS2_14adl_serializerES5_IhSaIhEEvEE</a></td>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L313">_ZZN8bayesnet9BoostAODE10trainModelERKN2at6TensorEENKUlT_E_clIiEEDaS5_</a></td>
<td class="coverFnHi">20</td>
</tr>
<tr>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L132">_ZN8bayesnet9BoostAODE20update_weights_blockEiRN2at6TensorES3_</a></td>
<td class="coverFnHi">5</td>
</tr>
<tr>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L17">_ZN8bayesnet9BoostAODEC2Eb</a></td>
<td class="coverFnHi">41</td>
</tr>
<tr>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L367">_ZNK8bayesnet9BoostAODE5graphERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE</a></td>
<td class="coverFnHi">1</td>
</tr>
<tr>
<td class="coverFn"><a href="BoostAODE.cc.gcov.html#L301">_ZZN8bayesnet9BoostAODE10trainModelERKN2at6TensorEENKUlT_E_clIiEEDaS5_</a></td>
<td class="coverFnHi">2691</td>
<td class="coverFnHi">27637</td>
</tr>

View File

@ -31,13 +31,13 @@
<td class="headerValue">coverage.info</td>
<td></td>
<td class="headerItem">Lines:</td>
<td class="headerCovTableEntryHi">99.1&nbsp;%</td>
<td class="headerCovTableEntry">218</td>
<td class="headerCovTableEntry">216</td>
<td class="headerCovTableEntryHi">98.3&nbsp;%</td>
<td class="headerCovTableEntry">237</td>
<td class="headerCovTableEntry">233</td>
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -75,364 +75,387 @@
<span id="L13"><span class="lineNum"> 13</span> : #include &quot;bayesnet/feature_selection/FCBF.h&quot;</span>
<span id="L14"><span class="lineNum"> 14</span> : #include &quot;bayesnet/feature_selection/IWSS.h&quot;</span>
<span id="L15"><span class="lineNum"> 15</span> : #include &quot;BoostAODE.h&quot;</span>
<span id="L16"><span class="lineNum"> 16</span> : </span>
<span id="L17"><span class="lineNum"> 17</span> : namespace bayesnet {</span>
<span id="L18"><span class="lineNum"> 18</span> : </span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC tlaBgGNC"> 41 : BoostAODE::BoostAODE(bool predict_voting) : Ensemble(predict_voting)</span></span>
<span id="L20"><span class="lineNum"> 20</span> : {</span>
<span id="L21"><span class="lineNum"> 21</span> <span class="tlaGNC"> 410 : validHyperparameters = {</span></span>
<span id="L22"><span class="lineNum"> 22</span> : &quot;maxModels&quot;, &quot;bisection&quot;, &quot;order&quot;, &quot;convergence&quot;, &quot;threshold&quot;,</span>
<span id="L23"><span class="lineNum"> 23</span> : &quot;select_features&quot;, &quot;maxTolerance&quot;, &quot;predict_voting&quot;, &quot;block_update&quot;</span>
<span id="L24"><span class="lineNum"> 24</span> <span class="tlaGNC"> 410 : };</span></span>
<span id="L25"><span class="lineNum"> 25</span> : </span>
<span id="L26"><span class="lineNum"> 26</span> <span class="tlaGNC"> 123 : }</span></span>
<span id="L27"><span class="lineNum"> 27</span> <span class="tlaGNC"> 21 : void BoostAODE::buildModel(const torch::Tensor&amp; weights)</span></span>
<span id="L28"><span class="lineNum"> 28</span> : {</span>
<span id="L29"><span class="lineNum"> 29</span> : // Models shall be built in trainModel</span>
<span id="L30"><span class="lineNum"> 30</span> <span class="tlaGNC"> 21 : models.clear();</span></span>
<span id="L31"><span class="lineNum"> 31</span> <span class="tlaGNC"> 21 : significanceModels.clear();</span></span>
<span id="L32"><span class="lineNum"> 32</span> <span class="tlaGNC"> 21 : n_models = 0;</span></span>
<span id="L33"><span class="lineNum"> 33</span> : // Prepare the validation dataset</span>
<span id="L34"><span class="lineNum"> 34</span> <span class="tlaGNC"> 63 : auto y_ = dataset.index({ -1, &quot;...&quot; });</span></span>
<span id="L35"><span class="lineNum"> 35</span> <span class="tlaGNC"> 21 : if (convergence) {</span></span>
<span id="L36"><span class="lineNum"> 36</span> : // Prepare train &amp; validation sets from train data</span>
<span id="L37"><span class="lineNum"> 37</span> <span class="tlaGNC"> 17 : auto fold = folding::StratifiedKFold(5, y_, 271);</span></span>
<span id="L38"><span class="lineNum"> 38</span> <span class="tlaGNC"> 17 : auto [train, test] = fold.getFold(0);</span></span>
<span id="L39"><span class="lineNum"> 39</span> <span class="tlaGNC"> 17 : auto train_t = torch::tensor(train);</span></span>
<span id="L40"><span class="lineNum"> 40</span> <span class="tlaGNC"> 17 : auto test_t = torch::tensor(test);</span></span>
<span id="L41"><span class="lineNum"> 41</span> : // Get train and validation sets</span>
<span id="L42"><span class="lineNum"> 42</span> <span class="tlaGNC"> 85 : X_train = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), train_t });</span></span>
<span id="L43"><span class="lineNum"> 43</span> <span class="tlaGNC"> 51 : y_train = dataset.index({ -1, train_t });</span></span>
<span id="L44"><span class="lineNum"> 44</span> <span class="tlaGNC"> 85 : X_test = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), test_t });</span></span>
<span id="L45"><span class="lineNum"> 45</span> <span class="tlaGNC"> 51 : y_test = dataset.index({ -1, test_t });</span></span>
<span id="L46"><span class="lineNum"> 46</span> <span class="tlaGNC"> 17 : dataset = X_train;</span></span>
<span id="L47"><span class="lineNum"> 47</span> <span class="tlaGNC"> 17 : m = X_train.size(1);</span></span>
<span id="L48"><span class="lineNum"> 48</span> <span class="tlaGNC"> 17 : auto n_classes = states.at(className).size();</span></span>
<span id="L49"><span class="lineNum"> 49</span> : // Build dataset with train data</span>
<span id="L50"><span class="lineNum"> 50</span> <span class="tlaGNC"> 17 : buildDataset(y_train);</span></span>
<span id="L51"><span class="lineNum"> 51</span> <span class="tlaGNC"> 17 : metrics = Metrics(dataset, features, className, n_classes);</span></span>
<span id="L52"><span class="lineNum"> 52</span> <span class="tlaGNC"> 17 : } else {</span></span>
<span id="L53"><span class="lineNum"> 53</span> : // Use all data to train</span>
<span id="L54"><span class="lineNum"> 54</span> <span class="tlaGNC"> 16 : X_train = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), &quot;...&quot; });</span></span>
<span id="L55"><span class="lineNum"> 55</span> <span class="tlaGNC"> 4 : y_train = y_;</span></span>
<span id="L56"><span class="lineNum"> 56</span> : }</span>
<span id="L57"><span class="lineNum"> 57</span> <span class="tlaGNC"> 203 : }</span></span>
<span id="L58"><span class="lineNum"> 58</span> <span class="tlaGNC"> 20 : void BoostAODE::setHyperparameters(const nlohmann::json&amp; hyperparameters_)</span></span>
<span id="L59"><span class="lineNum"> 59</span> : {</span>
<span id="L60"><span class="lineNum"> 60</span> <span class="tlaGNC"> 20 : auto hyperparameters = hyperparameters_;</span></span>
<span id="L61"><span class="lineNum"> 61</span> <span class="tlaGNC"> 20 : if (hyperparameters.contains(&quot;order&quot;)) {</span></span>
<span id="L62"><span class="lineNum"> 62</span> <span class="tlaGNC"> 25 : std::vector&lt;std::string&gt; algos = { Orders.ASC, Orders.DESC, Orders.RAND };</span></span>
<span id="L63"><span class="lineNum"> 63</span> <span class="tlaGNC"> 5 : order_algorithm = hyperparameters[&quot;order&quot;];</span></span>
<span id="L64"><span class="lineNum"> 64</span> <span class="tlaGNC"> 5 : if (std::find(algos.begin(), algos.end(), order_algorithm) == algos.end()) {</span></span>
<span id="L65"><span class="lineNum"> 65</span> <span class="tlaGNC"> 1 : throw std::invalid_argument(&quot;Invalid order algorithm, valid values [&quot; + Orders.ASC + &quot;, &quot; + Orders.DESC + &quot;, &quot; + Orders.RAND + &quot;]&quot;);</span></span>
<span id="L66"><span class="lineNum"> 66</span> : }</span>
<span id="L67"><span class="lineNum"> 67</span> <span class="tlaGNC"> 4 : hyperparameters.erase(&quot;order&quot;);</span></span>
<span id="L68"><span class="lineNum"> 68</span> <span class="tlaGNC"> 5 : }</span></span>
<span id="L69"><span class="lineNum"> 69</span> <span class="tlaGNC"> 19 : if (hyperparameters.contains(&quot;convergence&quot;)) {</span></span>
<span id="L70"><span class="lineNum"> 70</span> <span class="tlaGNC"> 7 : convergence = hyperparameters[&quot;convergence&quot;];</span></span>
<span id="L71"><span class="lineNum"> 71</span> <span class="tlaGNC"> 7 : hyperparameters.erase(&quot;convergence&quot;);</span></span>
<span id="L72"><span class="lineNum"> 72</span> : }</span>
<span id="L73"><span class="lineNum"> 73</span> <span class="tlaGNC"> 19 : if (hyperparameters.contains(&quot;bisection&quot;)) {</span></span>
<span id="L74"><span class="lineNum"> 74</span> <span class="tlaGNC"> 6 : bisection = hyperparameters[&quot;bisection&quot;];</span></span>
<span id="L75"><span class="lineNum"> 75</span> <span class="tlaGNC"> 6 : hyperparameters.erase(&quot;bisection&quot;);</span></span>
<span id="L76"><span class="lineNum"> 76</span> : }</span>
<span id="L77"><span class="lineNum"> 77</span> <span class="tlaGNC"> 19 : if (hyperparameters.contains(&quot;threshold&quot;)) {</span></span>
<span id="L78"><span class="lineNum"> 78</span> <span class="tlaGNC"> 6 : threshold = hyperparameters[&quot;threshold&quot;];</span></span>
<span id="L79"><span class="lineNum"> 79</span> <span class="tlaGNC"> 6 : hyperparameters.erase(&quot;threshold&quot;);</span></span>
<span id="L80"><span class="lineNum"> 80</span> : }</span>
<span id="L81"><span class="lineNum"> 81</span> <span class="tlaGNC"> 19 : if (hyperparameters.contains(&quot;maxTolerance&quot;)) {</span></span>
<span id="L82"><span class="lineNum"> 82</span> <span class="tlaGNC"> 9 : maxTolerance = hyperparameters[&quot;maxTolerance&quot;];</span></span>
<span id="L83"><span class="lineNum"> 83</span> <span class="tlaGNC"> 9 : if (maxTolerance &lt; 1 || maxTolerance &gt; 4)</span></span>
<span id="L84"><span class="lineNum"> 84</span> <span class="tlaGNC"> 3 : throw std::invalid_argument(&quot;Invalid maxTolerance value, must be greater in [1, 4]&quot;);</span></span>
<span id="L85"><span class="lineNum"> 85</span> <span class="tlaGNC"> 6 : hyperparameters.erase(&quot;maxTolerance&quot;);</span></span>
<span id="L86"><span class="lineNum"> 86</span> : }</span>
<span id="L87"><span class="lineNum"> 87</span> <span class="tlaGNC"> 16 : if (hyperparameters.contains(&quot;predict_voting&quot;)) {</span></span>
<span id="L88"><span class="lineNum"> 88</span> <span class="tlaGNC"> 1 : predict_voting = hyperparameters[&quot;predict_voting&quot;];</span></span>
<span id="L89"><span class="lineNum"> 89</span> <span class="tlaGNC"> 1 : hyperparameters.erase(&quot;predict_voting&quot;);</span></span>
<span id="L90"><span class="lineNum"> 90</span> : }</span>
<span id="L91"><span class="lineNum"> 91</span> <span class="tlaGNC"> 16 : if (hyperparameters.contains(&quot;select_features&quot;)) {</span></span>
<span id="L92"><span class="lineNum"> 92</span> <span class="tlaGNC"> 9 : auto selectedAlgorithm = hyperparameters[&quot;select_features&quot;];</span></span>
<span id="L93"><span class="lineNum"> 93</span> <span class="tlaGNC"> 45 : std::vector&lt;std::string&gt; algos = { SelectFeatures.IWSS, SelectFeatures.CFS, SelectFeatures.FCBF };</span></span>
<span id="L94"><span class="lineNum"> 94</span> <span class="tlaGNC"> 9 : selectFeatures = true;</span></span>
<span id="L95"><span class="lineNum"> 95</span> <span class="tlaGNC"> 9 : select_features_algorithm = selectedAlgorithm;</span></span>
<span id="L96"><span class="lineNum"> 96</span> <span class="tlaGNC"> 9 : if (std::find(algos.begin(), algos.end(), selectedAlgorithm) == algos.end()) {</span></span>
<span id="L97"><span class="lineNum"> 97</span> <span class="tlaGNC"> 1 : throw std::invalid_argument(&quot;Invalid selectFeatures value, valid values [&quot; + SelectFeatures.IWSS + &quot;, &quot; + SelectFeatures.CFS + &quot;, &quot; + SelectFeatures.FCBF + &quot;]&quot;);</span></span>
<span id="L98"><span class="lineNum"> 98</span> : }</span>
<span id="L99"><span class="lineNum"> 99</span> <span class="tlaGNC"> 8 : hyperparameters.erase(&quot;select_features&quot;);</span></span>
<span id="L100"><span class="lineNum"> 100</span> <span class="tlaGNC"> 10 : }</span></span>
<span id="L101"><span class="lineNum"> 101</span> <span class="tlaGNC"> 15 : if (hyperparameters.contains(&quot;block_update&quot;)) {</span></span>
<span id="L102"><span class="lineNum"> 102</span> <span class="tlaGNC"> 2 : block_update = hyperparameters[&quot;block_update&quot;];</span></span>
<span id="L103"><span class="lineNum"> 103</span> <span class="tlaGNC"> 2 : hyperparameters.erase(&quot;block_update&quot;);</span></span>
<span id="L104"><span class="lineNum"> 104</span> : }</span>
<span id="L105"><span class="lineNum"> 105</span> <span class="tlaGNC"> 15 : Classifier::setHyperparameters(hyperparameters);</span></span>
<span id="L106"><span class="lineNum"> 106</span> <span class="tlaGNC"> 34 : }</span></span>
<span id="L107"><span class="lineNum"> 107</span> <span class="tlaGNC"> 108 : std::tuple&lt;torch::Tensor&amp;, double, bool&gt; update_weights(torch::Tensor&amp; ytrain, torch::Tensor&amp; ypred, torch::Tensor&amp; weights)</span></span>
<span id="L108"><span class="lineNum"> 108</span> : {</span>
<span id="L109"><span class="lineNum"> 109</span> <span class="tlaGNC"> 108 : bool terminate = false;</span></span>
<span id="L110"><span class="lineNum"> 110</span> <span class="tlaGNC"> 108 : double alpha_t = 0;</span></span>
<span id="L111"><span class="lineNum"> 111</span> <span class="tlaGNC"> 108 : auto mask_wrong = ypred != ytrain;</span></span>
<span id="L112"><span class="lineNum"> 112</span> <span class="tlaGNC"> 108 : auto mask_right = ypred == ytrain;</span></span>
<span id="L113"><span class="lineNum"> 113</span> <span class="tlaGNC"> 108 : auto masked_weights = weights * mask_wrong.to(weights.dtype());</span></span>
<span id="L114"><span class="lineNum"> 114</span> <span class="tlaGNC"> 108 : double epsilon_t = masked_weights.sum().item&lt;double&gt;();</span></span>
<span id="L115"><span class="lineNum"> 115</span> <span class="tlaGNC"> 108 : if (epsilon_t &gt; 0.5) {</span></span>
<span id="L116"><span class="lineNum"> 116</span> : // Inverse the weights policy (plot ln(wt))</span>
<span id="L117"><span class="lineNum"> 117</span> : // &quot;In each round of AdaBoost, there is a sanity check to ensure that the current base </span>
<span id="L118"><span class="lineNum"> 118</span> : // learner is better than random guess&quot; (Zhi-Hua Zhou, 2012)</span>
<span id="L119"><span class="lineNum"> 119</span> <span class="tlaGNC"> 4 : terminate = true;</span></span>
<span id="L120"><span class="lineNum"> 120</span> : } else {</span>
<span id="L121"><span class="lineNum"> 121</span> <span class="tlaGNC"> 104 : double wt = (1 - epsilon_t) / epsilon_t;</span></span>
<span id="L122"><span class="lineNum"> 122</span> <span class="tlaGNC"> 104 : alpha_t = epsilon_t == 0 ? 1 : 0.5 * log(wt);</span></span>
<span id="L123"><span class="lineNum"> 123</span> : // Step 3.2: Update weights for next classifier</span>
<span id="L124"><span class="lineNum"> 124</span> : // Step 3.2.1: Update weights of wrong samples</span>
<span id="L125"><span class="lineNum"> 125</span> <span class="tlaGNC"> 104 : weights += mask_wrong.to(weights.dtype()) * exp(alpha_t) * weights;</span></span>
<span id="L126"><span class="lineNum"> 126</span> : // Step 3.2.2: Update weights of right samples</span>
<span id="L127"><span class="lineNum"> 127</span> <span class="tlaGNC"> 104 : weights += mask_right.to(weights.dtype()) * exp(-alpha_t) * weights;</span></span>
<span id="L128"><span class="lineNum"> 128</span> : // Step 3.3: Normalise the weights</span>
<span id="L129"><span class="lineNum"> 129</span> <span class="tlaGNC"> 104 : double totalWeights = torch::sum(weights).item&lt;double&gt;();</span></span>
<span id="L130"><span class="lineNum"> 130</span> <span class="tlaGNC"> 104 : weights = weights / totalWeights;</span></span>
<span id="L131"><span class="lineNum"> 131</span> : }</span>
<span id="L132"><span class="lineNum"> 132</span> <span class="tlaGNC"> 216 : return { weights, alpha_t, terminate };</span></span>
<span id="L133"><span class="lineNum"> 133</span> <span class="tlaGNC"> 108 : }</span></span>
<span id="L134"><span class="lineNum"> 134</span> <span class="tlaGNC"> 5 : std::tuple&lt;torch::Tensor&amp;, double, bool&gt; BoostAODE::update_weights_block(int k, torch::Tensor&amp; ytrain, torch::Tensor&amp; weights)</span></span>
<span id="L135"><span class="lineNum"> 135</span> : {</span>
<span id="L136"><span class="lineNum"> 136</span> : /* Update Block algorithm</span>
<span id="L137"><span class="lineNum"> 137</span> : k = # of models in block</span>
<span id="L138"><span class="lineNum"> 138</span> : n_models = # of models in ensemble to make predictions</span>
<span id="L139"><span class="lineNum"> 139</span> : n_models_bak = # models saved</span>
<span id="L140"><span class="lineNum"> 140</span> : models = vector of models to make predictions</span>
<span id="L141"><span class="lineNum"> 141</span> : models_bak = models not used to make predictions</span>
<span id="L142"><span class="lineNum"> 142</span> : significances_bak = backup of significances vector</span>
<span id="L143"><span class="lineNum"> 143</span> : </span>
<span id="L144"><span class="lineNum"> 144</span> : Case list</span>
<span id="L145"><span class="lineNum"> 145</span> : A) k = 1, n_models = 1 =&gt; n = 0 , n_models = n + k</span>
<span id="L146"><span class="lineNum"> 146</span> : B) k = 1, n_models = n + 1 =&gt; n_models = n + k</span>
<span id="L147"><span class="lineNum"> 147</span> : C) k &gt; 1, n_models = k + 1 =&gt; n= 1, n_models = n + k</span>
<span id="L148"><span class="lineNum"> 148</span> : D) k &gt; 1, n_models = k =&gt; n = 0, n_models = n + k</span>
<span id="L149"><span class="lineNum"> 149</span> : E) k &gt; 1, n_models = k + n =&gt; n_models = n + k</span>
<span id="L150"><span class="lineNum"> 150</span> : </span>
<span id="L151"><span class="lineNum"> 151</span> : A, D) n=0, k &gt; 0, n_models == k</span>
<span id="L152"><span class="lineNum"> 152</span> : 1. n_models_bak &lt;- n_models</span>
<span id="L153"><span class="lineNum"> 153</span> : 2. significances_bak &lt;- significances</span>
<span id="L154"><span class="lineNum"> 154</span> : 3. significances = vector(k, 1)</span>
<span id="L155"><span class="lineNum"> 155</span> : 4. Dont move any classifiers out of models</span>
<span id="L156"><span class="lineNum"> 156</span> : 5. n_models &lt;- k</span>
<span id="L157"><span class="lineNum"> 157</span> : 6. Make prediction, compute alpha, update weights</span>
<span id="L158"><span class="lineNum"> 158</span> : 7. Dont restore any classifiers to models</span>
<span id="L159"><span class="lineNum"> 159</span> : 8. significances &lt;- significances_bak</span>
<span id="L160"><span class="lineNum"> 160</span> : 9. Update last k significances</span>
<span id="L161"><span class="lineNum"> 161</span> : 10. n_models &lt;- n_models_bak</span>
<span id="L162"><span class="lineNum"> 162</span> : </span>
<span id="L163"><span class="lineNum"> 163</span> : B, C, E) n &gt; 0, k &gt; 0, n_models == n + k</span>
<span id="L164"><span class="lineNum"> 164</span> : 1. n_models_bak &lt;- n_models</span>
<span id="L165"><span class="lineNum"> 165</span> : 2. significances_bak &lt;- significances</span>
<span id="L166"><span class="lineNum"> 166</span> : 3. significances = vector(k, 1)</span>
<span id="L167"><span class="lineNum"> 167</span> : 4. Move first n classifiers to models_bak</span>
<span id="L168"><span class="lineNum"> 168</span> : 5. n_models &lt;- k</span>
<span id="L169"><span class="lineNum"> 169</span> : 6. Make prediction, compute alpha, update weights</span>
<span id="L170"><span class="lineNum"> 170</span> : 7. Insert classifiers in models_bak to be the first n models</span>
<span id="L171"><span class="lineNum"> 171</span> : 8. significances &lt;- significances_bak</span>
<span id="L172"><span class="lineNum"> 172</span> : 9. Update last k significances</span>
<span id="L173"><span class="lineNum"> 173</span> : 10. n_models &lt;- n_models_bak</span>
<span id="L174"><span class="lineNum"> 174</span> : */</span>
<span id="L175"><span class="lineNum"> 175</span> : //</span>
<span id="L176"><span class="lineNum"> 176</span> : // Make predict with only the last k models</span>
<span id="L177"><span class="lineNum"> 177</span> : //</span>
<span id="L178"><span class="lineNum"> 178</span> <span class="tlaGNC"> 5 : std::unique_ptr&lt;Classifier&gt; model;</span></span>
<span id="L179"><span class="lineNum"> 179</span> <span class="tlaGNC"> 5 : std::vector&lt;std::unique_ptr&lt;Classifier&gt;&gt; models_bak;</span></span>
<span id="L180"><span class="lineNum"> 180</span> : // 1. n_models_bak &lt;- n_models 2. significances_bak &lt;- significances</span>
<span id="L181"><span class="lineNum"> 181</span> <span class="tlaGNC"> 5 : auto significance_bak = significanceModels;</span></span>
<span id="L182"><span class="lineNum"> 182</span> <span class="tlaGNC"> 5 : auto n_models_bak = n_models;</span></span>
<span id="L183"><span class="lineNum"> 183</span> : // 3. significances = vector(k, 1)</span>
<span id="L184"><span class="lineNum"> 184</span> <span class="tlaGNC"> 5 : significanceModels = std::vector&lt;double&gt;(k, 1.0);</span></span>
<span id="L185"><span class="lineNum"> 185</span> : // 4. Move first n classifiers to models_bak</span>
<span id="L186"><span class="lineNum"> 186</span> : // backup the first n_models - k models (if n_models == k, don't backup any)</span>
<span id="L187"><span class="lineNum"> 187</span> <span class="tlaGNC"> 20 : for (int i = 0; i &lt; n_models - k; ++i) {</span></span>
<span id="L188"><span class="lineNum"> 188</span> <span class="tlaGNC"> 15 : model = std::move(models[0]);</span></span>
<span id="L189"><span class="lineNum"> 189</span> <span class="tlaGNC"> 15 : models.erase(models.begin());</span></span>
<span id="L190"><span class="lineNum"> 190</span> <span class="tlaGNC"> 15 : models_bak.push_back(std::move(model));</span></span>
<span id="L191"><span class="lineNum"> 191</span> : }</span>
<span id="L192"><span class="lineNum"> 192</span> <span class="tlaGNC"> 5 : assert(models.size() == k);</span></span>
<span id="L193"><span class="lineNum"> 193</span> : // 5. n_models &lt;- k</span>
<span id="L194"><span class="lineNum"> 194</span> <span class="tlaGNC"> 5 : n_models = k;</span></span>
<span id="L195"><span class="lineNum"> 195</span> : // 6. Make prediction, compute alpha, update weights</span>
<span id="L196"><span class="lineNum"> 196</span> <span class="tlaGNC"> 5 : auto ypred = predict(X_train);</span></span>
<span id="L197"><span class="lineNum"> 197</span> : //</span>
<span id="L198"><span class="lineNum"> 198</span> : // Update weights</span>
<span id="L199"><span class="lineNum"> 199</span> : //</span>
<span id="L200"><span class="lineNum"> 200</span> : double alpha_t;</span>
<span id="L201"><span class="lineNum"> 201</span> : bool terminate;</span>
<span id="L202"><span class="lineNum"> 202</span> <span class="tlaGNC"> 5 : std::tie(weights, alpha_t, terminate) = update_weights(y_train, ypred, weights);</span></span>
<span id="L203"><span class="lineNum"> 203</span> : //</span>
<span id="L204"><span class="lineNum"> 204</span> : // Restore the models if needed</span>
<span id="L205"><span class="lineNum"> 205</span> : //</span>
<span id="L206"><span class="lineNum"> 206</span> : // 7. Insert classifiers in models_bak to be the first n models</span>
<span id="L207"><span class="lineNum"> 207</span> : // if n_models_bak == k, don't restore any, because none of them were moved</span>
<span id="L208"><span class="lineNum"> 208</span> <span class="tlaGNC"> 5 : if (k != n_models_bak) {</span></span>
<span id="L209"><span class="lineNum"> 209</span> : // Insert in the same order as they were extracted</span>
<span id="L210"><span class="lineNum"> 210</span> <span class="tlaGNC"> 4 : int bak_size = models_bak.size();</span></span>
<span id="L211"><span class="lineNum"> 211</span> <span class="tlaGNC"> 19 : for (int i = 0; i &lt; bak_size; ++i) {</span></span>
<span id="L212"><span class="lineNum"> 212</span> <span class="tlaGNC"> 15 : model = std::move(models_bak[bak_size - 1 - i]);</span></span>
<span id="L213"><span class="lineNum"> 213</span> <span class="tlaGNC"> 15 : models_bak.erase(models_bak.end() - 1);</span></span>
<span id="L214"><span class="lineNum"> 214</span> <span class="tlaGNC"> 15 : models.insert(models.begin(), std::move(model));</span></span>
<span id="L215"><span class="lineNum"> 215</span> : }</span>
<span id="L216"><span class="lineNum"> 216</span> : }</span>
<span id="L217"><span class="lineNum"> 217</span> : // 8. significances &lt;- significances_bak</span>
<span id="L218"><span class="lineNum"> 218</span> <span class="tlaGNC"> 5 : significanceModels = significance_bak;</span></span>
<span id="L219"><span class="lineNum"> 219</span> : //</span>
<span id="L220"><span class="lineNum"> 220</span> : // Update the significance of the last k models</span>
<span id="L221"><span class="lineNum"> 221</span> : //</span>
<span id="L222"><span class="lineNum"> 222</span> : // 9. Update last k significances</span>
<span id="L223"><span class="lineNum"> 223</span> <span class="tlaGNC"> 21 : for (int i = 0; i &lt; k; ++i) {</span></span>
<span id="L224"><span class="lineNum"> 224</span> <span class="tlaGNC"> 16 : significanceModels[n_models_bak - k + i] = alpha_t;</span></span>
<span id="L225"><span class="lineNum"> 225</span> : }</span>
<span id="L226"><span class="lineNum"> 226</span> : // 10. n_models &lt;- n_models_bak</span>
<span id="L227"><span class="lineNum"> 227</span> <span class="tlaGNC"> 5 : n_models = n_models_bak;</span></span>
<span id="L228"><span class="lineNum"> 228</span> <span class="tlaGNC"> 10 : return { weights, alpha_t, terminate };</span></span>
<span id="L229"><span class="lineNum"> 229</span> <span class="tlaGNC"> 5 : }</span></span>
<span id="L230"><span class="lineNum"> 230</span> <span class="tlaGNC"> 8 : std::vector&lt;int&gt; BoostAODE::initializeModels()</span></span>
<span id="L231"><span class="lineNum"> 231</span> : {</span>
<span id="L232"><span class="lineNum"> 232</span> <span class="tlaGNC"> 8 : std::vector&lt;int&gt; featuresUsed;</span></span>
<span id="L233"><span class="lineNum"> 233</span> <span class="tlaGNC"> 8 : torch::Tensor weights_ = torch::full({ m }, 1.0 / m, torch::kFloat64);</span></span>
<span id="L234"><span class="lineNum"> 234</span> <span class="tlaGNC"> 8 : int maxFeatures = 0;</span></span>
<span id="L235"><span class="lineNum"> 235</span> <span class="tlaGNC"> 8 : if (select_features_algorithm == SelectFeatures.CFS) {</span></span>
<span id="L236"><span class="lineNum"> 236</span> <span class="tlaGNC"> 2 : featureSelector = new CFS(dataset, features, className, maxFeatures, states.at(className).size(), weights_);</span></span>
<span id="L237"><span class="lineNum"> 237</span> <span class="tlaGNC"> 6 : } else if (select_features_algorithm == SelectFeatures.IWSS) {</span></span>
<span id="L238"><span class="lineNum"> 238</span> <span class="tlaGNC"> 3 : if (threshold &lt; 0 || threshold &gt;0.5) {</span></span>
<span id="L239"><span class="lineNum"> 239</span> <span class="tlaGNC"> 2 : throw std::invalid_argument(&quot;Invalid threshold value for &quot; + SelectFeatures.IWSS + &quot; [0, 0.5]&quot;);</span></span>
<span id="L240"><span class="lineNum"> 240</span> : }</span>
<span id="L241"><span class="lineNum"> 241</span> <span class="tlaGNC"> 1 : featureSelector = new IWSS(dataset, features, className, maxFeatures, states.at(className).size(), weights_, threshold);</span></span>
<span id="L242"><span class="lineNum"> 242</span> <span class="tlaGNC"> 3 : } else if (select_features_algorithm == SelectFeatures.FCBF) {</span></span>
<span id="L243"><span class="lineNum"> 243</span> <span class="tlaGNC"> 3 : if (threshold &lt; 1e-7 || threshold &gt; 1) {</span></span>
<span id="L244"><span class="lineNum"> 244</span> <span class="tlaGNC"> 2 : throw std::invalid_argument(&quot;Invalid threshold value for &quot; + SelectFeatures.FCBF + &quot; [1e-7, 1]&quot;);</span></span>
<span id="L16"><span class="lineNum"> 16</span> : #include &quot;lib/log/loguru.cpp&quot;</span>
<span id="L17"><span class="lineNum"> 17</span> : </span>
<span id="L18"><span class="lineNum"> 18</span> : namespace bayesnet {</span>
<span id="L19"><span class="lineNum"> 19</span> : </span>
<span id="L20"><span class="lineNum"> 20</span> <span class="tlaGNC tlaBgGNC"> 345 : BoostAODE::BoostAODE(bool predict_voting) : Ensemble(predict_voting)</span></span>
<span id="L21"><span class="lineNum"> 21</span> : {</span>
<span id="L22"><span class="lineNum"> 22</span> <span class="tlaGNC"> 3795 : validHyperparameters = {</span></span>
<span id="L23"><span class="lineNum"> 23</span> : &quot;maxModels&quot;, &quot;bisection&quot;, &quot;order&quot;, &quot;convergence&quot;, &quot;convergence_best&quot;, &quot;threshold&quot;,</span>
<span id="L24"><span class="lineNum"> 24</span> : &quot;select_features&quot;, &quot;maxTolerance&quot;, &quot;predict_voting&quot;, &quot;block_update&quot;</span>
<span id="L25"><span class="lineNum"> 25</span> <span class="tlaGNC"> 3795 : };</span></span>
<span id="L26"><span class="lineNum"> 26</span> : </span>
<span id="L27"><span class="lineNum"> 27</span> <span class="tlaGNC"> 1035 : }</span></span>
<span id="L28"><span class="lineNum"> 28</span> <span class="tlaGNC"> 190 : void BoostAODE::buildModel(const torch::Tensor&amp; weights)</span></span>
<span id="L29"><span class="lineNum"> 29</span> : {</span>
<span id="L30"><span class="lineNum"> 30</span> : // Models shall be built in trainModel</span>
<span id="L31"><span class="lineNum"> 31</span> <span class="tlaGNC"> 190 : models.clear();</span></span>
<span id="L32"><span class="lineNum"> 32</span> <span class="tlaGNC"> 190 : significanceModels.clear();</span></span>
<span id="L33"><span class="lineNum"> 33</span> <span class="tlaGNC"> 190 : n_models = 0;</span></span>
<span id="L34"><span class="lineNum"> 34</span> : // Prepare the validation dataset</span>
<span id="L35"><span class="lineNum"> 35</span> <span class="tlaGNC"> 570 : auto y_ = dataset.index({ -1, &quot;...&quot; });</span></span>
<span id="L36"><span class="lineNum"> 36</span> <span class="tlaGNC"> 190 : if (convergence) {</span></span>
<span id="L37"><span class="lineNum"> 37</span> : // Prepare train &amp; validation sets from train data</span>
<span id="L38"><span class="lineNum"> 38</span> <span class="tlaGNC"> 155 : auto fold = folding::StratifiedKFold(5, y_, 271);</span></span>
<span id="L39"><span class="lineNum"> 39</span> <span class="tlaGNC"> 155 : auto [train, test] = fold.getFold(0);</span></span>
<span id="L40"><span class="lineNum"> 40</span> <span class="tlaGNC"> 155 : auto train_t = torch::tensor(train);</span></span>
<span id="L41"><span class="lineNum"> 41</span> <span class="tlaGNC"> 155 : auto test_t = torch::tensor(test);</span></span>
<span id="L42"><span class="lineNum"> 42</span> : // Get train and validation sets</span>
<span id="L43"><span class="lineNum"> 43</span> <span class="tlaGNC"> 775 : X_train = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), train_t });</span></span>
<span id="L44"><span class="lineNum"> 44</span> <span class="tlaGNC"> 465 : y_train = dataset.index({ -1, train_t });</span></span>
<span id="L45"><span class="lineNum"> 45</span> <span class="tlaGNC"> 775 : X_test = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), test_t });</span></span>
<span id="L46"><span class="lineNum"> 46</span> <span class="tlaGNC"> 465 : y_test = dataset.index({ -1, test_t });</span></span>
<span id="L47"><span class="lineNum"> 47</span> <span class="tlaGNC"> 155 : dataset = X_train;</span></span>
<span id="L48"><span class="lineNum"> 48</span> <span class="tlaGNC"> 155 : m = X_train.size(1);</span></span>
<span id="L49"><span class="lineNum"> 49</span> <span class="tlaGNC"> 155 : auto n_classes = states.at(className).size();</span></span>
<span id="L50"><span class="lineNum"> 50</span> : // Build dataset with train data</span>
<span id="L51"><span class="lineNum"> 51</span> <span class="tlaGNC"> 155 : buildDataset(y_train);</span></span>
<span id="L52"><span class="lineNum"> 52</span> <span class="tlaGNC"> 155 : metrics = Metrics(dataset, features, className, n_classes);</span></span>
<span id="L53"><span class="lineNum"> 53</span> <span class="tlaGNC"> 155 : } else {</span></span>
<span id="L54"><span class="lineNum"> 54</span> : // Use all data to train</span>
<span id="L55"><span class="lineNum"> 55</span> <span class="tlaGNC"> 140 : X_train = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), &quot;...&quot; });</span></span>
<span id="L56"><span class="lineNum"> 56</span> <span class="tlaGNC"> 35 : y_train = y_;</span></span>
<span id="L57"><span class="lineNum"> 57</span> : }</span>
<span id="L58"><span class="lineNum"> 58</span> <span class="tlaGNC"> 1845 : }</span></span>
<span id="L59"><span class="lineNum"> 59</span> <span class="tlaGNC"> 187 : void BoostAODE::setHyperparameters(const nlohmann::json&amp; hyperparameters_)</span></span>
<span id="L60"><span class="lineNum"> 60</span> : {</span>
<span id="L61"><span class="lineNum"> 61</span> <span class="tlaGNC"> 187 : auto hyperparameters = hyperparameters_;</span></span>
<span id="L62"><span class="lineNum"> 62</span> <span class="tlaGNC"> 187 : if (hyperparameters.contains(&quot;order&quot;)) {</span></span>
<span id="L63"><span class="lineNum"> 63</span> <span class="tlaGNC"> 250 : std::vector&lt;std::string&gt; algos = { Orders.ASC, Orders.DESC, Orders.RAND };</span></span>
<span id="L64"><span class="lineNum"> 64</span> <span class="tlaGNC"> 50 : order_algorithm = hyperparameters[&quot;order&quot;];</span></span>
<span id="L65"><span class="lineNum"> 65</span> <span class="tlaGNC"> 50 : if (std::find(algos.begin(), algos.end(), order_algorithm) == algos.end()) {</span></span>
<span id="L66"><span class="lineNum"> 66</span> <span class="tlaGNC"> 9 : throw std::invalid_argument(&quot;Invalid order algorithm, valid values [&quot; + Orders.ASC + &quot;, &quot; + Orders.DESC + &quot;, &quot; + Orders.RAND + &quot;]&quot;);</span></span>
<span id="L67"><span class="lineNum"> 67</span> : }</span>
<span id="L68"><span class="lineNum"> 68</span> <span class="tlaGNC"> 41 : hyperparameters.erase(&quot;order&quot;);</span></span>
<span id="L69"><span class="lineNum"> 69</span> <span class="tlaGNC"> 50 : }</span></span>
<span id="L70"><span class="lineNum"> 70</span> <span class="tlaGNC"> 178 : if (hyperparameters.contains(&quot;convergence&quot;)) {</span></span>
<span id="L71"><span class="lineNum"> 71</span> <span class="tlaGNC"> 70 : convergence = hyperparameters[&quot;convergence&quot;];</span></span>
<span id="L72"><span class="lineNum"> 72</span> <span class="tlaGNC"> 70 : hyperparameters.erase(&quot;convergence&quot;);</span></span>
<span id="L73"><span class="lineNum"> 73</span> : }</span>
<span id="L74"><span class="lineNum"> 74</span> <span class="tlaGNC"> 178 : if (hyperparameters.contains(&quot;convergence_best&quot;)) {</span></span>
<span id="L75"><span class="lineNum"> 75</span> <span class="tlaGNC"> 18 : convergence_best = hyperparameters[&quot;convergence_best&quot;];</span></span>
<span id="L76"><span class="lineNum"> 76</span> <span class="tlaGNC"> 18 : hyperparameters.erase(&quot;convergence_best&quot;);</span></span>
<span id="L77"><span class="lineNum"> 77</span> : }</span>
<span id="L78"><span class="lineNum"> 78</span> <span class="tlaGNC"> 178 : if (hyperparameters.contains(&quot;bisection&quot;)) {</span></span>
<span id="L79"><span class="lineNum"> 79</span> <span class="tlaGNC"> 56 : bisection = hyperparameters[&quot;bisection&quot;];</span></span>
<span id="L80"><span class="lineNum"> 80</span> <span class="tlaGNC"> 56 : hyperparameters.erase(&quot;bisection&quot;);</span></span>
<span id="L81"><span class="lineNum"> 81</span> : }</span>
<span id="L82"><span class="lineNum"> 82</span> <span class="tlaGNC"> 178 : if (hyperparameters.contains(&quot;threshold&quot;)) {</span></span>
<span id="L83"><span class="lineNum"> 83</span> <span class="tlaGNC"> 56 : threshold = hyperparameters[&quot;threshold&quot;];</span></span>
<span id="L84"><span class="lineNum"> 84</span> <span class="tlaGNC"> 56 : hyperparameters.erase(&quot;threshold&quot;);</span></span>
<span id="L85"><span class="lineNum"> 85</span> : }</span>
<span id="L86"><span class="lineNum"> 86</span> <span class="tlaGNC"> 178 : if (hyperparameters.contains(&quot;maxTolerance&quot;)) {</span></span>
<span id="L87"><span class="lineNum"> 87</span> <span class="tlaGNC"> 83 : maxTolerance = hyperparameters[&quot;maxTolerance&quot;];</span></span>
<span id="L88"><span class="lineNum"> 88</span> <span class="tlaGNC"> 83 : if (maxTolerance &lt; 1 || maxTolerance &gt; 4)</span></span>
<span id="L89"><span class="lineNum"> 89</span> <span class="tlaGNC"> 27 : throw std::invalid_argument(&quot;Invalid maxTolerance value, must be greater in [1, 4]&quot;);</span></span>
<span id="L90"><span class="lineNum"> 90</span> <span class="tlaGNC"> 56 : hyperparameters.erase(&quot;maxTolerance&quot;);</span></span>
<span id="L91"><span class="lineNum"> 91</span> : }</span>
<span id="L92"><span class="lineNum"> 92</span> <span class="tlaGNC"> 151 : if (hyperparameters.contains(&quot;predict_voting&quot;)) {</span></span>
<span id="L93"><span class="lineNum"> 93</span> <span class="tlaGNC"> 9 : predict_voting = hyperparameters[&quot;predict_voting&quot;];</span></span>
<span id="L94"><span class="lineNum"> 94</span> <span class="tlaGNC"> 9 : hyperparameters.erase(&quot;predict_voting&quot;);</span></span>
<span id="L95"><span class="lineNum"> 95</span> : }</span>
<span id="L96"><span class="lineNum"> 96</span> <span class="tlaGNC"> 151 : if (hyperparameters.contains(&quot;select_features&quot;)) {</span></span>
<span id="L97"><span class="lineNum"> 97</span> <span class="tlaGNC"> 85 : auto selectedAlgorithm = hyperparameters[&quot;select_features&quot;];</span></span>
<span id="L98"><span class="lineNum"> 98</span> <span class="tlaGNC"> 425 : std::vector&lt;std::string&gt; algos = { SelectFeatures.IWSS, SelectFeatures.CFS, SelectFeatures.FCBF };</span></span>
<span id="L99"><span class="lineNum"> 99</span> <span class="tlaGNC"> 85 : selectFeatures = true;</span></span>
<span id="L100"><span class="lineNum"> 100</span> <span class="tlaGNC"> 85 : select_features_algorithm = selectedAlgorithm;</span></span>
<span id="L101"><span class="lineNum"> 101</span> <span class="tlaGNC"> 85 : if (std::find(algos.begin(), algos.end(), selectedAlgorithm) == algos.end()) {</span></span>
<span id="L102"><span class="lineNum"> 102</span> <span class="tlaGNC"> 9 : throw std::invalid_argument(&quot;Invalid selectFeatures value, valid values [&quot; + SelectFeatures.IWSS + &quot;, &quot; + SelectFeatures.CFS + &quot;, &quot; + SelectFeatures.FCBF + &quot;]&quot;);</span></span>
<span id="L103"><span class="lineNum"> 103</span> : }</span>
<span id="L104"><span class="lineNum"> 104</span> <span class="tlaGNC"> 76 : hyperparameters.erase(&quot;select_features&quot;);</span></span>
<span id="L105"><span class="lineNum"> 105</span> <span class="tlaGNC"> 94 : }</span></span>
<span id="L106"><span class="lineNum"> 106</span> <span class="tlaGNC"> 142 : if (hyperparameters.contains(&quot;block_update&quot;)) {</span></span>
<span id="L107"><span class="lineNum"> 107</span> <span class="tlaGNC"> 16 : block_update = hyperparameters[&quot;block_update&quot;];</span></span>
<span id="L108"><span class="lineNum"> 108</span> <span class="tlaGNC"> 16 : hyperparameters.erase(&quot;block_update&quot;);</span></span>
<span id="L109"><span class="lineNum"> 109</span> : }</span>
<span id="L110"><span class="lineNum"> 110</span> <span class="tlaGNC"> 142 : Classifier::setHyperparameters(hyperparameters);</span></span>
<span id="L111"><span class="lineNum"> 111</span> <span class="tlaGNC"> 322 : }</span></span>
<span id="L112"><span class="lineNum"> 112</span> <span class="tlaGNC"> 1025 : std::tuple&lt;torch::Tensor&amp;, double, bool&gt; update_weights(torch::Tensor&amp; ytrain, torch::Tensor&amp; ypred, torch::Tensor&amp; weights)</span></span>
<span id="L113"><span class="lineNum"> 113</span> : {</span>
<span id="L114"><span class="lineNum"> 114</span> <span class="tlaGNC"> 1025 : bool terminate = false;</span></span>
<span id="L115"><span class="lineNum"> 115</span> <span class="tlaGNC"> 1025 : double alpha_t = 0;</span></span>
<span id="L116"><span class="lineNum"> 116</span> <span class="tlaGNC"> 1025 : auto mask_wrong = ypred != ytrain;</span></span>
<span id="L117"><span class="lineNum"> 117</span> <span class="tlaGNC"> 1025 : auto mask_right = ypred == ytrain;</span></span>
<span id="L118"><span class="lineNum"> 118</span> <span class="tlaGNC"> 1025 : auto masked_weights = weights * mask_wrong.to(weights.dtype());</span></span>
<span id="L119"><span class="lineNum"> 119</span> <span class="tlaGNC"> 1025 : double epsilon_t = masked_weights.sum().item&lt;double&gt;();</span></span>
<span id="L120"><span class="lineNum"> 120</span> <span class="tlaGNC"> 1025 : if (epsilon_t &gt; 0.5) {</span></span>
<span id="L121"><span class="lineNum"> 121</span> : // Inverse the weights policy (plot ln(wt))</span>
<span id="L122"><span class="lineNum"> 122</span> : // &quot;In each round of AdaBoost, there is a sanity check to ensure that the current base </span>
<span id="L123"><span class="lineNum"> 123</span> : // learner is better than random guess&quot; (Zhi-Hua Zhou, 2012)</span>
<span id="L124"><span class="lineNum"> 124</span> <span class="tlaGNC"> 34 : terminate = true;</span></span>
<span id="L125"><span class="lineNum"> 125</span> : } else {</span>
<span id="L126"><span class="lineNum"> 126</span> <span class="tlaGNC"> 991 : double wt = (1 - epsilon_t) / epsilon_t;</span></span>
<span id="L127"><span class="lineNum"> 127</span> <span class="tlaGNC"> 991 : alpha_t = epsilon_t == 0 ? 1 : 0.5 * log(wt);</span></span>
<span id="L128"><span class="lineNum"> 128</span> : // Step 3.2: Update weights for next classifier</span>
<span id="L129"><span class="lineNum"> 129</span> : // Step 3.2.1: Update weights of wrong samples</span>
<span id="L130"><span class="lineNum"> 130</span> <span class="tlaGNC"> 991 : weights += mask_wrong.to(weights.dtype()) * exp(alpha_t) * weights;</span></span>
<span id="L131"><span class="lineNum"> 131</span> : // Step 3.2.2: Update weights of right samples</span>
<span id="L132"><span class="lineNum"> 132</span> <span class="tlaGNC"> 991 : weights += mask_right.to(weights.dtype()) * exp(-alpha_t) * weights;</span></span>
<span id="L133"><span class="lineNum"> 133</span> : // Step 3.3: Normalise the weights</span>
<span id="L134"><span class="lineNum"> 134</span> <span class="tlaGNC"> 991 : double totalWeights = torch::sum(weights).item&lt;double&gt;();</span></span>
<span id="L135"><span class="lineNum"> 135</span> <span class="tlaGNC"> 991 : weights = weights / totalWeights;</span></span>
<span id="L136"><span class="lineNum"> 136</span> : }</span>
<span id="L137"><span class="lineNum"> 137</span> <span class="tlaGNC"> 2050 : return { weights, alpha_t, terminate };</span></span>
<span id="L138"><span class="lineNum"> 138</span> <span class="tlaGNC"> 1025 : }</span></span>
<span id="L139"><span class="lineNum"> 139</span> <span class="tlaGNC"> 40 : std::tuple&lt;torch::Tensor&amp;, double, bool&gt; BoostAODE::update_weights_block(int k, torch::Tensor&amp; ytrain, torch::Tensor&amp; weights)</span></span>
<span id="L140"><span class="lineNum"> 140</span> : {</span>
<span id="L141"><span class="lineNum"> 141</span> : /* Update Block algorithm</span>
<span id="L142"><span class="lineNum"> 142</span> : k = # of models in block</span>
<span id="L143"><span class="lineNum"> 143</span> : n_models = # of models in ensemble to make predictions</span>
<span id="L144"><span class="lineNum"> 144</span> : n_models_bak = # models saved</span>
<span id="L145"><span class="lineNum"> 145</span> : models = vector of models to make predictions</span>
<span id="L146"><span class="lineNum"> 146</span> : models_bak = models not used to make predictions</span>
<span id="L147"><span class="lineNum"> 147</span> : significances_bak = backup of significances vector</span>
<span id="L148"><span class="lineNum"> 148</span> : </span>
<span id="L149"><span class="lineNum"> 149</span> : Case list</span>
<span id="L150"><span class="lineNum"> 150</span> : A) k = 1, n_models = 1 =&gt; n = 0 , n_models = n + k</span>
<span id="L151"><span class="lineNum"> 151</span> : B) k = 1, n_models = n + 1 =&gt; n_models = n + k</span>
<span id="L152"><span class="lineNum"> 152</span> : C) k &gt; 1, n_models = k + 1 =&gt; n= 1, n_models = n + k</span>
<span id="L153"><span class="lineNum"> 153</span> : D) k &gt; 1, n_models = k =&gt; n = 0, n_models = n + k</span>
<span id="L154"><span class="lineNum"> 154</span> : E) k &gt; 1, n_models = k + n =&gt; n_models = n + k</span>
<span id="L155"><span class="lineNum"> 155</span> : </span>
<span id="L156"><span class="lineNum"> 156</span> : A, D) n=0, k &gt; 0, n_models == k</span>
<span id="L157"><span class="lineNum"> 157</span> : 1. n_models_bak &lt;- n_models</span>
<span id="L158"><span class="lineNum"> 158</span> : 2. significances_bak &lt;- significances</span>
<span id="L159"><span class="lineNum"> 159</span> : 3. significances = vector(k, 1)</span>
<span id="L160"><span class="lineNum"> 160</span> : 4. Dont move any classifiers out of models</span>
<span id="L161"><span class="lineNum"> 161</span> : 5. n_models &lt;- k</span>
<span id="L162"><span class="lineNum"> 162</span> : 6. Make prediction, compute alpha, update weights</span>
<span id="L163"><span class="lineNum"> 163</span> : 7. Dont restore any classifiers to models</span>
<span id="L164"><span class="lineNum"> 164</span> : 8. significances &lt;- significances_bak</span>
<span id="L165"><span class="lineNum"> 165</span> : 9. Update last k significances</span>
<span id="L166"><span class="lineNum"> 166</span> : 10. n_models &lt;- n_models_bak</span>
<span id="L167"><span class="lineNum"> 167</span> : </span>
<span id="L168"><span class="lineNum"> 168</span> : B, C, E) n &gt; 0, k &gt; 0, n_models == n + k</span>
<span id="L169"><span class="lineNum"> 169</span> : 1. n_models_bak &lt;- n_models</span>
<span id="L170"><span class="lineNum"> 170</span> : 2. significances_bak &lt;- significances</span>
<span id="L171"><span class="lineNum"> 171</span> : 3. significances = vector(k, 1)</span>
<span id="L172"><span class="lineNum"> 172</span> : 4. Move first n classifiers to models_bak</span>
<span id="L173"><span class="lineNum"> 173</span> : 5. n_models &lt;- k</span>
<span id="L174"><span class="lineNum"> 174</span> : 6. Make prediction, compute alpha, update weights</span>
<span id="L175"><span class="lineNum"> 175</span> : 7. Insert classifiers in models_bak to be the first n models</span>
<span id="L176"><span class="lineNum"> 176</span> : 8. significances &lt;- significances_bak</span>
<span id="L177"><span class="lineNum"> 177</span> : 9. Update last k significances</span>
<span id="L178"><span class="lineNum"> 178</span> : 10. n_models &lt;- n_models_bak</span>
<span id="L179"><span class="lineNum"> 179</span> : */</span>
<span id="L180"><span class="lineNum"> 180</span> : //</span>
<span id="L181"><span class="lineNum"> 181</span> : // Make predict with only the last k models</span>
<span id="L182"><span class="lineNum"> 182</span> : //</span>
<span id="L183"><span class="lineNum"> 183</span> <span class="tlaGNC"> 40 : std::unique_ptr&lt;Classifier&gt; model;</span></span>
<span id="L184"><span class="lineNum"> 184</span> <span class="tlaGNC"> 40 : std::vector&lt;std::unique_ptr&lt;Classifier&gt;&gt; models_bak;</span></span>
<span id="L185"><span class="lineNum"> 185</span> : // 1. n_models_bak &lt;- n_models 2. significances_bak &lt;- significances</span>
<span id="L186"><span class="lineNum"> 186</span> <span class="tlaGNC"> 40 : auto significance_bak = significanceModels;</span></span>
<span id="L187"><span class="lineNum"> 187</span> <span class="tlaGNC"> 40 : auto n_models_bak = n_models;</span></span>
<span id="L188"><span class="lineNum"> 188</span> : // 3. significances = vector(k, 1)</span>
<span id="L189"><span class="lineNum"> 189</span> <span class="tlaGNC"> 40 : significanceModels = std::vector&lt;double&gt;(k, 1.0);</span></span>
<span id="L190"><span class="lineNum"> 190</span> : // 4. Move first n classifiers to models_bak</span>
<span id="L191"><span class="lineNum"> 191</span> : // backup the first n_models - k models (if n_models == k, don't backup any)</span>
<span id="L192"><span class="lineNum"> 192</span> <span class="tlaGNC"> 160 : for (int i = 0; i &lt; n_models - k; ++i) {</span></span>
<span id="L193"><span class="lineNum"> 193</span> <span class="tlaGNC"> 120 : model = std::move(models[0]);</span></span>
<span id="L194"><span class="lineNum"> 194</span> <span class="tlaGNC"> 120 : models.erase(models.begin());</span></span>
<span id="L195"><span class="lineNum"> 195</span> <span class="tlaGNC"> 120 : models_bak.push_back(std::move(model));</span></span>
<span id="L196"><span class="lineNum"> 196</span> : }</span>
<span id="L197"><span class="lineNum"> 197</span> <span class="tlaGNC"> 40 : assert(models.size() == k);</span></span>
<span id="L198"><span class="lineNum"> 198</span> : // 5. n_models &lt;- k</span>
<span id="L199"><span class="lineNum"> 199</span> <span class="tlaGNC"> 40 : n_models = k;</span></span>
<span id="L200"><span class="lineNum"> 200</span> : // 6. Make prediction, compute alpha, update weights</span>
<span id="L201"><span class="lineNum"> 201</span> <span class="tlaGNC"> 40 : auto ypred = predict(X_train);</span></span>
<span id="L202"><span class="lineNum"> 202</span> : //</span>
<span id="L203"><span class="lineNum"> 203</span> : // Update weights</span>
<span id="L204"><span class="lineNum"> 204</span> : //</span>
<span id="L205"><span class="lineNum"> 205</span> : double alpha_t;</span>
<span id="L206"><span class="lineNum"> 206</span> : bool terminate;</span>
<span id="L207"><span class="lineNum"> 207</span> <span class="tlaGNC"> 40 : std::tie(weights, alpha_t, terminate) = update_weights(y_train, ypred, weights);</span></span>
<span id="L208"><span class="lineNum"> 208</span> : //</span>
<span id="L209"><span class="lineNum"> 209</span> : // Restore the models if needed</span>
<span id="L210"><span class="lineNum"> 210</span> : //</span>
<span id="L211"><span class="lineNum"> 211</span> : // 7. Insert classifiers in models_bak to be the first n models</span>
<span id="L212"><span class="lineNum"> 212</span> : // if n_models_bak == k, don't restore any, because none of them were moved</span>
<span id="L213"><span class="lineNum"> 213</span> <span class="tlaGNC"> 40 : if (k != n_models_bak) {</span></span>
<span id="L214"><span class="lineNum"> 214</span> : // Insert in the same order as they were extracted</span>
<span id="L215"><span class="lineNum"> 215</span> <span class="tlaGNC"> 32 : int bak_size = models_bak.size();</span></span>
<span id="L216"><span class="lineNum"> 216</span> <span class="tlaGNC"> 152 : for (int i = 0; i &lt; bak_size; ++i) {</span></span>
<span id="L217"><span class="lineNum"> 217</span> <span class="tlaGNC"> 120 : model = std::move(models_bak[bak_size - 1 - i]);</span></span>
<span id="L218"><span class="lineNum"> 218</span> <span class="tlaGNC"> 120 : models_bak.erase(models_bak.end() - 1);</span></span>
<span id="L219"><span class="lineNum"> 219</span> <span class="tlaGNC"> 120 : models.insert(models.begin(), std::move(model));</span></span>
<span id="L220"><span class="lineNum"> 220</span> : }</span>
<span id="L221"><span class="lineNum"> 221</span> : }</span>
<span id="L222"><span class="lineNum"> 222</span> : // 8. significances &lt;- significances_bak</span>
<span id="L223"><span class="lineNum"> 223</span> <span class="tlaGNC"> 40 : significanceModels = significance_bak;</span></span>
<span id="L224"><span class="lineNum"> 224</span> : //</span>
<span id="L225"><span class="lineNum"> 225</span> : // Update the significance of the last k models</span>
<span id="L226"><span class="lineNum"> 226</span> : //</span>
<span id="L227"><span class="lineNum"> 227</span> : // 9. Update last k significances</span>
<span id="L228"><span class="lineNum"> 228</span> <span class="tlaGNC"> 168 : for (int i = 0; i &lt; k; ++i) {</span></span>
<span id="L229"><span class="lineNum"> 229</span> <span class="tlaGNC"> 128 : significanceModels[n_models_bak - k + i] = alpha_t;</span></span>
<span id="L230"><span class="lineNum"> 230</span> : }</span>
<span id="L231"><span class="lineNum"> 231</span> : // 10. n_models &lt;- n_models_bak</span>
<span id="L232"><span class="lineNum"> 232</span> <span class="tlaGNC"> 40 : n_models = n_models_bak;</span></span>
<span id="L233"><span class="lineNum"> 233</span> <span class="tlaGNC"> 80 : return { weights, alpha_t, terminate };</span></span>
<span id="L234"><span class="lineNum"> 234</span> <span class="tlaGNC"> 40 : }</span></span>
<span id="L235"><span class="lineNum"> 235</span> <span class="tlaGNC"> 76 : std::vector&lt;int&gt; BoostAODE::initializeModels()</span></span>
<span id="L236"><span class="lineNum"> 236</span> : {</span>
<span id="L237"><span class="lineNum"> 237</span> <span class="tlaGNC"> 76 : std::vector&lt;int&gt; featuresUsed;</span></span>
<span id="L238"><span class="lineNum"> 238</span> <span class="tlaGNC"> 76 : torch::Tensor weights_ = torch::full({ m }, 1.0 / m, torch::kFloat64);</span></span>
<span id="L239"><span class="lineNum"> 239</span> <span class="tlaGNC"> 76 : int maxFeatures = 0;</span></span>
<span id="L240"><span class="lineNum"> 240</span> <span class="tlaGNC"> 76 : if (select_features_algorithm == SelectFeatures.CFS) {</span></span>
<span id="L241"><span class="lineNum"> 241</span> <span class="tlaGNC"> 20 : featureSelector = new CFS(dataset, features, className, maxFeatures, states.at(className).size(), weights_);</span></span>
<span id="L242"><span class="lineNum"> 242</span> <span class="tlaGNC"> 56 : } else if (select_features_algorithm == SelectFeatures.IWSS) {</span></span>
<span id="L243"><span class="lineNum"> 243</span> <span class="tlaGNC"> 29 : if (threshold &lt; 0 || threshold &gt;0.5) {</span></span>
<span id="L244"><span class="lineNum"> 244</span> <span class="tlaGNC"> 18 : throw std::invalid_argument(&quot;Invalid threshold value for &quot; + SelectFeatures.IWSS + &quot; [0, 0.5]&quot;);</span></span>
<span id="L245"><span class="lineNum"> 245</span> : }</span>
<span id="L246"><span class="lineNum"> 246</span> <span class="tlaGNC"> 1 : featureSelector = new FCBF(dataset, features, className, maxFeatures, states.at(className).size(), weights_, threshold);</span></span>
<span id="L247"><span class="lineNum"> 247</span> : }</span>
<span id="L248"><span class="lineNum"> 248</span> <span class="tlaGNC"> 4 : featureSelector-&gt;fit();</span></span>
<span id="L249"><span class="lineNum"> 249</span> <span class="tlaGNC"> 4 : auto cfsFeatures = featureSelector-&gt;getFeatures();</span></span>
<span id="L250"><span class="lineNum"> 250</span> <span class="tlaGNC"> 4 : auto scores = featureSelector-&gt;getScores();</span></span>
<span id="L251"><span class="lineNum"> 251</span> <span class="tlaGNC"> 25 : for (const int&amp; feature : cfsFeatures) {</span></span>
<span id="L252"><span class="lineNum"> 252</span> <span class="tlaGNC"> 21 : featuresUsed.push_back(feature);</span></span>
<span id="L253"><span class="lineNum"> 253</span> <span class="tlaGNC"> 21 : std::unique_ptr&lt;Classifier&gt; model = std::make_unique&lt;SPODE&gt;(feature);</span></span>
<span id="L254"><span class="lineNum"> 254</span> <span class="tlaGNC"> 21 : model-&gt;fit(dataset, features, className, states, weights_);</span></span>
<span id="L255"><span class="lineNum"> 255</span> <span class="tlaGNC"> 21 : models.push_back(std::move(model));</span></span>
<span id="L256"><span class="lineNum"> 256</span> <span class="tlaGNC"> 21 : significanceModels.push_back(1.0); // They will be updated later in trainModel</span></span>
<span id="L257"><span class="lineNum"> 257</span> <span class="tlaGNC"> 21 : n_models++;</span></span>
<span id="L258"><span class="lineNum"> 258</span> <span class="tlaGNC"> 21 : }</span></span>
<span id="L259"><span class="lineNum"> 259</span> <span class="tlaGNC"> 4 : notes.push_back(&quot;Used features in initialization: &quot; + std::to_string(featuresUsed.size()) + &quot; of &quot; + std::to_string(features.size()) + &quot; with &quot; + select_features_algorithm);</span></span>
<span id="L260"><span class="lineNum"> 260</span> <span class="tlaGNC"> 4 : delete featureSelector;</span></span>
<span id="L261"><span class="lineNum"> 261</span> <span class="tlaGNC"> 8 : return featuresUsed;</span></span>
<span id="L262"><span class="lineNum"> 262</span> <span class="tlaGNC"> 12 : }</span></span>
<span id="L263"><span class="lineNum"> 263</span> <span class="tlaGNC"> 21 : void BoostAODE::trainModel(const torch::Tensor&amp; weights)</span></span>
<span id="L264"><span class="lineNum"> 264</span> : {</span>
<span id="L265"><span class="lineNum"> 265</span> : // Algorithm based on the adaboost algorithm for classification</span>
<span id="L266"><span class="lineNum"> 266</span> : // as explained in Ensemble methods (Zhi-Hua Zhou, 2012)</span>
<span id="L267"><span class="lineNum"> 267</span> <span class="tlaGNC"> 21 : fitted = true;</span></span>
<span id="L268"><span class="lineNum"> 268</span> <span class="tlaGNC"> 21 : double alpha_t = 0;</span></span>
<span id="L269"><span class="lineNum"> 269</span> <span class="tlaGNC"> 21 : torch::Tensor weights_ = torch::full({ m }, 1.0 / m, torch::kFloat64);</span></span>
<span id="L270"><span class="lineNum"> 270</span> <span class="tlaGNC"> 21 : bool finished = false;</span></span>
<span id="L271"><span class="lineNum"> 271</span> <span class="tlaGNC"> 21 : std::vector&lt;int&gt; featuresUsed;</span></span>
<span id="L272"><span class="lineNum"> 272</span> <span class="tlaGNC"> 21 : if (selectFeatures) {</span></span>
<span id="L273"><span class="lineNum"> 273</span> <span class="tlaGNC"> 8 : featuresUsed = initializeModels();</span></span>
<span id="L274"><span class="lineNum"> 274</span> <span class="tlaGNC"> 4 : auto ypred = predict(X_train);</span></span>
<span id="L275"><span class="lineNum"> 275</span> <span class="tlaGNC"> 4 : std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);</span></span>
<span id="L276"><span class="lineNum"> 276</span> : // Update significance of the models</span>
<span id="L277"><span class="lineNum"> 277</span> <span class="tlaGNC"> 25 : for (int i = 0; i &lt; n_models; ++i) {</span></span>
<span id="L278"><span class="lineNum"> 278</span> <span class="tlaGNC"> 21 : significanceModels[i] = alpha_t;</span></span>
<span id="L279"><span class="lineNum"> 279</span> : }</span>
<span id="L280"><span class="lineNum"> 280</span> <span class="tlaGNC"> 4 : if (finished) {</span></span>
<span id="L281"><span class="lineNum"> 281</span> <span class="tlaUNC tlaBgUNC"> 0 : return;</span></span>
<span id="L282"><span class="lineNum"> 282</span> : }</span>
<span id="L283"><span class="lineNum"> 283</span> <span class="tlaGNC tlaBgGNC"> 4 : }</span></span>
<span id="L284"><span class="lineNum"> 284</span> <span class="tlaGNC"> 17 : int numItemsPack = 0; // The counter of the models inserted in the current pack</span></span>
<span id="L285"><span class="lineNum"> 285</span> : // Variables to control the accuracy finish condition</span>
<span id="L286"><span class="lineNum"> 286</span> <span class="tlaGNC"> 17 : double priorAccuracy = 0.0;</span></span>
<span id="L287"><span class="lineNum"> 287</span> <span class="tlaGNC"> 17 : double improvement = 1.0;</span></span>
<span id="L288"><span class="lineNum"> 288</span> <span class="tlaGNC"> 17 : double convergence_threshold = 1e-4;</span></span>
<span id="L289"><span class="lineNum"> 289</span> <span class="tlaGNC"> 17 : int tolerance = 0; // number of times the accuracy is lower than the convergence_threshold</span></span>
<span id="L290"><span class="lineNum"> 290</span> : // Step 0: Set the finish condition</span>
<span id="L291"><span class="lineNum"> 291</span> : // epsilon sub t &gt; 0.5 =&gt; inverse the weights policy</span>
<span id="L292"><span class="lineNum"> 292</span> : // validation error is not decreasing</span>
<span id="L293"><span class="lineNum"> 293</span> : // run out of features</span>
<span id="L294"><span class="lineNum"> 294</span> <span class="tlaGNC"> 17 : bool ascending = order_algorithm == Orders.ASC;</span></span>
<span id="L295"><span class="lineNum"> 295</span> <span class="tlaGNC"> 17 : std::mt19937 g{ 173 };</span></span>
<span id="L296"><span class="lineNum"> 296</span> <span class="tlaGNC"> 99 : while (!finished) {</span></span>
<span id="L297"><span class="lineNum"> 297</span> : // Step 1: Build ranking with mutual information</span>
<span id="L298"><span class="lineNum"> 298</span> <span class="tlaGNC"> 82 : auto featureSelection = metrics.SelectKBestWeighted(weights_, ascending, n); // Get all the features sorted</span></span>
<span id="L299"><span class="lineNum"> 299</span> <span class="tlaGNC"> 82 : if (order_algorithm == Orders.RAND) {</span></span>
<span id="L300"><span class="lineNum"> 300</span> <span class="tlaGNC"> 9 : std::shuffle(featureSelection.begin(), featureSelection.end(), g);</span></span>
<span id="L301"><span class="lineNum"> 301</span> : }</span>
<span id="L302"><span class="lineNum"> 302</span> : // Remove used features</span>
<span id="L303"><span class="lineNum"> 303</span> <span class="tlaGNC"> 164 : featureSelection.erase(remove_if(begin(featureSelection), end(featureSelection), [&amp;](auto x)</span></span>
<span id="L304"><span class="lineNum"> 304</span> <span class="tlaGNC"> 10764 : { return std::find(begin(featuresUsed), end(featuresUsed), x) != end(featuresUsed);}),</span></span>
<span id="L305"><span class="lineNum"> 305</span> <span class="tlaGNC"> 82 : end(featureSelection)</span></span>
<span id="L306"><span class="lineNum"> 306</span> : );</span>
<span id="L307"><span class="lineNum"> 307</span> <span class="tlaGNC"> 82 : int k = pow(2, tolerance);</span></span>
<span id="L308"><span class="lineNum"> 308</span> <span class="tlaGNC"> 82 : int counter = 0; // The model counter of the current pack</span></span>
<span id="L309"><span class="lineNum"> 309</span> <span class="tlaGNC"> 197 : while (counter++ &lt; k &amp;&amp; featureSelection.size() &gt; 0) {</span></span>
<span id="L310"><span class="lineNum"> 310</span> <span class="tlaGNC"> 115 : auto feature = featureSelection[0];</span></span>
<span id="L311"><span class="lineNum"> 311</span> <span class="tlaGNC"> 115 : featureSelection.erase(featureSelection.begin());</span></span>
<span id="L312"><span class="lineNum"> 312</span> <span class="tlaGNC"> 115 : std::unique_ptr&lt;Classifier&gt; model;</span></span>
<span id="L313"><span class="lineNum"> 313</span> <span class="tlaGNC"> 115 : model = std::make_unique&lt;SPODE&gt;(feature);</span></span>
<span id="L314"><span class="lineNum"> 314</span> <span class="tlaGNC"> 115 : model-&gt;fit(dataset, features, className, states, weights_);</span></span>
<span id="L315"><span class="lineNum"> 315</span> <span class="tlaGNC"> 115 : alpha_t = 0.0;</span></span>
<span id="L316"><span class="lineNum"> 316</span> <span class="tlaGNC"> 115 : if (!block_update) {</span></span>
<span id="L317"><span class="lineNum"> 317</span> <span class="tlaGNC"> 99 : auto ypred = model-&gt;predict(X_train);</span></span>
<span id="L318"><span class="lineNum"> 318</span> : // Step 3.1: Compute the classifier amout of say</span>
<span id="L319"><span class="lineNum"> 319</span> <span class="tlaGNC"> 99 : std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);</span></span>
<span id="L320"><span class="lineNum"> 320</span> <span class="tlaGNC"> 99 : }</span></span>
<span id="L321"><span class="lineNum"> 321</span> : // Step 3.4: Store classifier and its accuracy to weigh its future vote</span>
<span id="L322"><span class="lineNum"> 322</span> <span class="tlaGNC"> 115 : numItemsPack++;</span></span>
<span id="L323"><span class="lineNum"> 323</span> <span class="tlaGNC"> 115 : featuresUsed.push_back(feature);</span></span>
<span id="L324"><span class="lineNum"> 324</span> <span class="tlaGNC"> 115 : models.push_back(std::move(model));</span></span>
<span id="L325"><span class="lineNum"> 325</span> <span class="tlaGNC"> 115 : significanceModels.push_back(alpha_t);</span></span>
<span id="L326"><span class="lineNum"> 326</span> <span class="tlaGNC"> 115 : n_models++;</span></span>
<span id="L327"><span class="lineNum"> 327</span> <span class="tlaGNC"> 115 : }</span></span>
<span id="L328"><span class="lineNum"> 328</span> <span class="tlaGNC"> 82 : if (block_update) {</span></span>
<span id="L329"><span class="lineNum"> 329</span> <span class="tlaGNC"> 5 : std::tie(weights_, alpha_t, finished) = update_weights_block(k, y_train, weights_);</span></span>
<span id="L330"><span class="lineNum"> 330</span> : }</span>
<span id="L331"><span class="lineNum"> 331</span> <span class="tlaGNC"> 82 : if (convergence &amp;&amp; !finished) {</span></span>
<span id="L332"><span class="lineNum"> 332</span> <span class="tlaGNC"> 49 : auto y_val_predict = predict(X_test);</span></span>
<span id="L333"><span class="lineNum"> 333</span> <span class="tlaGNC"> 49 : double accuracy = (y_val_predict == y_test).sum().item&lt;double&gt;() / (double)y_test.size(0);</span></span>
<span id="L334"><span class="lineNum"> 334</span> <span class="tlaGNC"> 49 : if (priorAccuracy == 0) {</span></span>
<span id="L335"><span class="lineNum"> 335</span> <span class="tlaGNC"> 13 : priorAccuracy = accuracy;</span></span>
<span id="L336"><span class="lineNum"> 336</span> : } else {</span>
<span id="L337"><span class="lineNum"> 337</span> <span class="tlaGNC"> 36 : improvement = accuracy - priorAccuracy;</span></span>
<span id="L338"><span class="lineNum"> 338</span> : }</span>
<span id="L339"><span class="lineNum"> 339</span> <span class="tlaGNC"> 49 : if (improvement &lt; convergence_threshold) {</span></span>
<span id="L340"><span class="lineNum"> 340</span> <span class="tlaGNC"> 32 : tolerance++;</span></span>
<span id="L341"><span class="lineNum"> 341</span> : } else {</span>
<span id="L342"><span class="lineNum"> 342</span> <span class="tlaGNC"> 17 : tolerance = 0; // Reset the counter if the model performs better</span></span>
<span id="L343"><span class="lineNum"> 343</span> <span class="tlaGNC"> 17 : numItemsPack = 0;</span></span>
<span id="L344"><span class="lineNum"> 344</span> : }</span>
<span id="L345"><span class="lineNum"> 345</span> : // Keep the best accuracy until now as the prior accuracy</span>
<span id="L346"><span class="lineNum"> 346</span> <span class="tlaGNC"> 49 : priorAccuracy = std::max(accuracy, priorAccuracy);</span></span>
<span id="L347"><span class="lineNum"> 347</span> : // priorAccuracy = accuracy;</span>
<span id="L348"><span class="lineNum"> 348</span> <span class="tlaGNC"> 49 : }</span></span>
<span id="L349"><span class="lineNum"> 349</span> <span class="tlaGNC"> 82 : finished = finished || tolerance &gt; maxTolerance || featuresUsed.size() == features.size();</span></span>
<span id="L350"><span class="lineNum"> 350</span> <span class="tlaGNC"> 82 : }</span></span>
<span id="L351"><span class="lineNum"> 351</span> <span class="tlaGNC"> 17 : if (tolerance &gt; maxTolerance) {</span></span>
<span id="L352"><span class="lineNum"> 352</span> <span class="tlaGNC"> 2 : if (numItemsPack &lt; n_models) {</span></span>
<span id="L353"><span class="lineNum"> 353</span> <span class="tlaGNC"> 2 : notes.push_back(&quot;Convergence threshold reached &amp; &quot; + std::to_string(numItemsPack) + &quot; models eliminated&quot;);</span></span>
<span id="L354"><span class="lineNum"> 354</span> <span class="tlaGNC"> 32 : for (int i = 0; i &lt; numItemsPack; ++i) {</span></span>
<span id="L355"><span class="lineNum"> 355</span> <span class="tlaGNC"> 30 : significanceModels.pop_back();</span></span>
<span id="L356"><span class="lineNum"> 356</span> <span class="tlaGNC"> 30 : models.pop_back();</span></span>
<span id="L357"><span class="lineNum"> 357</span> <span class="tlaGNC"> 30 : n_models--;</span></span>
<span id="L358"><span class="lineNum"> 358</span> : }</span>
<span id="L359"><span class="lineNum"> 359</span> : } else {</span>
<span id="L360"><span class="lineNum"> 360</span> <span class="tlaUNC tlaBgUNC"> 0 : notes.push_back(&quot;Convergence threshold reached &amp; 0 models eliminated&quot;);</span></span>
<span id="L361"><span class="lineNum"> 361</span> : }</span>
<span id="L362"><span class="lineNum"> 362</span> : }</span>
<span id="L363"><span class="lineNum"> 363</span> <span class="tlaGNC tlaBgGNC"> 17 : if (featuresUsed.size() != features.size()) {</span></span>
<span id="L364"><span class="lineNum"> 364</span> <span class="tlaGNC"> 2 : notes.push_back(&quot;Used features in train: &quot; + std::to_string(featuresUsed.size()) + &quot; of &quot; + std::to_string(features.size()));</span></span>
<span id="L365"><span class="lineNum"> 365</span> <span class="tlaGNC"> 2 : status = WARNING;</span></span>
<span id="L366"><span class="lineNum"> 366</span> : }</span>
<span id="L367"><span class="lineNum"> 367</span> <span class="tlaGNC"> 17 : notes.push_back(&quot;Number of models: &quot; + std::to_string(n_models));</span></span>
<span id="L368"><span class="lineNum"> 368</span> <span class="tlaGNC"> 25 : }</span></span>
<span id="L369"><span class="lineNum"> 369</span> <span class="tlaGNC"> 1 : std::vector&lt;std::string&gt; BoostAODE::graph(const std::string&amp; title) const</span></span>
<span id="L370"><span class="lineNum"> 370</span> : {</span>
<span id="L371"><span class="lineNum"> 371</span> <span class="tlaGNC"> 1 : return Ensemble::graph(title);</span></span>
<span id="L372"><span class="lineNum"> 372</span> : }</span>
<span id="L373"><span class="lineNum"> 373</span> : }</span>
<span id="L246"><span class="lineNum"> 246</span> <span class="tlaGNC"> 11 : featureSelector = new IWSS(dataset, features, className, maxFeatures, states.at(className).size(), weights_, threshold);</span></span>
<span id="L247"><span class="lineNum"> 247</span> <span class="tlaGNC"> 27 : } else if (select_features_algorithm == SelectFeatures.FCBF) {</span></span>
<span id="L248"><span class="lineNum"> 248</span> <span class="tlaGNC"> 27 : if (threshold &lt; 1e-7 || threshold &gt; 1) {</span></span>
<span id="L249"><span class="lineNum"> 249</span> <span class="tlaGNC"> 18 : throw std::invalid_argument(&quot;Invalid threshold value for &quot; + SelectFeatures.FCBF + &quot; [1e-7, 1]&quot;);</span></span>
<span id="L250"><span class="lineNum"> 250</span> : }</span>
<span id="L251"><span class="lineNum"> 251</span> <span class="tlaGNC"> 9 : featureSelector = new FCBF(dataset, features, className, maxFeatures, states.at(className).size(), weights_, threshold);</span></span>
<span id="L252"><span class="lineNum"> 252</span> : }</span>
<span id="L253"><span class="lineNum"> 253</span> <span class="tlaGNC"> 40 : featureSelector-&gt;fit();</span></span>
<span id="L254"><span class="lineNum"> 254</span> <span class="tlaGNC"> 40 : auto cfsFeatures = featureSelector-&gt;getFeatures();</span></span>
<span id="L255"><span class="lineNum"> 255</span> <span class="tlaGNC"> 40 : auto scores = featureSelector-&gt;getScores();</span></span>
<span id="L256"><span class="lineNum"> 256</span> <span class="tlaGNC"> 245 : for (const int&amp; feature : cfsFeatures) {</span></span>
<span id="L257"><span class="lineNum"> 257</span> <span class="tlaGNC"> 205 : featuresUsed.push_back(feature);</span></span>
<span id="L258"><span class="lineNum"> 258</span> <span class="tlaGNC"> 205 : std::unique_ptr&lt;Classifier&gt; model = std::make_unique&lt;SPODE&gt;(feature);</span></span>
<span id="L259"><span class="lineNum"> 259</span> <span class="tlaGNC"> 205 : model-&gt;fit(dataset, features, className, states, weights_);</span></span>
<span id="L260"><span class="lineNum"> 260</span> <span class="tlaGNC"> 205 : models.push_back(std::move(model));</span></span>
<span id="L261"><span class="lineNum"> 261</span> <span class="tlaGNC"> 205 : significanceModels.push_back(1.0); // They will be updated later in trainModel</span></span>
<span id="L262"><span class="lineNum"> 262</span> <span class="tlaGNC"> 205 : n_models++;</span></span>
<span id="L263"><span class="lineNum"> 263</span> <span class="tlaGNC"> 205 : }</span></span>
<span id="L264"><span class="lineNum"> 264</span> <span class="tlaGNC"> 40 : notes.push_back(&quot;Used features in initialization: &quot; + std::to_string(featuresUsed.size()) + &quot; of &quot; + std::to_string(features.size()) + &quot; with &quot; + select_features_algorithm);</span></span>
<span id="L265"><span class="lineNum"> 265</span> <span class="tlaGNC"> 40 : delete featureSelector;</span></span>
<span id="L266"><span class="lineNum"> 266</span> <span class="tlaGNC"> 80 : return featuresUsed;</span></span>
<span id="L267"><span class="lineNum"> 267</span> <span class="tlaGNC"> 112 : }</span></span>
<span id="L268"><span class="lineNum"> 268</span> <span class="tlaGNC"> 190 : void BoostAODE::trainModel(const torch::Tensor&amp; weights)</span></span>
<span id="L269"><span class="lineNum"> 269</span> : {</span>
<span id="L270"><span class="lineNum"> 270</span> : //</span>
<span id="L271"><span class="lineNum"> 271</span> : // Logging setup</span>
<span id="L272"><span class="lineNum"> 272</span> : //</span>
<span id="L273"><span class="lineNum"> 273</span> <span class="tlaGNC"> 190 : loguru::set_thread_name(&quot;BoostAODE&quot;);</span></span>
<span id="L274"><span class="lineNum"> 274</span> <span class="tlaGNC"> 190 : loguru::g_stderr_verbosity = loguru::Verbosity_OFF;</span></span>
<span id="L275"><span class="lineNum"> 275</span> <span class="tlaGNC"> 190 : loguru::add_file(&quot;boostAODE.log&quot;, loguru::Truncate, loguru::Verbosity_MAX);</span></span>
<span id="L276"><span class="lineNum"> 276</span> : </span>
<span id="L277"><span class="lineNum"> 277</span> : // Algorithm based on the adaboost algorithm for classification</span>
<span id="L278"><span class="lineNum"> 278</span> : // as explained in Ensemble methods (Zhi-Hua Zhou, 2012)</span>
<span id="L279"><span class="lineNum"> 279</span> <span class="tlaGNC"> 190 : fitted = true;</span></span>
<span id="L280"><span class="lineNum"> 280</span> <span class="tlaGNC"> 190 : double alpha_t = 0;</span></span>
<span id="L281"><span class="lineNum"> 281</span> <span class="tlaGNC"> 190 : torch::Tensor weights_ = torch::full({ m }, 1.0 / m, torch::kFloat64);</span></span>
<span id="L282"><span class="lineNum"> 282</span> <span class="tlaGNC"> 190 : bool finished = false;</span></span>
<span id="L283"><span class="lineNum"> 283</span> <span class="tlaGNC"> 190 : std::vector&lt;int&gt; featuresUsed;</span></span>
<span id="L284"><span class="lineNum"> 284</span> <span class="tlaGNC"> 190 : if (selectFeatures) {</span></span>
<span id="L285"><span class="lineNum"> 285</span> <span class="tlaGNC"> 76 : featuresUsed = initializeModels();</span></span>
<span id="L286"><span class="lineNum"> 286</span> <span class="tlaGNC"> 40 : auto ypred = predict(X_train);</span></span>
<span id="L287"><span class="lineNum"> 287</span> <span class="tlaGNC"> 40 : std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);</span></span>
<span id="L288"><span class="lineNum"> 288</span> : // Update significance of the models</span>
<span id="L289"><span class="lineNum"> 289</span> <span class="tlaGNC"> 245 : for (int i = 0; i &lt; n_models; ++i) {</span></span>
<span id="L290"><span class="lineNum"> 290</span> <span class="tlaGNC"> 205 : significanceModels[i] = alpha_t;</span></span>
<span id="L291"><span class="lineNum"> 291</span> : }</span>
<span id="L292"><span class="lineNum"> 292</span> <span class="tlaGNC"> 40 : if (finished) {</span></span>
<span id="L293"><span class="lineNum"> 293</span> <span class="tlaUNC tlaBgUNC"> 0 : return;</span></span>
<span id="L294"><span class="lineNum"> 294</span> : }</span>
<span id="L295"><span class="lineNum"> 295</span> <span class="tlaGNC tlaBgGNC"> 40 : }</span></span>
<span id="L296"><span class="lineNum"> 296</span> <span class="tlaGNC"> 154 : int numItemsPack = 0; // The counter of the models inserted in the current pack</span></span>
<span id="L297"><span class="lineNum"> 297</span> : // Variables to control the accuracy finish condition</span>
<span id="L298"><span class="lineNum"> 298</span> <span class="tlaGNC"> 154 : double priorAccuracy = 0.0;</span></span>
<span id="L299"><span class="lineNum"> 299</span> <span class="tlaGNC"> 154 : double improvement = 1.0;</span></span>
<span id="L300"><span class="lineNum"> 300</span> <span class="tlaGNC"> 154 : double convergence_threshold = 1e-4;</span></span>
<span id="L301"><span class="lineNum"> 301</span> <span class="tlaGNC"> 154 : int tolerance = 0; // number of times the accuracy is lower than the convergence_threshold</span></span>
<span id="L302"><span class="lineNum"> 302</span> : // Step 0: Set the finish condition</span>
<span id="L303"><span class="lineNum"> 303</span> : // epsilon sub t &gt; 0.5 =&gt; inverse the weights policy</span>
<span id="L304"><span class="lineNum"> 304</span> : // validation error is not decreasing</span>
<span id="L305"><span class="lineNum"> 305</span> : // run out of features</span>
<span id="L306"><span class="lineNum"> 306</span> <span class="tlaGNC"> 154 : bool ascending = order_algorithm == Orders.ASC;</span></span>
<span id="L307"><span class="lineNum"> 307</span> <span class="tlaGNC"> 154 : std::mt19937 g{ 173 };</span></span>
<span id="L308"><span class="lineNum"> 308</span> <span class="tlaGNC"> 919 : while (!finished) {</span></span>
<span id="L309"><span class="lineNum"> 309</span> : // Step 1: Build ranking with mutual information</span>
<span id="L310"><span class="lineNum"> 310</span> <span class="tlaGNC"> 765 : auto featureSelection = metrics.SelectKBestWeighted(weights_, ascending, n); // Get all the features sorted</span></span>
<span id="L311"><span class="lineNum"> 311</span> <span class="tlaGNC"> 765 : if (order_algorithm == Orders.RAND) {</span></span>
<span id="L312"><span class="lineNum"> 312</span> <span class="tlaGNC"> 81 : std::shuffle(featureSelection.begin(), featureSelection.end(), g);</span></span>
<span id="L313"><span class="lineNum"> 313</span> : }</span>
<span id="L314"><span class="lineNum"> 314</span> : // Remove used features</span>
<span id="L315"><span class="lineNum"> 315</span> <span class="tlaGNC"> 1530 : featureSelection.erase(remove_if(begin(featureSelection), end(featureSelection), [&amp;](auto x)</span></span>
<span id="L316"><span class="lineNum"> 316</span> <span class="tlaGNC"> 110548 : { return std::find(begin(featuresUsed), end(featuresUsed), x) != end(featuresUsed);}),</span></span>
<span id="L317"><span class="lineNum"> 317</span> <span class="tlaGNC"> 765 : end(featureSelection)</span></span>
<span id="L318"><span class="lineNum"> 318</span> : );</span>
<span id="L319"><span class="lineNum"> 319</span> <span class="tlaGNC"> 765 : int k = bisection ? pow(2, tolerance) : 1;</span></span>
<span id="L320"><span class="lineNum"> 320</span> <span class="tlaGNC"> 765 : int counter = 0; // The model counter of the current pack</span></span>
<span id="L321"><span class="lineNum"> 321</span> <span class="tlaGNC"> 765 : VLOG_SCOPE_F(1, &quot;counter=%d k=%d featureSelection.size: %zu&quot;, counter, k, featureSelection.size());</span></span>
<span id="L322"><span class="lineNum"> 322</span> <span class="tlaGNC"> 1838 : while (counter++ &lt; k &amp;&amp; featureSelection.size() &gt; 0) {</span></span>
<span id="L323"><span class="lineNum"> 323</span> <span class="tlaGNC"> 1073 : auto feature = featureSelection[0];</span></span>
<span id="L324"><span class="lineNum"> 324</span> <span class="tlaGNC"> 1073 : featureSelection.erase(featureSelection.begin());</span></span>
<span id="L325"><span class="lineNum"> 325</span> <span class="tlaGNC"> 1073 : std::unique_ptr&lt;Classifier&gt; model;</span></span>
<span id="L326"><span class="lineNum"> 326</span> <span class="tlaGNC"> 1073 : model = std::make_unique&lt;SPODE&gt;(feature);</span></span>
<span id="L327"><span class="lineNum"> 327</span> <span class="tlaGNC"> 1073 : model-&gt;fit(dataset, features, className, states, weights_);</span></span>
<span id="L328"><span class="lineNum"> 328</span> <span class="tlaGNC"> 1073 : alpha_t = 0.0;</span></span>
<span id="L329"><span class="lineNum"> 329</span> <span class="tlaGNC"> 1073 : if (!block_update) {</span></span>
<span id="L330"><span class="lineNum"> 330</span> <span class="tlaGNC"> 945 : auto ypred = model-&gt;predict(X_train);</span></span>
<span id="L331"><span class="lineNum"> 331</span> : // Step 3.1: Compute the classifier amout of say</span>
<span id="L332"><span class="lineNum"> 332</span> <span class="tlaGNC"> 945 : std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);</span></span>
<span id="L333"><span class="lineNum"> 333</span> <span class="tlaGNC"> 945 : }</span></span>
<span id="L334"><span class="lineNum"> 334</span> : // Step 3.4: Store classifier and its accuracy to weigh its future vote</span>
<span id="L335"><span class="lineNum"> 335</span> <span class="tlaGNC"> 1073 : numItemsPack++;</span></span>
<span id="L336"><span class="lineNum"> 336</span> <span class="tlaGNC"> 1073 : featuresUsed.push_back(feature);</span></span>
<span id="L337"><span class="lineNum"> 337</span> <span class="tlaGNC"> 1073 : models.push_back(std::move(model));</span></span>
<span id="L338"><span class="lineNum"> 338</span> <span class="tlaGNC"> 1073 : significanceModels.push_back(alpha_t);</span></span>
<span id="L339"><span class="lineNum"> 339</span> <span class="tlaGNC"> 1073 : n_models++;</span></span>
<span id="L340"><span class="lineNum"> 340</span> <span class="tlaGNC"> 1073 : VLOG_SCOPE_F(2, &quot;numItemsPack: %d n_models: %d featuresUsed: %zu&quot;, numItemsPack, n_models, featuresUsed.size());</span></span>
<span id="L341"><span class="lineNum"> 341</span> <span class="tlaGNC"> 1073 : }</span></span>
<span id="L342"><span class="lineNum"> 342</span> <span class="tlaGNC"> 765 : if (block_update) {</span></span>
<span id="L343"><span class="lineNum"> 343</span> <span class="tlaGNC"> 40 : std::tie(weights_, alpha_t, finished) = update_weights_block(k, y_train, weights_);</span></span>
<span id="L344"><span class="lineNum"> 344</span> : }</span>
<span id="L345"><span class="lineNum"> 345</span> <span class="tlaGNC"> 765 : if (convergence &amp;&amp; !finished) {</span></span>
<span id="L346"><span class="lineNum"> 346</span> <span class="tlaGNC"> 474 : auto y_val_predict = predict(X_test);</span></span>
<span id="L347"><span class="lineNum"> 347</span> <span class="tlaGNC"> 474 : double accuracy = (y_val_predict == y_test).sum().item&lt;double&gt;() / (double)y_test.size(0);</span></span>
<span id="L348"><span class="lineNum"> 348</span> <span class="tlaGNC"> 474 : if (priorAccuracy == 0) {</span></span>
<span id="L349"><span class="lineNum"> 349</span> <span class="tlaGNC"> 119 : priorAccuracy = accuracy;</span></span>
<span id="L350"><span class="lineNum"> 350</span> : } else {</span>
<span id="L351"><span class="lineNum"> 351</span> <span class="tlaGNC"> 355 : improvement = accuracy - priorAccuracy;</span></span>
<span id="L352"><span class="lineNum"> 352</span> : }</span>
<span id="L353"><span class="lineNum"> 353</span> <span class="tlaGNC"> 474 : if (improvement &lt; convergence_threshold) {</span></span>
<span id="L354"><span class="lineNum"> 354</span> <span class="tlaGNC"> 296 : VLOG_SCOPE_F(3, &quot; (improvement&lt;threshold) tolerance: %d numItemsPack: %d improvement: %f prior: %f current: %f&quot;, tolerance, numItemsPack, improvement, priorAccuracy, accuracy);</span></span>
<span id="L355"><span class="lineNum"> 355</span> <span class="tlaGNC"> 296 : tolerance++;</span></span>
<span id="L356"><span class="lineNum"> 356</span> <span class="tlaGNC"> 296 : } else {</span></span>
<span id="L357"><span class="lineNum"> 357</span> <span class="tlaGNC"> 178 : VLOG_SCOPE_F(3, &quot;* (improvement&gt;=threshold) Reset. tolerance: %d numItemsPack: %d improvement: %f prior: %f current: %f&quot;, tolerance, numItemsPack, improvement, priorAccuracy, accuracy);</span></span>
<span id="L358"><span class="lineNum"> 358</span> <span class="tlaGNC"> 178 : tolerance = 0; // Reset the counter if the model performs better</span></span>
<span id="L359"><span class="lineNum"> 359</span> <span class="tlaGNC"> 178 : numItemsPack = 0;</span></span>
<span id="L360"><span class="lineNum"> 360</span> <span class="tlaGNC"> 178 : }</span></span>
<span id="L361"><span class="lineNum"> 361</span> <span class="tlaGNC"> 474 : if (convergence_best) {</span></span>
<span id="L362"><span class="lineNum"> 362</span> : // Keep the best accuracy until now as the prior accuracy</span>
<span id="L363"><span class="lineNum"> 363</span> <span class="tlaGNC"> 71 : priorAccuracy = std::max(accuracy, priorAccuracy);</span></span>
<span id="L364"><span class="lineNum"> 364</span> : } else {</span>
<span id="L365"><span class="lineNum"> 365</span> : // Keep the last accuray obtained as the prior accuracy</span>
<span id="L366"><span class="lineNum"> 366</span> <span class="tlaGNC"> 403 : priorAccuracy = accuracy;</span></span>
<span id="L367"><span class="lineNum"> 367</span> : }</span>
<span id="L368"><span class="lineNum"> 368</span> <span class="tlaGNC"> 474 : }</span></span>
<span id="L369"><span class="lineNum"> 369</span> <span class="tlaGNC"> 765 : VLOG_SCOPE_F(1, &quot;tolerance: %d featuresUsed.size: %zu features.size: %zu&quot;, tolerance, featuresUsed.size(), features.size());</span></span>
<span id="L370"><span class="lineNum"> 370</span> <span class="tlaGNC"> 765 : finished = finished || tolerance &gt; maxTolerance || featuresUsed.size() == features.size();</span></span>
<span id="L371"><span class="lineNum"> 371</span> <span class="tlaGNC"> 765 : }</span></span>
<span id="L372"><span class="lineNum"> 372</span> <span class="tlaGNC"> 154 : if (tolerance &gt; maxTolerance) {</span></span>
<span id="L373"><span class="lineNum"> 373</span> <span class="tlaGNC"> 21 : if (numItemsPack &lt; n_models) {</span></span>
<span id="L374"><span class="lineNum"> 374</span> <span class="tlaGNC"> 21 : notes.push_back(&quot;Convergence threshold reached &amp; &quot; + std::to_string(numItemsPack) + &quot; models eliminated&quot;);</span></span>
<span id="L375"><span class="lineNum"> 375</span> <span class="tlaGNC"> 21 : VLOG_SCOPE_F(4, &quot;Convergence threshold reached &amp; %d models eliminated of %d&quot;, numItemsPack, n_models);</span></span>
<span id="L376"><span class="lineNum"> 376</span> <span class="tlaGNC"> 336 : for (int i = 0; i &lt; numItemsPack; ++i) {</span></span>
<span id="L377"><span class="lineNum"> 377</span> <span class="tlaGNC"> 315 : significanceModels.pop_back();</span></span>
<span id="L378"><span class="lineNum"> 378</span> <span class="tlaGNC"> 315 : models.pop_back();</span></span>
<span id="L379"><span class="lineNum"> 379</span> <span class="tlaGNC"> 315 : n_models--;</span></span>
<span id="L380"><span class="lineNum"> 380</span> : }</span>
<span id="L381"><span class="lineNum"> 381</span> <span class="tlaGNC"> 21 : } else {</span></span>
<span id="L382"><span class="lineNum"> 382</span> <span class="tlaUNC tlaBgUNC"> 0 : notes.push_back(&quot;Convergence threshold reached &amp; 0 models eliminated&quot;);</span></span>
<span id="L383"><span class="lineNum"> 383</span> <span class="tlaUNC"> 0 : VLOG_SCOPE_F(4, &quot;Convergence threshold reached &amp; 0 models eliminated n_models=%d numItemsPack=%d&quot;, n_models, numItemsPack);</span></span>
<span id="L384"><span class="lineNum"> 384</span> <span class="tlaUNC"> 0 : }</span></span>
<span id="L385"><span class="lineNum"> 385</span> : }</span>
<span id="L386"><span class="lineNum"> 386</span> <span class="tlaGNC tlaBgGNC"> 154 : if (featuresUsed.size() != features.size()) {</span></span>
<span id="L387"><span class="lineNum"> 387</span> <span class="tlaGNC"> 21 : notes.push_back(&quot;Used features in train: &quot; + std::to_string(featuresUsed.size()) + &quot; of &quot; + std::to_string(features.size()));</span></span>
<span id="L388"><span class="lineNum"> 388</span> <span class="tlaGNC"> 21 : status = WARNING;</span></span>
<span id="L389"><span class="lineNum"> 389</span> : }</span>
<span id="L390"><span class="lineNum"> 390</span> <span class="tlaGNC"> 154 : notes.push_back(&quot;Number of models: &quot; + std::to_string(n_models));</span></span>
<span id="L391"><span class="lineNum"> 391</span> <span class="tlaGNC"> 226 : }</span></span>
<span id="L392"><span class="lineNum"> 392</span> <span class="tlaGNC"> 8 : std::vector&lt;std::string&gt; BoostAODE::graph(const std::string&amp; title) const</span></span>
<span id="L393"><span class="lineNum"> 393</span> : {</span>
<span id="L394"><span class="lineNum"> 394</span> <span class="tlaGNC"> 8 : return Ensemble::graph(title);</span></span>
<span id="L395"><span class="lineNum"> 395</span> : }</span>
<span id="L396"><span class="lineNum"> 396</span> : }</span>
</pre>
</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -101,14 +101,15 @@
<span id="L39"><span class="lineNum"> 39</span> : int maxTolerance = 3;</span>
<span id="L40"><span class="lineNum"> 40</span> : std::string order_algorithm; // order to process the KBest features asc, desc, rand</span>
<span id="L41"><span class="lineNum"> 41</span> : bool convergence = true; //if true, stop when the model does not improve</span>
<span id="L42"><span class="lineNum"> 42</span> : bool selectFeatures = false; // if true, use feature selection</span>
<span id="L43"><span class="lineNum"> 43</span> : std::string select_features_algorithm = Orders.DESC; // Selected feature selection algorithm</span>
<span id="L44"><span class="lineNum"> 44</span> : FeatureSelect* featureSelector = nullptr;</span>
<span id="L45"><span class="lineNum"> 45</span> : double threshold = -1;</span>
<span id="L46"><span class="lineNum"> 46</span> : bool block_update = false;</span>
<span id="L47"><span class="lineNum"> 47</span> : };</span>
<span id="L48"><span class="lineNum"> 48</span> : }</span>
<span id="L49"><span class="lineNum"> 49</span> : #endif</span>
<span id="L42"><span class="lineNum"> 42</span> : bool convergence_best = false; // wether to keep the best accuracy to the moment or the last accuracy as prior accuracy</span>
<span id="L43"><span class="lineNum"> 43</span> : bool selectFeatures = false; // if true, use feature selection</span>
<span id="L44"><span class="lineNum"> 44</span> : std::string select_features_algorithm = Orders.DESC; // Selected feature selection algorithm</span>
<span id="L45"><span class="lineNum"> 45</span> : FeatureSelect* featureSelector = nullptr;</span>
<span id="L46"><span class="lineNum"> 46</span> : double threshold = -1;</span>
<span id="L47"><span class="lineNum"> 47</span> : bool block_update = false;</span>
<span id="L48"><span class="lineNum"> 48</span> : };</span>
<span id="L49"><span class="lineNum"> 49</span> : }</span>
<span id="L50"><span class="lineNum"> 50</span> : #endif</span>
</pre>
</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,175 +65,175 @@
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L212">_ZNK8bayesnet8Ensemble17getNumberOfStatesEv</a></td>
<td class="coverFnHi">1</td>
<td class="coverFnHi">11</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L178">_ZNK8bayesnet8Ensemble4showB5cxx11Ev</a></td>
<td class="coverFnHi">1</td>
<td class="coverFnHi">11</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L187">_ZNK8bayesnet8Ensemble5graphERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE</a></td>
<td class="coverFnHi">3</td>
<td class="coverFnHi">33</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L14">_ZN8bayesnet8Ensemble10trainModelERKN2at6TensorE</a></td>
<td class="coverFnHi">6</td>
<td class="coverFnHi">66</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L204">_ZNK8bayesnet8Ensemble16getNumberOfEdgesEv</a></td>
<td class="coverFnHi">6</td>
<td class="coverFnHi">70</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L196">_ZNK8bayesnet8Ensemble16getNumberOfNodesEv</a></td>
<td class="coverFnHi">6</td>
<td class="coverFnHi">70</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L131">_ZN8bayesnet8Ensemble22predict_average_votingERSt6vectorIS1_IiSaIiEESaIS3_EE</a></td>
<td class="coverFnHi">7</td>
<td class="coverFnHi">82</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L102">_ZN8bayesnet8Ensemble21predict_average_probaERSt6vectorIS1_IiSaIiEESaIS3_EE</a></td>
<td class="coverFnHi">11</td>
<td class="coverFnHi">120</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L167">_ZN8bayesnet8Ensemble5scoreERSt6vectorIS1_IiSaIiEESaIS3_EERS3_</a></td>
<td class="coverFnHi">12</td>
<td class="coverFnHi">134</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L22">_ZN8bayesnet8Ensemble15compute_arg_maxERSt6vectorIS1_IdSaIdEESaIS3_EE</a></td>
<td class="coverFnHi">13</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L156">_ZN8bayesnet8Ensemble5scoreERN2at6TensorES3_</a></td>
<td class="coverFnHi">16</td>
<td class="coverFnHi">145</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L72">_ZN8bayesnet8Ensemble7predictERSt6vectorIS1_IiSaIiEESaIS3_EE</a></td>
<td class="coverFnHi">16</td>
<td class="coverFnHi">178</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L138">_ZN8bayesnet8Ensemble22predict_average_votingERN2at6TensorE</a></td>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L156">_ZN8bayesnet8Ensemble5scoreERN2at6TensorES3_</a></td>
<td class="coverFnHi">20</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L36">_ZN8bayesnet8Ensemble6votingERN2at6TensorE</a></td>
<td class="coverFnHi">20</td>
<td class="coverFnHi">194</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L58">_ZN8bayesnet8Ensemble13predict_probaERSt6vectorIS1_IiSaIiEESaIS3_EE</a></td>
<td class="coverFnHi">24</td>
<td class="coverFnHi">268</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L82">_ZN8bayesnet8Ensemble21predict_average_probaERN2at6TensorE</a></td>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L138">_ZN8bayesnet8Ensemble22predict_average_votingERN2at6TensorE</a></td>
<td class="coverFnHi">63</td>
<td class="coverFnHi">291</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L36">_ZN8bayesnet8Ensemble6votingERN2at6TensorE</a></td>
<td class="coverFnHi">291</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L109">_ZZN8bayesnet8Ensemble21predict_average_probaERSt6vectorIS1_IiSaIiEESaIS3_EEENKUlvE_clEv</a></td>
<td class="coverFnHi">63</td>
<td class="coverFnHi">722</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L31">_ZN8bayesnet8Ensemble15compute_arg_maxERN2at6TensorE</a></td>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L82">_ZN8bayesnet8Ensemble21predict_average_probaERN2at6TensorE</a></td>
<td class="coverFnHi">75</td>
<td class="coverFnHi">735</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L9">_ZN8bayesnet8EnsembleC2Eb</a></td>
<td class="coverFnHi">77</td>
<td class="coverFnHi">864</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L31">_ZN8bayesnet8Ensemble15compute_arg_maxERN2at6TensorE</a></td>
<td class="coverFnHi">933</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L77">_ZN8bayesnet8Ensemble7predictERN2at6TensorE</a></td>
<td class="coverFnHi">78</td>
<td class="coverFnHi">966</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L65">_ZN8bayesnet8Ensemble13predict_probaERN2at6TensorE</a></td>
<td class="coverFnHi">82</td>
<td class="coverFnHi">1010</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L145">_ZZN8bayesnet8Ensemble22predict_average_votingERN2at6TensorEENKUlvE_clEv</a></td>
<td class="coverFnHi">114</td>
<td class="coverFnHi">1668</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L89">_ZZN8bayesnet8Ensemble21predict_average_probaERN2at6TensorEENKUlvE_clEv</a></td>
<td class="coverFnHi">302</td>
<td class="coverFnHi">3518</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L127">_ZZN8bayesnet8Ensemble21predict_average_probaERSt6vectorIS1_IiSaIiEESaIS3_EEENKUldE_clEd</a></td>
<td class="coverFnHi">8606</td>
<td class="coverFnHi">98260</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L117">_ZZZN8bayesnet8Ensemble21predict_average_probaERSt6vectorIS1_IiSaIiEESaIS3_EEENKUlvE_clEvENKUlddE_clEdd</a></td>
<td class="coverFnHi">65366</td>
<td class="coverFnHi">756136</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,175 +65,175 @@
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L14">_ZN8bayesnet8Ensemble10trainModelERKN2at6TensorE</a></td>
<td class="coverFnHi">6</td>
<td class="coverFnHi">66</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L65">_ZN8bayesnet8Ensemble13predict_probaERN2at6TensorE</a></td>
<td class="coverFnHi">82</td>
<td class="coverFnHi">1010</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L58">_ZN8bayesnet8Ensemble13predict_probaERSt6vectorIS1_IiSaIiEESaIS3_EE</a></td>
<td class="coverFnHi">24</td>
<td class="coverFnHi">268</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L31">_ZN8bayesnet8Ensemble15compute_arg_maxERN2at6TensorE</a></td>
<td class="coverFnHi">75</td>
<td class="coverFnHi">933</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L22">_ZN8bayesnet8Ensemble15compute_arg_maxERSt6vectorIS1_IdSaIdEESaIS3_EE</a></td>
<td class="coverFnHi">13</td>
<td class="coverFnHi">145</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L82">_ZN8bayesnet8Ensemble21predict_average_probaERN2at6TensorE</a></td>
<td class="coverFnHi">63</td>
<td class="coverFnHi">735</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L102">_ZN8bayesnet8Ensemble21predict_average_probaERSt6vectorIS1_IiSaIiEESaIS3_EE</a></td>
<td class="coverFnHi">11</td>
<td class="coverFnHi">120</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L138">_ZN8bayesnet8Ensemble22predict_average_votingERN2at6TensorE</a></td>
<td class="coverFnHi">20</td>
<td class="coverFnHi">291</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L131">_ZN8bayesnet8Ensemble22predict_average_votingERSt6vectorIS1_IiSaIiEESaIS3_EE</a></td>
<td class="coverFnHi">7</td>
<td class="coverFnHi">82</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L156">_ZN8bayesnet8Ensemble5scoreERN2at6TensorES3_</a></td>
<td class="coverFnHi">16</td>
<td class="coverFnHi">194</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L167">_ZN8bayesnet8Ensemble5scoreERSt6vectorIS1_IiSaIiEESaIS3_EERS3_</a></td>
<td class="coverFnHi">12</td>
<td class="coverFnHi">134</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L36">_ZN8bayesnet8Ensemble6votingERN2at6TensorE</a></td>
<td class="coverFnHi">20</td>
<td class="coverFnHi">291</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L77">_ZN8bayesnet8Ensemble7predictERN2at6TensorE</a></td>
<td class="coverFnHi">78</td>
<td class="coverFnHi">966</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L72">_ZN8bayesnet8Ensemble7predictERSt6vectorIS1_IiSaIiEESaIS3_EE</a></td>
<td class="coverFnHi">16</td>
<td class="coverFnHi">178</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L9">_ZN8bayesnet8EnsembleC2Eb</a></td>
<td class="coverFnHi">77</td>
<td class="coverFnHi">864</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L204">_ZNK8bayesnet8Ensemble16getNumberOfEdgesEv</a></td>
<td class="coverFnHi">6</td>
<td class="coverFnHi">70</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L196">_ZNK8bayesnet8Ensemble16getNumberOfNodesEv</a></td>
<td class="coverFnHi">6</td>
<td class="coverFnHi">70</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L212">_ZNK8bayesnet8Ensemble17getNumberOfStatesEv</a></td>
<td class="coverFnHi">1</td>
<td class="coverFnHi">11</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L178">_ZNK8bayesnet8Ensemble4showB5cxx11Ev</a></td>
<td class="coverFnHi">1</td>
<td class="coverFnHi">11</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L187">_ZNK8bayesnet8Ensemble5graphERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE</a></td>
<td class="coverFnHi">3</td>
<td class="coverFnHi">33</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L89">_ZZN8bayesnet8Ensemble21predict_average_probaERN2at6TensorEENKUlvE_clEv</a></td>
<td class="coverFnHi">302</td>
<td class="coverFnHi">3518</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L127">_ZZN8bayesnet8Ensemble21predict_average_probaERSt6vectorIS1_IiSaIiEESaIS3_EEENKUldE_clEd</a></td>
<td class="coverFnHi">8606</td>
<td class="coverFnHi">98260</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L109">_ZZN8bayesnet8Ensemble21predict_average_probaERSt6vectorIS1_IiSaIiEESaIS3_EEENKUlvE_clEv</a></td>
<td class="coverFnHi">63</td>
<td class="coverFnHi">722</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L145">_ZZN8bayesnet8Ensemble22predict_average_votingERN2at6TensorEENKUlvE_clEv</a></td>
<td class="coverFnHi">114</td>
<td class="coverFnHi">1668</td>
</tr>
<tr>
<td class="coverFn"><a href="Ensemble.cc.gcov.html#L117">_ZZZN8bayesnet8Ensemble21predict_average_probaERSt6vectorIS1_IiSaIiEESaIS3_EEENKUlvE_clEvENKUlddE_clEdd</a></td>
<td class="coverFnHi">65366</td>
<td class="coverFnHi">756136</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -70,216 +70,216 @@
<span id="L8"><span class="lineNum"> 8</span> : </span>
<span id="L9"><span class="lineNum"> 9</span> : namespace bayesnet {</span>
<span id="L10"><span class="lineNum"> 10</span> : </span>
<span id="L11"><span class="lineNum"> 11</span> <span class="tlaGNC tlaBgGNC"> 77 : Ensemble::Ensemble(bool predict_voting) : Classifier(Network()), n_models(0), predict_voting(predict_voting)</span></span>
<span id="L11"><span class="lineNum"> 11</span> <span class="tlaGNC tlaBgGNC"> 864 : Ensemble::Ensemble(bool predict_voting) : Classifier(Network()), n_models(0), predict_voting(predict_voting)</span></span>
<span id="L12"><span class="lineNum"> 12</span> : {</span>
<span id="L13"><span class="lineNum"> 13</span> : </span>
<span id="L14"><span class="lineNum"> 14</span> <span class="tlaGNC"> 77 : };</span></span>
<span id="L14"><span class="lineNum"> 14</span> <span class="tlaGNC"> 864 : };</span></span>
<span id="L15"><span class="lineNum"> 15</span> : const std::string ENSEMBLE_NOT_FITTED = &quot;Ensemble has not been fitted&quot;;</span>
<span id="L16"><span class="lineNum"> 16</span> <span class="tlaGNC"> 6 : void Ensemble::trainModel(const torch::Tensor&amp; weights)</span></span>
<span id="L16"><span class="lineNum"> 16</span> <span class="tlaGNC"> 66 : void Ensemble::trainModel(const torch::Tensor&amp; weights)</span></span>
<span id="L17"><span class="lineNum"> 17</span> : {</span>
<span id="L18"><span class="lineNum"> 18</span> <span class="tlaGNC"> 6 : n_models = models.size();</span></span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC"> 47 : for (auto i = 0; i &lt; n_models; ++i) {</span></span>
<span id="L18"><span class="lineNum"> 18</span> <span class="tlaGNC"> 66 : n_models = models.size();</span></span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC"> 517 : for (auto i = 0; i &lt; n_models; ++i) {</span></span>
<span id="L20"><span class="lineNum"> 20</span> : // fit with std::vectors</span>
<span id="L21"><span class="lineNum"> 21</span> <span class="tlaGNC"> 41 : models[i]-&gt;fit(dataset, features, className, states);</span></span>
<span id="L21"><span class="lineNum"> 21</span> <span class="tlaGNC"> 451 : models[i]-&gt;fit(dataset, features, className, states);</span></span>
<span id="L22"><span class="lineNum"> 22</span> : }</span>
<span id="L23"><span class="lineNum"> 23</span> <span class="tlaGNC"> 6 : }</span></span>
<span id="L24"><span class="lineNum"> 24</span> <span class="tlaGNC"> 13 : std::vector&lt;int&gt; Ensemble::compute_arg_max(std::vector&lt;std::vector&lt;double&gt;&gt;&amp; X)</span></span>
<span id="L23"><span class="lineNum"> 23</span> <span class="tlaGNC"> 66 : }</span></span>
<span id="L24"><span class="lineNum"> 24</span> <span class="tlaGNC"> 145 : std::vector&lt;int&gt; Ensemble::compute_arg_max(std::vector&lt;std::vector&lt;double&gt;&gt;&amp; X)</span></span>
<span id="L25"><span class="lineNum"> 25</span> : {</span>
<span id="L26"><span class="lineNum"> 26</span> <span class="tlaGNC"> 13 : std::vector&lt;int&gt; y_pred;</span></span>
<span id="L27"><span class="lineNum"> 27</span> <span class="tlaGNC"> 2843 : for (auto i = 0; i &lt; X.size(); ++i) {</span></span>
<span id="L28"><span class="lineNum"> 28</span> <span class="tlaGNC"> 2830 : auto max = std::max_element(X[i].begin(), X[i].end());</span></span>
<span id="L29"><span class="lineNum"> 29</span> <span class="tlaGNC"> 5660 : y_pred.push_back(std::distance(X[i].begin(), max));</span></span>
<span id="L26"><span class="lineNum"> 26</span> <span class="tlaGNC"> 145 : std::vector&lt;int&gt; y_pred;</span></span>
<span id="L27"><span class="lineNum"> 27</span> <span class="tlaGNC"> 33363 : for (auto i = 0; i &lt; X.size(); ++i) {</span></span>
<span id="L28"><span class="lineNum"> 28</span> <span class="tlaGNC"> 33218 : auto max = std::max_element(X[i].begin(), X[i].end());</span></span>
<span id="L29"><span class="lineNum"> 29</span> <span class="tlaGNC"> 66436 : y_pred.push_back(std::distance(X[i].begin(), max));</span></span>
<span id="L30"><span class="lineNum"> 30</span> : }</span>
<span id="L31"><span class="lineNum"> 31</span> <span class="tlaGNC"> 13 : return y_pred;</span></span>
<span id="L31"><span class="lineNum"> 31</span> <span class="tlaGNC"> 145 : return y_pred;</span></span>
<span id="L32"><span class="lineNum"> 32</span> <span class="tlaUNC tlaBgUNC"> 0 : }</span></span>
<span id="L33"><span class="lineNum"> 33</span> <span class="tlaGNC tlaBgGNC"> 75 : torch::Tensor Ensemble::compute_arg_max(torch::Tensor&amp; X)</span></span>
<span id="L33"><span class="lineNum"> 33</span> <span class="tlaGNC tlaBgGNC"> 933 : torch::Tensor Ensemble::compute_arg_max(torch::Tensor&amp; X)</span></span>
<span id="L34"><span class="lineNum"> 34</span> : {</span>
<span id="L35"><span class="lineNum"> 35</span> <span class="tlaGNC"> 75 : auto y_pred = torch::argmax(X, 1);</span></span>
<span id="L36"><span class="lineNum"> 36</span> <span class="tlaGNC"> 75 : return y_pred;</span></span>
<span id="L35"><span class="lineNum"> 35</span> <span class="tlaGNC"> 933 : auto y_pred = torch::argmax(X, 1);</span></span>
<span id="L36"><span class="lineNum"> 36</span> <span class="tlaGNC"> 933 : return y_pred;</span></span>
<span id="L37"><span class="lineNum"> 37</span> : }</span>
<span id="L38"><span class="lineNum"> 38</span> <span class="tlaGNC"> 20 : torch::Tensor Ensemble::voting(torch::Tensor&amp; votes)</span></span>
<span id="L38"><span class="lineNum"> 38</span> <span class="tlaGNC"> 291 : torch::Tensor Ensemble::voting(torch::Tensor&amp; votes)</span></span>
<span id="L39"><span class="lineNum"> 39</span> : {</span>
<span id="L40"><span class="lineNum"> 40</span> : // Convert m x n_models tensor to a m x n_class_states with voting probabilities</span>
<span id="L41"><span class="lineNum"> 41</span> <span class="tlaGNC"> 20 : auto y_pred_ = votes.accessor&lt;int, 2&gt;();</span></span>
<span id="L42"><span class="lineNum"> 42</span> <span class="tlaGNC"> 20 : std::vector&lt;int&gt; y_pred_final;</span></span>
<span id="L43"><span class="lineNum"> 43</span> <span class="tlaGNC"> 20 : int numClasses = states.at(className).size();</span></span>
<span id="L41"><span class="lineNum"> 41</span> <span class="tlaGNC"> 291 : auto y_pred_ = votes.accessor&lt;int, 2&gt;();</span></span>
<span id="L42"><span class="lineNum"> 42</span> <span class="tlaGNC"> 291 : std::vector&lt;int&gt; y_pred_final;</span></span>
<span id="L43"><span class="lineNum"> 43</span> <span class="tlaGNC"> 291 : int numClasses = states.at(className).size();</span></span>
<span id="L44"><span class="lineNum"> 44</span> : // votes is m x n_models with the prediction of every model for each sample</span>
<span id="L45"><span class="lineNum"> 45</span> <span class="tlaGNC"> 20 : auto result = torch::zeros({ votes.size(0), numClasses }, torch::kFloat32);</span></span>
<span id="L46"><span class="lineNum"> 46</span> <span class="tlaGNC"> 20 : auto sum = std::reduce(significanceModels.begin(), significanceModels.end());</span></span>
<span id="L47"><span class="lineNum"> 47</span> <span class="tlaGNC"> 5364 : for (int i = 0; i &lt; votes.size(0); ++i) {</span></span>
<span id="L45"><span class="lineNum"> 45</span> <span class="tlaGNC"> 291 : auto result = torch::zeros({ votes.size(0), numClasses }, torch::kFloat32);</span></span>
<span id="L46"><span class="lineNum"> 46</span> <span class="tlaGNC"> 291 : auto sum = std::reduce(significanceModels.begin(), significanceModels.end());</span></span>
<span id="L47"><span class="lineNum"> 47</span> <span class="tlaGNC"> 69474 : for (int i = 0; i &lt; votes.size(0); ++i) {</span></span>
<span id="L48"><span class="lineNum"> 48</span> : // n_votes store in each index (value of class) the significance added by each model</span>
<span id="L49"><span class="lineNum"> 49</span> : // i.e. n_votes[0] contains how much value has the value 0 of class. That value is generated by the models predictions</span>
<span id="L50"><span class="lineNum"> 50</span> <span class="tlaGNC"> 5344 : std::vector&lt;double&gt; n_votes(numClasses, 0.0);</span></span>
<span id="L51"><span class="lineNum"> 51</span> <span class="tlaGNC"> 42310 : for (int j = 0; j &lt; n_models; ++j) {</span></span>
<span id="L52"><span class="lineNum"> 52</span> <span class="tlaGNC"> 36966 : n_votes[y_pred_[i][j]] += significanceModels.at(j);</span></span>
<span id="L50"><span class="lineNum"> 50</span> <span class="tlaGNC"> 69183 : std::vector&lt;double&gt; n_votes(numClasses, 0.0);</span></span>
<span id="L51"><span class="lineNum"> 51</span> <span class="tlaGNC"> 541708 : for (int j = 0; j &lt; n_models; ++j) {</span></span>
<span id="L52"><span class="lineNum"> 52</span> <span class="tlaGNC"> 472525 : n_votes[y_pred_[i][j]] += significanceModels.at(j);</span></span>
<span id="L53"><span class="lineNum"> 53</span> : }</span>
<span id="L54"><span class="lineNum"> 54</span> <span class="tlaGNC"> 5344 : result[i] = torch::tensor(n_votes);</span></span>
<span id="L55"><span class="lineNum"> 55</span> <span class="tlaGNC"> 5344 : }</span></span>
<span id="L54"><span class="lineNum"> 54</span> <span class="tlaGNC"> 69183 : result[i] = torch::tensor(n_votes);</span></span>
<span id="L55"><span class="lineNum"> 55</span> <span class="tlaGNC"> 69183 : }</span></span>
<span id="L56"><span class="lineNum"> 56</span> : // To only do one division and gain precision</span>
<span id="L57"><span class="lineNum"> 57</span> <span class="tlaGNC"> 20 : result /= sum;</span></span>
<span id="L58"><span class="lineNum"> 58</span> <span class="tlaGNC"> 40 : return result;</span></span>
<span id="L59"><span class="lineNum"> 59</span> <span class="tlaGNC"> 20 : }</span></span>
<span id="L60"><span class="lineNum"> 60</span> <span class="tlaGNC"> 24 : std::vector&lt;std::vector&lt;double&gt;&gt; Ensemble::predict_proba(std::vector&lt;std::vector&lt;int&gt;&gt;&amp; X)</span></span>
<span id="L57"><span class="lineNum"> 57</span> <span class="tlaGNC"> 291 : result /= sum;</span></span>
<span id="L58"><span class="lineNum"> 58</span> <span class="tlaGNC"> 582 : return result;</span></span>
<span id="L59"><span class="lineNum"> 59</span> <span class="tlaGNC"> 291 : }</span></span>
<span id="L60"><span class="lineNum"> 60</span> <span class="tlaGNC"> 268 : std::vector&lt;std::vector&lt;double&gt;&gt; Ensemble::predict_proba(std::vector&lt;std::vector&lt;int&gt;&gt;&amp; X)</span></span>
<span id="L61"><span class="lineNum"> 61</span> : {</span>
<span id="L62"><span class="lineNum"> 62</span> <span class="tlaGNC"> 24 : if (!fitted) {</span></span>
<span id="L63"><span class="lineNum"> 63</span> <span class="tlaGNC"> 6 : throw std::logic_error(ENSEMBLE_NOT_FITTED);</span></span>
<span id="L62"><span class="lineNum"> 62</span> <span class="tlaGNC"> 268 : if (!fitted) {</span></span>
<span id="L63"><span class="lineNum"> 63</span> <span class="tlaGNC"> 66 : throw std::logic_error(ENSEMBLE_NOT_FITTED);</span></span>
<span id="L64"><span class="lineNum"> 64</span> : }</span>
<span id="L65"><span class="lineNum"> 65</span> <span class="tlaGNC"> 18 : return predict_voting ? predict_average_voting(X) : predict_average_proba(X);</span></span>
<span id="L65"><span class="lineNum"> 65</span> <span class="tlaGNC"> 202 : return predict_voting ? predict_average_voting(X) : predict_average_proba(X);</span></span>
<span id="L66"><span class="lineNum"> 66</span> : }</span>
<span id="L67"><span class="lineNum"> 67</span> <span class="tlaGNC"> 82 : torch::Tensor Ensemble::predict_proba(torch::Tensor&amp; X)</span></span>
<span id="L67"><span class="lineNum"> 67</span> <span class="tlaGNC"> 1010 : torch::Tensor Ensemble::predict_proba(torch::Tensor&amp; X)</span></span>
<span id="L68"><span class="lineNum"> 68</span> : {</span>
<span id="L69"><span class="lineNum"> 69</span> <span class="tlaGNC"> 82 : if (!fitted) {</span></span>
<span id="L70"><span class="lineNum"> 70</span> <span class="tlaGNC"> 6 : throw std::logic_error(ENSEMBLE_NOT_FITTED);</span></span>
<span id="L69"><span class="lineNum"> 69</span> <span class="tlaGNC"> 1010 : if (!fitted) {</span></span>
<span id="L70"><span class="lineNum"> 70</span> <span class="tlaGNC"> 66 : throw std::logic_error(ENSEMBLE_NOT_FITTED);</span></span>
<span id="L71"><span class="lineNum"> 71</span> : }</span>
<span id="L72"><span class="lineNum"> 72</span> <span class="tlaGNC"> 76 : return predict_voting ? predict_average_voting(X) : predict_average_proba(X);</span></span>
<span id="L72"><span class="lineNum"> 72</span> <span class="tlaGNC"> 944 : return predict_voting ? predict_average_voting(X) : predict_average_proba(X);</span></span>
<span id="L73"><span class="lineNum"> 73</span> : }</span>
<span id="L74"><span class="lineNum"> 74</span> <span class="tlaGNC"> 16 : std::vector&lt;int&gt; Ensemble::predict(std::vector&lt;std::vector&lt;int&gt;&gt;&amp; X)</span></span>
<span id="L74"><span class="lineNum"> 74</span> <span class="tlaGNC"> 178 : std::vector&lt;int&gt; Ensemble::predict(std::vector&lt;std::vector&lt;int&gt;&gt;&amp; X)</span></span>
<span id="L75"><span class="lineNum"> 75</span> : {</span>
<span id="L76"><span class="lineNum"> 76</span> <span class="tlaGNC"> 16 : auto res = predict_proba(X);</span></span>
<span id="L77"><span class="lineNum"> 77</span> <span class="tlaGNC"> 24 : return compute_arg_max(res);</span></span>
<span id="L78"><span class="lineNum"> 78</span> <span class="tlaGNC"> 12 : }</span></span>
<span id="L79"><span class="lineNum"> 79</span> <span class="tlaGNC"> 78 : torch::Tensor Ensemble::predict(torch::Tensor&amp; X)</span></span>
<span id="L76"><span class="lineNum"> 76</span> <span class="tlaGNC"> 178 : auto res = predict_proba(X);</span></span>
<span id="L77"><span class="lineNum"> 77</span> <span class="tlaGNC"> 268 : return compute_arg_max(res);</span></span>
<span id="L78"><span class="lineNum"> 78</span> <span class="tlaGNC"> 134 : }</span></span>
<span id="L79"><span class="lineNum"> 79</span> <span class="tlaGNC"> 966 : torch::Tensor Ensemble::predict(torch::Tensor&amp; X)</span></span>
<span id="L80"><span class="lineNum"> 80</span> : {</span>
<span id="L81"><span class="lineNum"> 81</span> <span class="tlaGNC"> 78 : auto res = predict_proba(X);</span></span>
<span id="L82"><span class="lineNum"> 82</span> <span class="tlaGNC"> 148 : return compute_arg_max(res);</span></span>
<span id="L83"><span class="lineNum"> 83</span> <span class="tlaGNC"> 74 : }</span></span>
<span id="L84"><span class="lineNum"> 84</span> <span class="tlaGNC"> 63 : torch::Tensor Ensemble::predict_average_proba(torch::Tensor&amp; X)</span></span>
<span id="L81"><span class="lineNum"> 81</span> <span class="tlaGNC"> 966 : auto res = predict_proba(X);</span></span>
<span id="L82"><span class="lineNum"> 82</span> <span class="tlaGNC"> 1844 : return compute_arg_max(res);</span></span>
<span id="L83"><span class="lineNum"> 83</span> <span class="tlaGNC"> 922 : }</span></span>
<span id="L84"><span class="lineNum"> 84</span> <span class="tlaGNC"> 735 : torch::Tensor Ensemble::predict_average_proba(torch::Tensor&amp; X)</span></span>
<span id="L85"><span class="lineNum"> 85</span> : {</span>
<span id="L86"><span class="lineNum"> 86</span> <span class="tlaGNC"> 63 : auto n_states = models[0]-&gt;getClassNumStates();</span></span>
<span id="L87"><span class="lineNum"> 87</span> <span class="tlaGNC"> 63 : torch::Tensor y_pred = torch::zeros({ X.size(1), n_states }, torch::kFloat32);</span></span>
<span id="L88"><span class="lineNum"> 88</span> <span class="tlaGNC"> 63 : auto threads{ std::vector&lt;std::thread&gt;() };</span></span>
<span id="L89"><span class="lineNum"> 89</span> <span class="tlaGNC"> 63 : std::mutex mtx;</span></span>
<span id="L90"><span class="lineNum"> 90</span> <span class="tlaGNC"> 365 : for (auto i = 0; i &lt; n_models; ++i) {</span></span>
<span id="L91"><span class="lineNum"> 91</span> <span class="tlaGNC"> 302 : threads.push_back(std::thread([&amp;, i]() {</span></span>
<span id="L92"><span class="lineNum"> 92</span> <span class="tlaGNC"> 302 : auto ypredict = models[i]-&gt;predict_proba(X);</span></span>
<span id="L93"><span class="lineNum"> 93</span> <span class="tlaGNC"> 302 : std::lock_guard&lt;std::mutex&gt; lock(mtx);</span></span>
<span id="L94"><span class="lineNum"> 94</span> <span class="tlaGNC"> 302 : y_pred += ypredict * significanceModels[i];</span></span>
<span id="L95"><span class="lineNum"> 95</span> <span class="tlaGNC"> 302 : }));</span></span>
<span id="L86"><span class="lineNum"> 86</span> <span class="tlaGNC"> 735 : auto n_states = models[0]-&gt;getClassNumStates();</span></span>
<span id="L87"><span class="lineNum"> 87</span> <span class="tlaGNC"> 735 : torch::Tensor y_pred = torch::zeros({ X.size(1), n_states }, torch::kFloat32);</span></span>
<span id="L88"><span class="lineNum"> 88</span> <span class="tlaGNC"> 735 : auto threads{ std::vector&lt;std::thread&gt;() };</span></span>
<span id="L89"><span class="lineNum"> 89</span> <span class="tlaGNC"> 735 : std::mutex mtx;</span></span>
<span id="L90"><span class="lineNum"> 90</span> <span class="tlaGNC"> 4253 : for (auto i = 0; i &lt; n_models; ++i) {</span></span>
<span id="L91"><span class="lineNum"> 91</span> <span class="tlaGNC"> 3518 : threads.push_back(std::thread([&amp;, i]() {</span></span>
<span id="L92"><span class="lineNum"> 92</span> <span class="tlaGNC"> 3518 : auto ypredict = models[i]-&gt;predict_proba(X);</span></span>
<span id="L93"><span class="lineNum"> 93</span> <span class="tlaGNC"> 3518 : std::lock_guard&lt;std::mutex&gt; lock(mtx);</span></span>
<span id="L94"><span class="lineNum"> 94</span> <span class="tlaGNC"> 3518 : y_pred += ypredict * significanceModels[i];</span></span>
<span id="L95"><span class="lineNum"> 95</span> <span class="tlaGNC"> 3518 : }));</span></span>
<span id="L96"><span class="lineNum"> 96</span> : }</span>
<span id="L97"><span class="lineNum"> 97</span> <span class="tlaGNC"> 365 : for (auto&amp; thread : threads) {</span></span>
<span id="L98"><span class="lineNum"> 98</span> <span class="tlaGNC"> 302 : thread.join();</span></span>
<span id="L97"><span class="lineNum"> 97</span> <span class="tlaGNC"> 4253 : for (auto&amp; thread : threads) {</span></span>
<span id="L98"><span class="lineNum"> 98</span> <span class="tlaGNC"> 3518 : thread.join();</span></span>
<span id="L99"><span class="lineNum"> 99</span> : }</span>
<span id="L100"><span class="lineNum"> 100</span> <span class="tlaGNC"> 63 : auto sum = std::reduce(significanceModels.begin(), significanceModels.end());</span></span>
<span id="L101"><span class="lineNum"> 101</span> <span class="tlaGNC"> 63 : y_pred /= sum;</span></span>
<span id="L102"><span class="lineNum"> 102</span> <span class="tlaGNC"> 126 : return y_pred;</span></span>
<span id="L103"><span class="lineNum"> 103</span> <span class="tlaGNC"> 63 : }</span></span>
<span id="L104"><span class="lineNum"> 104</span> <span class="tlaGNC"> 11 : std::vector&lt;std::vector&lt;double&gt;&gt; Ensemble::predict_average_proba(std::vector&lt;std::vector&lt;int&gt;&gt;&amp; X)</span></span>
<span id="L100"><span class="lineNum"> 100</span> <span class="tlaGNC"> 735 : auto sum = std::reduce(significanceModels.begin(), significanceModels.end());</span></span>
<span id="L101"><span class="lineNum"> 101</span> <span class="tlaGNC"> 735 : y_pred /= sum;</span></span>
<span id="L102"><span class="lineNum"> 102</span> <span class="tlaGNC"> 1470 : return y_pred;</span></span>
<span id="L103"><span class="lineNum"> 103</span> <span class="tlaGNC"> 735 : }</span></span>
<span id="L104"><span class="lineNum"> 104</span> <span class="tlaGNC"> 120 : std::vector&lt;std::vector&lt;double&gt;&gt; Ensemble::predict_average_proba(std::vector&lt;std::vector&lt;int&gt;&gt;&amp; X)</span></span>
<span id="L105"><span class="lineNum"> 105</span> : {</span>
<span id="L106"><span class="lineNum"> 106</span> <span class="tlaGNC"> 11 : auto n_states = models[0]-&gt;getClassNumStates();</span></span>
<span id="L107"><span class="lineNum"> 107</span> <span class="tlaGNC"> 11 : std::vector&lt;std::vector&lt;double&gt;&gt; y_pred(X[0].size(), std::vector&lt;double&gt;(n_states, 0.0));</span></span>
<span id="L108"><span class="lineNum"> 108</span> <span class="tlaGNC"> 11 : auto threads{ std::vector&lt;std::thread&gt;() };</span></span>
<span id="L109"><span class="lineNum"> 109</span> <span class="tlaGNC"> 11 : std::mutex mtx;</span></span>
<span id="L110"><span class="lineNum"> 110</span> <span class="tlaGNC"> 74 : for (auto i = 0; i &lt; n_models; ++i) {</span></span>
<span id="L111"><span class="lineNum"> 111</span> <span class="tlaGNC"> 63 : threads.push_back(std::thread([&amp;, i]() {</span></span>
<span id="L112"><span class="lineNum"> 112</span> <span class="tlaGNC"> 63 : auto ypredict = models[i]-&gt;predict_proba(X);</span></span>
<span id="L113"><span class="lineNum"> 113</span> <span class="tlaGNC"> 63 : assert(ypredict.size() == y_pred.size());</span></span>
<span id="L114"><span class="lineNum"> 114</span> <span class="tlaGNC"> 63 : assert(ypredict[0].size() == y_pred[0].size());</span></span>
<span id="L115"><span class="lineNum"> 115</span> <span class="tlaGNC"> 63 : std::lock_guard&lt;std::mutex&gt; lock(mtx);</span></span>
<span id="L106"><span class="lineNum"> 106</span> <span class="tlaGNC"> 120 : auto n_states = models[0]-&gt;getClassNumStates();</span></span>
<span id="L107"><span class="lineNum"> 107</span> <span class="tlaGNC"> 120 : std::vector&lt;std::vector&lt;double&gt;&gt; y_pred(X[0].size(), std::vector&lt;double&gt;(n_states, 0.0));</span></span>
<span id="L108"><span class="lineNum"> 108</span> <span class="tlaGNC"> 120 : auto threads{ std::vector&lt;std::thread&gt;() };</span></span>
<span id="L109"><span class="lineNum"> 109</span> <span class="tlaGNC"> 120 : std::mutex mtx;</span></span>
<span id="L110"><span class="lineNum"> 110</span> <span class="tlaGNC"> 842 : for (auto i = 0; i &lt; n_models; ++i) {</span></span>
<span id="L111"><span class="lineNum"> 111</span> <span class="tlaGNC"> 722 : threads.push_back(std::thread([&amp;, i]() {</span></span>
<span id="L112"><span class="lineNum"> 112</span> <span class="tlaGNC"> 722 : auto ypredict = models[i]-&gt;predict_proba(X);</span></span>
<span id="L113"><span class="lineNum"> 113</span> <span class="tlaGNC"> 722 : assert(ypredict.size() == y_pred.size());</span></span>
<span id="L114"><span class="lineNum"> 114</span> <span class="tlaGNC"> 722 : assert(ypredict[0].size() == y_pred[0].size());</span></span>
<span id="L115"><span class="lineNum"> 115</span> <span class="tlaGNC"> 722 : std::lock_guard&lt;std::mutex&gt; lock(mtx);</span></span>
<span id="L116"><span class="lineNum"> 116</span> : // Multiply each prediction by the significance of the model and then add it to the final prediction</span>
<span id="L117"><span class="lineNum"> 117</span> <span class="tlaGNC"> 12479 : for (auto j = 0; j &lt; ypredict.size(); ++j) {</span></span>
<span id="L118"><span class="lineNum"> 118</span> <span class="tlaGNC"> 12416 : std::transform(y_pred[j].begin(), y_pred[j].end(), ypredict[j].begin(), y_pred[j].begin(),</span></span>
<span id="L119"><span class="lineNum"> 119</span> <span class="tlaGNC"> 77782 : [significanceModels = significanceModels[i]](double x, double y) { return x + y * significanceModels; });</span></span>
<span id="L117"><span class="lineNum"> 117</span> <span class="tlaGNC"> 143118 : for (auto j = 0; j &lt; ypredict.size(); ++j) {</span></span>
<span id="L118"><span class="lineNum"> 118</span> <span class="tlaGNC"> 142396 : std::transform(y_pred[j].begin(), y_pred[j].end(), ypredict[j].begin(), y_pred[j].begin(),</span></span>
<span id="L119"><span class="lineNum"> 119</span> <span class="tlaGNC"> 898532 : [significanceModels = significanceModels[i]](double x, double y) { return x + y * significanceModels; });</span></span>
<span id="L120"><span class="lineNum"> 120</span> : }</span>
<span id="L121"><span class="lineNum"> 121</span> <span class="tlaGNC"> 63 : }));</span></span>
<span id="L121"><span class="lineNum"> 121</span> <span class="tlaGNC"> 722 : }));</span></span>
<span id="L122"><span class="lineNum"> 122</span> : }</span>
<span id="L123"><span class="lineNum"> 123</span> <span class="tlaGNC"> 74 : for (auto&amp; thread : threads) {</span></span>
<span id="L124"><span class="lineNum"> 124</span> <span class="tlaGNC"> 63 : thread.join();</span></span>
<span id="L123"><span class="lineNum"> 123</span> <span class="tlaGNC"> 842 : for (auto&amp; thread : threads) {</span></span>
<span id="L124"><span class="lineNum"> 124</span> <span class="tlaGNC"> 722 : thread.join();</span></span>
<span id="L125"><span class="lineNum"> 125</span> : }</span>
<span id="L126"><span class="lineNum"> 126</span> <span class="tlaGNC"> 11 : auto sum = std::reduce(significanceModels.begin(), significanceModels.end());</span></span>
<span id="L126"><span class="lineNum"> 126</span> <span class="tlaGNC"> 120 : auto sum = std::reduce(significanceModels.begin(), significanceModels.end());</span></span>
<span id="L127"><span class="lineNum"> 127</span> : //Divide each element of the prediction by the sum of the significances</span>
<span id="L128"><span class="lineNum"> 128</span> <span class="tlaGNC"> 2067 : for (auto j = 0; j &lt; y_pred.size(); ++j) {</span></span>
<span id="L129"><span class="lineNum"> 129</span> <span class="tlaGNC"> 10662 : std::transform(y_pred[j].begin(), y_pred[j].end(), y_pred[j].begin(), [sum](double x) { return x / sum; });</span></span>
<span id="L128"><span class="lineNum"> 128</span> <span class="tlaGNC"> 22520 : for (auto j = 0; j &lt; y_pred.size(); ++j) {</span></span>
<span id="L129"><span class="lineNum"> 129</span> <span class="tlaGNC"> 120660 : std::transform(y_pred[j].begin(), y_pred[j].end(), y_pred[j].begin(), [sum](double x) { return x / sum; });</span></span>
<span id="L130"><span class="lineNum"> 130</span> : }</span>
<span id="L131"><span class="lineNum"> 131</span> <span class="tlaGNC"> 22 : return y_pred;</span></span>
<span id="L132"><span class="lineNum"> 132</span> <span class="tlaGNC"> 11 : }</span></span>
<span id="L133"><span class="lineNum"> 133</span> <span class="tlaGNC"> 7 : std::vector&lt;std::vector&lt;double&gt;&gt; Ensemble::predict_average_voting(std::vector&lt;std::vector&lt;int&gt;&gt;&amp; X)</span></span>
<span id="L131"><span class="lineNum"> 131</span> <span class="tlaGNC"> 240 : return y_pred;</span></span>
<span id="L132"><span class="lineNum"> 132</span> <span class="tlaGNC"> 120 : }</span></span>
<span id="L133"><span class="lineNum"> 133</span> <span class="tlaGNC"> 82 : std::vector&lt;std::vector&lt;double&gt;&gt; Ensemble::predict_average_voting(std::vector&lt;std::vector&lt;int&gt;&gt;&amp; X)</span></span>
<span id="L134"><span class="lineNum"> 134</span> : {</span>
<span id="L135"><span class="lineNum"> 135</span> <span class="tlaGNC"> 7 : torch::Tensor Xt = bayesnet::vectorToTensor(X, false);</span></span>
<span id="L136"><span class="lineNum"> 136</span> <span class="tlaGNC"> 7 : auto y_pred = predict_average_voting(Xt);</span></span>
<span id="L137"><span class="lineNum"> 137</span> <span class="tlaGNC"> 7 : std::vector&lt;std::vector&lt;double&gt;&gt; result = tensorToVectorDouble(y_pred);</span></span>
<span id="L138"><span class="lineNum"> 138</span> <span class="tlaGNC"> 14 : return result;</span></span>
<span id="L139"><span class="lineNum"> 139</span> <span class="tlaGNC"> 7 : }</span></span>
<span id="L140"><span class="lineNum"> 140</span> <span class="tlaGNC"> 20 : torch::Tensor Ensemble::predict_average_voting(torch::Tensor&amp; X)</span></span>
<span id="L135"><span class="lineNum"> 135</span> <span class="tlaGNC"> 82 : torch::Tensor Xt = bayesnet::vectorToTensor(X, false);</span></span>
<span id="L136"><span class="lineNum"> 136</span> <span class="tlaGNC"> 82 : auto y_pred = predict_average_voting(Xt);</span></span>
<span id="L137"><span class="lineNum"> 137</span> <span class="tlaGNC"> 82 : std::vector&lt;std::vector&lt;double&gt;&gt; result = tensorToVectorDouble(y_pred);</span></span>
<span id="L138"><span class="lineNum"> 138</span> <span class="tlaGNC"> 164 : return result;</span></span>
<span id="L139"><span class="lineNum"> 139</span> <span class="tlaGNC"> 82 : }</span></span>
<span id="L140"><span class="lineNum"> 140</span> <span class="tlaGNC"> 291 : torch::Tensor Ensemble::predict_average_voting(torch::Tensor&amp; X)</span></span>
<span id="L141"><span class="lineNum"> 141</span> : {</span>
<span id="L142"><span class="lineNum"> 142</span> : // Build a m x n_models tensor with the predictions of each model</span>
<span id="L143"><span class="lineNum"> 143</span> <span class="tlaGNC"> 20 : torch::Tensor y_pred = torch::zeros({ X.size(1), n_models }, torch::kInt32);</span></span>
<span id="L144"><span class="lineNum"> 144</span> <span class="tlaGNC"> 20 : auto threads{ std::vector&lt;std::thread&gt;() };</span></span>
<span id="L145"><span class="lineNum"> 145</span> <span class="tlaGNC"> 20 : std::mutex mtx;</span></span>
<span id="L146"><span class="lineNum"> 146</span> <span class="tlaGNC"> 134 : for (auto i = 0; i &lt; n_models; ++i) {</span></span>
<span id="L147"><span class="lineNum"> 147</span> <span class="tlaGNC"> 114 : threads.push_back(std::thread([&amp;, i]() {</span></span>
<span id="L148"><span class="lineNum"> 148</span> <span class="tlaGNC"> 114 : auto ypredict = models[i]-&gt;predict(X);</span></span>
<span id="L149"><span class="lineNum"> 149</span> <span class="tlaGNC"> 114 : std::lock_guard&lt;std::mutex&gt; lock(mtx);</span></span>
<span id="L150"><span class="lineNum"> 150</span> <span class="tlaGNC"> 342 : y_pred.index_put_({ &quot;...&quot;, i }, ypredict);</span></span>
<span id="L151"><span class="lineNum"> 151</span> <span class="tlaGNC"> 228 : }));</span></span>
<span id="L143"><span class="lineNum"> 143</span> <span class="tlaGNC"> 291 : torch::Tensor y_pred = torch::zeros({ X.size(1), n_models }, torch::kInt32);</span></span>
<span id="L144"><span class="lineNum"> 144</span> <span class="tlaGNC"> 291 : auto threads{ std::vector&lt;std::thread&gt;() };</span></span>
<span id="L145"><span class="lineNum"> 145</span> <span class="tlaGNC"> 291 : std::mutex mtx;</span></span>
<span id="L146"><span class="lineNum"> 146</span> <span class="tlaGNC"> 1959 : for (auto i = 0; i &lt; n_models; ++i) {</span></span>
<span id="L147"><span class="lineNum"> 147</span> <span class="tlaGNC"> 1668 : threads.push_back(std::thread([&amp;, i]() {</span></span>
<span id="L148"><span class="lineNum"> 148</span> <span class="tlaGNC"> 1668 : auto ypredict = models[i]-&gt;predict(X);</span></span>
<span id="L149"><span class="lineNum"> 149</span> <span class="tlaGNC"> 1668 : std::lock_guard&lt;std::mutex&gt; lock(mtx);</span></span>
<span id="L150"><span class="lineNum"> 150</span> <span class="tlaGNC"> 5004 : y_pred.index_put_({ &quot;...&quot;, i }, ypredict);</span></span>
<span id="L151"><span class="lineNum"> 151</span> <span class="tlaGNC"> 3336 : }));</span></span>
<span id="L152"><span class="lineNum"> 152</span> : }</span>
<span id="L153"><span class="lineNum"> 153</span> <span class="tlaGNC"> 134 : for (auto&amp; thread : threads) {</span></span>
<span id="L154"><span class="lineNum"> 154</span> <span class="tlaGNC"> 114 : thread.join();</span></span>
<span id="L153"><span class="lineNum"> 153</span> <span class="tlaGNC"> 1959 : for (auto&amp; thread : threads) {</span></span>
<span id="L154"><span class="lineNum"> 154</span> <span class="tlaGNC"> 1668 : thread.join();</span></span>
<span id="L155"><span class="lineNum"> 155</span> : }</span>
<span id="L156"><span class="lineNum"> 156</span> <span class="tlaGNC"> 40 : return voting(y_pred);</span></span>
<span id="L157"><span class="lineNum"> 157</span> <span class="tlaGNC"> 20 : }</span></span>
<span id="L158"><span class="lineNum"> 158</span> <span class="tlaGNC"> 16 : float Ensemble::score(torch::Tensor&amp; X, torch::Tensor&amp; y)</span></span>
<span id="L156"><span class="lineNum"> 156</span> <span class="tlaGNC"> 582 : return voting(y_pred);</span></span>
<span id="L157"><span class="lineNum"> 157</span> <span class="tlaGNC"> 291 : }</span></span>
<span id="L158"><span class="lineNum"> 158</span> <span class="tlaGNC"> 194 : float Ensemble::score(torch::Tensor&amp; X, torch::Tensor&amp; y)</span></span>
<span id="L159"><span class="lineNum"> 159</span> : {</span>
<span id="L160"><span class="lineNum"> 160</span> <span class="tlaGNC"> 16 : auto y_pred = predict(X);</span></span>
<span id="L161"><span class="lineNum"> 161</span> <span class="tlaGNC"> 14 : int correct = 0;</span></span>
<span id="L162"><span class="lineNum"> 162</span> <span class="tlaGNC"> 4746 : for (int i = 0; i &lt; y_pred.size(0); ++i) {</span></span>
<span id="L163"><span class="lineNum"> 163</span> <span class="tlaGNC"> 4732 : if (y_pred[i].item&lt;int&gt;() == y[i].item&lt;int&gt;()) {</span></span>
<span id="L164"><span class="lineNum"> 164</span> <span class="tlaGNC"> 4026 : correct++;</span></span>
<span id="L160"><span class="lineNum"> 160</span> <span class="tlaGNC"> 194 : auto y_pred = predict(X);</span></span>
<span id="L161"><span class="lineNum"> 161</span> <span class="tlaGNC"> 172 : int correct = 0;</span></span>
<span id="L162"><span class="lineNum"> 162</span> <span class="tlaGNC"> 53601 : for (int i = 0; i &lt; y_pred.size(0); ++i) {</span></span>
<span id="L163"><span class="lineNum"> 163</span> <span class="tlaGNC"> 53429 : if (y_pred[i].item&lt;int&gt;() == y[i].item&lt;int&gt;()) {</span></span>
<span id="L164"><span class="lineNum"> 164</span> <span class="tlaGNC"> 45279 : correct++;</span></span>
<span id="L165"><span class="lineNum"> 165</span> : }</span>
<span id="L166"><span class="lineNum"> 166</span> : }</span>
<span id="L167"><span class="lineNum"> 167</span> <span class="tlaGNC"> 28 : return (double)correct / y_pred.size(0);</span></span>
<span id="L168"><span class="lineNum"> 168</span> <span class="tlaGNC"> 14 : }</span></span>
<span id="L169"><span class="lineNum"> 169</span> <span class="tlaGNC"> 12 : float Ensemble::score(std::vector&lt;std::vector&lt;int&gt;&gt;&amp; X, std::vector&lt;int&gt;&amp; y)</span></span>
<span id="L167"><span class="lineNum"> 167</span> <span class="tlaGNC"> 344 : return (double)correct / y_pred.size(0);</span></span>
<span id="L168"><span class="lineNum"> 168</span> <span class="tlaGNC"> 172 : }</span></span>
<span id="L169"><span class="lineNum"> 169</span> <span class="tlaGNC"> 134 : float Ensemble::score(std::vector&lt;std::vector&lt;int&gt;&gt;&amp; X, std::vector&lt;int&gt;&amp; y)</span></span>
<span id="L170"><span class="lineNum"> 170</span> : {</span>
<span id="L171"><span class="lineNum"> 171</span> <span class="tlaGNC"> 12 : auto y_pred = predict(X);</span></span>
<span id="L172"><span class="lineNum"> 172</span> <span class="tlaGNC"> 10 : int correct = 0;</span></span>
<span id="L173"><span class="lineNum"> 173</span> <span class="tlaGNC"> 2534 : for (int i = 0; i &lt; y_pred.size(); ++i) {</span></span>
<span id="L174"><span class="lineNum"> 174</span> <span class="tlaGNC"> 2524 : if (y_pred[i] == y[i]) {</span></span>
<span id="L175"><span class="lineNum"> 175</span> <span class="tlaGNC"> 2173 : correct++;</span></span>
<span id="L171"><span class="lineNum"> 171</span> <span class="tlaGNC"> 134 : auto y_pred = predict(X);</span></span>
<span id="L172"><span class="lineNum"> 172</span> <span class="tlaGNC"> 112 : int correct = 0;</span></span>
<span id="L173"><span class="lineNum"> 173</span> <span class="tlaGNC"> 29964 : for (int i = 0; i &lt; y_pred.size(); ++i) {</span></span>
<span id="L174"><span class="lineNum"> 174</span> <span class="tlaGNC"> 29852 : if (y_pred[i] == y[i]) {</span></span>
<span id="L175"><span class="lineNum"> 175</span> <span class="tlaGNC"> 25423 : correct++;</span></span>
<span id="L176"><span class="lineNum"> 176</span> : }</span>
<span id="L177"><span class="lineNum"> 177</span> : }</span>
<span id="L178"><span class="lineNum"> 178</span> <span class="tlaGNC"> 20 : return (double)correct / y_pred.size();</span></span>
<span id="L179"><span class="lineNum"> 179</span> <span class="tlaGNC"> 10 : }</span></span>
<span id="L180"><span class="lineNum"> 180</span> <span class="tlaGNC"> 1 : std::vector&lt;std::string&gt; Ensemble::show() const</span></span>
<span id="L178"><span class="lineNum"> 178</span> <span class="tlaGNC"> 224 : return (double)correct / y_pred.size();</span></span>
<span id="L179"><span class="lineNum"> 179</span> <span class="tlaGNC"> 112 : }</span></span>
<span id="L180"><span class="lineNum"> 180</span> <span class="tlaGNC"> 11 : std::vector&lt;std::string&gt; Ensemble::show() const</span></span>
<span id="L181"><span class="lineNum"> 181</span> : {</span>
<span id="L182"><span class="lineNum"> 182</span> <span class="tlaGNC"> 1 : auto result = std::vector&lt;std::string&gt;();</span></span>
<span id="L183"><span class="lineNum"> 183</span> <span class="tlaGNC"> 5 : for (auto i = 0; i &lt; n_models; ++i) {</span></span>
<span id="L184"><span class="lineNum"> 184</span> <span class="tlaGNC"> 4 : auto res = models[i]-&gt;show();</span></span>
<span id="L185"><span class="lineNum"> 185</span> <span class="tlaGNC"> 4 : result.insert(result.end(), res.begin(), res.end());</span></span>
<span id="L186"><span class="lineNum"> 186</span> <span class="tlaGNC"> 4 : }</span></span>
<span id="L187"><span class="lineNum"> 187</span> <span class="tlaGNC"> 1 : return result;</span></span>
<span id="L182"><span class="lineNum"> 182</span> <span class="tlaGNC"> 11 : auto result = std::vector&lt;std::string&gt;();</span></span>
<span id="L183"><span class="lineNum"> 183</span> <span class="tlaGNC"> 55 : for (auto i = 0; i &lt; n_models; ++i) {</span></span>
<span id="L184"><span class="lineNum"> 184</span> <span class="tlaGNC"> 44 : auto res = models[i]-&gt;show();</span></span>
<span id="L185"><span class="lineNum"> 185</span> <span class="tlaGNC"> 44 : result.insert(result.end(), res.begin(), res.end());</span></span>
<span id="L186"><span class="lineNum"> 186</span> <span class="tlaGNC"> 44 : }</span></span>
<span id="L187"><span class="lineNum"> 187</span> <span class="tlaGNC"> 11 : return result;</span></span>
<span id="L188"><span class="lineNum"> 188</span> <span class="tlaUNC tlaBgUNC"> 0 : }</span></span>
<span id="L189"><span class="lineNum"> 189</span> <span class="tlaGNC tlaBgGNC"> 3 : std::vector&lt;std::string&gt; Ensemble::graph(const std::string&amp; title) const</span></span>
<span id="L189"><span class="lineNum"> 189</span> <span class="tlaGNC tlaBgGNC"> 33 : std::vector&lt;std::string&gt; Ensemble::graph(const std::string&amp; title) const</span></span>
<span id="L190"><span class="lineNum"> 190</span> : {</span>
<span id="L191"><span class="lineNum"> 191</span> <span class="tlaGNC"> 3 : auto result = std::vector&lt;std::string&gt;();</span></span>
<span id="L192"><span class="lineNum"> 192</span> <span class="tlaGNC"> 20 : for (auto i = 0; i &lt; n_models; ++i) {</span></span>
<span id="L193"><span class="lineNum"> 193</span> <span class="tlaGNC"> 17 : auto res = models[i]-&gt;graph(title + &quot;_&quot; + std::to_string(i));</span></span>
<span id="L194"><span class="lineNum"> 194</span> <span class="tlaGNC"> 17 : result.insert(result.end(), res.begin(), res.end());</span></span>
<span id="L195"><span class="lineNum"> 195</span> <span class="tlaGNC"> 17 : }</span></span>
<span id="L196"><span class="lineNum"> 196</span> <span class="tlaGNC"> 3 : return result;</span></span>
<span id="L191"><span class="lineNum"> 191</span> <span class="tlaGNC"> 33 : auto result = std::vector&lt;std::string&gt;();</span></span>
<span id="L192"><span class="lineNum"> 192</span> <span class="tlaGNC"> 220 : for (auto i = 0; i &lt; n_models; ++i) {</span></span>
<span id="L193"><span class="lineNum"> 193</span> <span class="tlaGNC"> 187 : auto res = models[i]-&gt;graph(title + &quot;_&quot; + std::to_string(i));</span></span>
<span id="L194"><span class="lineNum"> 194</span> <span class="tlaGNC"> 187 : result.insert(result.end(), res.begin(), res.end());</span></span>
<span id="L195"><span class="lineNum"> 195</span> <span class="tlaGNC"> 187 : }</span></span>
<span id="L196"><span class="lineNum"> 196</span> <span class="tlaGNC"> 33 : return result;</span></span>
<span id="L197"><span class="lineNum"> 197</span> <span class="tlaUNC tlaBgUNC"> 0 : }</span></span>
<span id="L198"><span class="lineNum"> 198</span> <span class="tlaGNC tlaBgGNC"> 6 : int Ensemble::getNumberOfNodes() const</span></span>
<span id="L198"><span class="lineNum"> 198</span> <span class="tlaGNC tlaBgGNC"> 70 : int Ensemble::getNumberOfNodes() const</span></span>
<span id="L199"><span class="lineNum"> 199</span> : {</span>
<span id="L200"><span class="lineNum"> 200</span> <span class="tlaGNC"> 6 : int nodes = 0;</span></span>
<span id="L201"><span class="lineNum"> 201</span> <span class="tlaGNC"> 43 : for (auto i = 0; i &lt; n_models; ++i) {</span></span>
<span id="L202"><span class="lineNum"> 202</span> <span class="tlaGNC"> 37 : nodes += models[i]-&gt;getNumberOfNodes();</span></span>
<span id="L200"><span class="lineNum"> 200</span> <span class="tlaGNC"> 70 : int nodes = 0;</span></span>
<span id="L201"><span class="lineNum"> 201</span> <span class="tlaGNC"> 512 : for (auto i = 0; i &lt; n_models; ++i) {</span></span>
<span id="L202"><span class="lineNum"> 202</span> <span class="tlaGNC"> 442 : nodes += models[i]-&gt;getNumberOfNodes();</span></span>
<span id="L203"><span class="lineNum"> 203</span> : }</span>
<span id="L204"><span class="lineNum"> 204</span> <span class="tlaGNC"> 6 : return nodes;</span></span>
<span id="L204"><span class="lineNum"> 204</span> <span class="tlaGNC"> 70 : return nodes;</span></span>
<span id="L205"><span class="lineNum"> 205</span> : }</span>
<span id="L206"><span class="lineNum"> 206</span> <span class="tlaGNC"> 6 : int Ensemble::getNumberOfEdges() const</span></span>
<span id="L206"><span class="lineNum"> 206</span> <span class="tlaGNC"> 70 : int Ensemble::getNumberOfEdges() const</span></span>
<span id="L207"><span class="lineNum"> 207</span> : {</span>
<span id="L208"><span class="lineNum"> 208</span> <span class="tlaGNC"> 6 : int edges = 0;</span></span>
<span id="L209"><span class="lineNum"> 209</span> <span class="tlaGNC"> 43 : for (auto i = 0; i &lt; n_models; ++i) {</span></span>
<span id="L210"><span class="lineNum"> 210</span> <span class="tlaGNC"> 37 : edges += models[i]-&gt;getNumberOfEdges();</span></span>
<span id="L208"><span class="lineNum"> 208</span> <span class="tlaGNC"> 70 : int edges = 0;</span></span>
<span id="L209"><span class="lineNum"> 209</span> <span class="tlaGNC"> 512 : for (auto i = 0; i &lt; n_models; ++i) {</span></span>
<span id="L210"><span class="lineNum"> 210</span> <span class="tlaGNC"> 442 : edges += models[i]-&gt;getNumberOfEdges();</span></span>
<span id="L211"><span class="lineNum"> 211</span> : }</span>
<span id="L212"><span class="lineNum"> 212</span> <span class="tlaGNC"> 6 : return edges;</span></span>
<span id="L212"><span class="lineNum"> 212</span> <span class="tlaGNC"> 70 : return edges;</span></span>
<span id="L213"><span class="lineNum"> 213</span> : }</span>
<span id="L214"><span class="lineNum"> 214</span> <span class="tlaGNC"> 1 : int Ensemble::getNumberOfStates() const</span></span>
<span id="L214"><span class="lineNum"> 214</span> <span class="tlaGNC"> 11 : int Ensemble::getNumberOfStates() const</span></span>
<span id="L215"><span class="lineNum"> 215</span> : {</span>
<span id="L216"><span class="lineNum"> 216</span> <span class="tlaGNC"> 1 : int nstates = 0;</span></span>
<span id="L217"><span class="lineNum"> 217</span> <span class="tlaGNC"> 5 : for (auto i = 0; i &lt; n_models; ++i) {</span></span>
<span id="L218"><span class="lineNum"> 218</span> <span class="tlaGNC"> 4 : nstates += models[i]-&gt;getNumberOfStates();</span></span>
<span id="L216"><span class="lineNum"> 216</span> <span class="tlaGNC"> 11 : int nstates = 0;</span></span>
<span id="L217"><span class="lineNum"> 217</span> <span class="tlaGNC"> 55 : for (auto i = 0; i &lt; n_models; ++i) {</span></span>
<span id="L218"><span class="lineNum"> 218</span> <span class="tlaGNC"> 44 : nstates += models[i]-&gt;getNumberOfStates();</span></span>
<span id="L219"><span class="lineNum"> 219</span> : }</span>
<span id="L220"><span class="lineNum"> 220</span> <span class="tlaGNC"> 1 : return nstates;</span></span>
<span id="L220"><span class="lineNum"> 220</span> <span class="tlaGNC"> 11 : return nstates;</span></span>
<span id="L221"><span class="lineNum"> 221</span> : }</span>
<span id="L222"><span class="lineNum"> 222</span> : }</span>
</pre>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryMed">75.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryMed">75.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryMed">75.0&nbsp;%</td>

View File

@ -31,13 +31,13 @@
<td class="headerValue">coverage.info</td>
<td></td>
<td class="headerItem">Lines:</td>
<td class="headerCovTableEntryHi">98.8&nbsp;%</td>
<td class="headerCovTableEntry">424</td>
<td class="headerCovTableEntry">419</td>
<td class="headerCovTableEntryHi">98.4&nbsp;%</td>
<td class="headerCovTableEntry">443</td>
<td class="headerCovTableEntry">436</td>
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">98.1&nbsp;%</td>
@ -154,11 +154,11 @@
<tr>
<td class="coverFile"><a href="BoostAODE.cc.gcov.html">BoostAODE.cc</a></td>
<td class="coverBar" align="center">
<table border=0 cellspacing=0 cellpadding=1><tr><td class="coverBarOutline"><img src="../../emerald.png" width=99 height=10 alt="99.1%"><img src="../../snow.png" width=1 height=10 alt="99.1%"></td></tr></table>
<table border=0 cellspacing=0 cellpadding=1><tr><td class="coverBarOutline"><img src="../../emerald.png" width=98 height=10 alt="98.3%"><img src="../../snow.png" width=2 height=10 alt="98.3%"></td></tr></table>
</td>
<td class="coverPerHi">99.1&nbsp;%</td>
<td class="coverNumDflt">218</td>
<td class="coverNumDflt">216</td>
<td class="coverPerHi">98.3&nbsp;%</td>
<td class="coverNumDflt">237</td>
<td class="coverNumDflt">233</td>
<td class="coverPerHi">100.0&nbsp;%</td>
<td class="coverNumDflt">9</td>
<td class="coverNumDflt">9</td>

View File

@ -31,13 +31,13 @@
<td class="headerValue">coverage.info</td>
<td></td>
<td class="headerItem">Lines:</td>
<td class="headerCovTableEntryHi">98.8&nbsp;%</td>
<td class="headerCovTableEntry">424</td>
<td class="headerCovTableEntry">419</td>
<td class="headerCovTableEntryHi">98.4&nbsp;%</td>
<td class="headerCovTableEntry">443</td>
<td class="headerCovTableEntry">436</td>
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">98.1&nbsp;%</td>
@ -94,11 +94,11 @@
<tr>
<td class="coverFile"><a href="BoostAODE.cc.gcov.html">BoostAODE.cc</a></td>
<td class="coverBar" align="center">
<table border=0 cellspacing=0 cellpadding=1><tr><td class="coverBarOutline"><img src="../../emerald.png" width=99 height=10 alt="99.1%"><img src="../../snow.png" width=1 height=10 alt="99.1%"></td></tr></table>
<table border=0 cellspacing=0 cellpadding=1><tr><td class="coverBarOutline"><img src="../../emerald.png" width=98 height=10 alt="98.3%"><img src="../../snow.png" width=2 height=10 alt="98.3%"></td></tr></table>
</td>
<td class="coverPerHi">99.1&nbsp;%</td>
<td class="coverNumDflt">218</td>
<td class="coverNumDflt">216</td>
<td class="coverPerHi">98.3&nbsp;%</td>
<td class="coverNumDflt">237</td>
<td class="coverNumDflt">233</td>
<td class="coverPerHi">100.0&nbsp;%</td>
<td class="coverNumDflt">9</td>
<td class="coverNumDflt">9</td>

View File

@ -31,13 +31,13 @@
<td class="headerValue">coverage.info</td>
<td></td>
<td class="headerItem">Lines:</td>
<td class="headerCovTableEntryHi">98.8&nbsp;%</td>
<td class="headerCovTableEntry">424</td>
<td class="headerCovTableEntry">419</td>
<td class="headerCovTableEntryHi">98.4&nbsp;%</td>
<td class="headerCovTableEntry">443</td>
<td class="headerCovTableEntry">436</td>
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">98.1&nbsp;%</td>
@ -130,11 +130,11 @@
<tr>
<td class="coverFile"><a href="BoostAODE.cc.gcov.html">BoostAODE.cc</a></td>
<td class="coverBar" align="center">
<table border=0 cellspacing=0 cellpadding=1><tr><td class="coverBarOutline"><img src="../../emerald.png" width=99 height=10 alt="99.1%"><img src="../../snow.png" width=1 height=10 alt="99.1%"></td></tr></table>
<table border=0 cellspacing=0 cellpadding=1><tr><td class="coverBarOutline"><img src="../../emerald.png" width=98 height=10 alt="98.3%"><img src="../../snow.png" width=2 height=10 alt="98.3%"></td></tr></table>
</td>
<td class="coverPerHi">99.1&nbsp;%</td>
<td class="coverNumDflt">218</td>
<td class="coverNumDflt">216</td>
<td class="coverPerHi">98.3&nbsp;%</td>
<td class="coverNumDflt">237</td>
<td class="coverNumDflt">233</td>
<td class="coverPerHi">100.0&nbsp;%</td>
<td class="coverNumDflt">9</td>
<td class="coverNumDflt">9</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,14 +65,14 @@
<tr>
<td class="coverFn"><a href="CFS.cc.gcov.html#L9">_ZN8bayesnet3CFS3fitEv</a></td>
<td class="coverFnHi">6</td>
<td class="coverFnHi">70</td>
</tr>
<tr>
<td class="coverFn"><a href="CFS.cc.gcov.html#L43">_ZN8bayesnet3CFS24computeContinueConditionERKSt6vectorIiSaIiEE</a></td>
<td class="coverFnHi">28</td>
<td class="coverFnHi">328</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,14 +65,14 @@
<tr>
<td class="coverFn"><a href="CFS.cc.gcov.html#L43">_ZN8bayesnet3CFS24computeContinueConditionERKSt6vectorIiSaIiEE</a></td>
<td class="coverFnHi">28</td>
<td class="coverFnHi">328</td>
</tr>
<tr>
<td class="coverFn"><a href="CFS.cc.gcov.html#L9">_ZN8bayesnet3CFS3fitEv</a></td>
<td class="coverFnHi">6</td>
<td class="coverFnHi">70</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -70,46 +70,46 @@
<span id="L8"><span class="lineNum"> 8</span> : #include &quot;bayesnet/utils/bayesnetUtils.h&quot;</span>
<span id="L9"><span class="lineNum"> 9</span> : #include &quot;CFS.h&quot;</span>
<span id="L10"><span class="lineNum"> 10</span> : namespace bayesnet {</span>
<span id="L11"><span class="lineNum"> 11</span> <span class="tlaGNC tlaBgGNC"> 6 : void CFS::fit()</span></span>
<span id="L11"><span class="lineNum"> 11</span> <span class="tlaGNC tlaBgGNC"> 70 : void CFS::fit()</span></span>
<span id="L12"><span class="lineNum"> 12</span> : {</span>
<span id="L13"><span class="lineNum"> 13</span> <span class="tlaGNC"> 6 : initialize();</span></span>
<span id="L14"><span class="lineNum"> 14</span> <span class="tlaGNC"> 6 : computeSuLabels();</span></span>
<span id="L15"><span class="lineNum"> 15</span> <span class="tlaGNC"> 6 : auto featureOrder = argsort(suLabels); // sort descending order</span></span>
<span id="L16"><span class="lineNum"> 16</span> <span class="tlaGNC"> 6 : auto continueCondition = true;</span></span>
<span id="L17"><span class="lineNum"> 17</span> <span class="tlaGNC"> 6 : auto feature = featureOrder[0];</span></span>
<span id="L18"><span class="lineNum"> 18</span> <span class="tlaGNC"> 6 : selectedFeatures.push_back(feature);</span></span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC"> 6 : selectedScores.push_back(suLabels[feature]);</span></span>
<span id="L20"><span class="lineNum"> 20</span> <span class="tlaGNC"> 6 : featureOrder.erase(featureOrder.begin());</span></span>
<span id="L21"><span class="lineNum"> 21</span> <span class="tlaGNC"> 34 : while (continueCondition) {</span></span>
<span id="L22"><span class="lineNum"> 22</span> <span class="tlaGNC"> 28 : double merit = std::numeric_limits&lt;double&gt;::lowest();</span></span>
<span id="L23"><span class="lineNum"> 23</span> <span class="tlaGNC"> 28 : int bestFeature = -1;</span></span>
<span id="L24"><span class="lineNum"> 24</span> <span class="tlaGNC"> 164 : for (auto feature : featureOrder) {</span></span>
<span id="L25"><span class="lineNum"> 25</span> <span class="tlaGNC"> 136 : selectedFeatures.push_back(feature);</span></span>
<span id="L13"><span class="lineNum"> 13</span> <span class="tlaGNC"> 70 : initialize();</span></span>
<span id="L14"><span class="lineNum"> 14</span> <span class="tlaGNC"> 70 : computeSuLabels();</span></span>
<span id="L15"><span class="lineNum"> 15</span> <span class="tlaGNC"> 70 : auto featureOrder = argsort(suLabels); // sort descending order</span></span>
<span id="L16"><span class="lineNum"> 16</span> <span class="tlaGNC"> 70 : auto continueCondition = true;</span></span>
<span id="L17"><span class="lineNum"> 17</span> <span class="tlaGNC"> 70 : auto feature = featureOrder[0];</span></span>
<span id="L18"><span class="lineNum"> 18</span> <span class="tlaGNC"> 70 : selectedFeatures.push_back(feature);</span></span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC"> 70 : selectedScores.push_back(suLabels[feature]);</span></span>
<span id="L20"><span class="lineNum"> 20</span> <span class="tlaGNC"> 70 : featureOrder.erase(featureOrder.begin());</span></span>
<span id="L21"><span class="lineNum"> 21</span> <span class="tlaGNC"> 398 : while (continueCondition) {</span></span>
<span id="L22"><span class="lineNum"> 22</span> <span class="tlaGNC"> 328 : double merit = std::numeric_limits&lt;double&gt;::lowest();</span></span>
<span id="L23"><span class="lineNum"> 23</span> <span class="tlaGNC"> 328 : int bestFeature = -1;</span></span>
<span id="L24"><span class="lineNum"> 24</span> <span class="tlaGNC"> 1929 : for (auto feature : featureOrder) {</span></span>
<span id="L25"><span class="lineNum"> 25</span> <span class="tlaGNC"> 1601 : selectedFeatures.push_back(feature);</span></span>
<span id="L26"><span class="lineNum"> 26</span> : // Compute merit with selectedFeatures</span>
<span id="L27"><span class="lineNum"> 27</span> <span class="tlaGNC"> 136 : auto meritNew = computeMeritCFS();</span></span>
<span id="L28"><span class="lineNum"> 28</span> <span class="tlaGNC"> 136 : if (meritNew &gt; merit) {</span></span>
<span id="L29"><span class="lineNum"> 29</span> <span class="tlaGNC"> 57 : merit = meritNew;</span></span>
<span id="L30"><span class="lineNum"> 30</span> <span class="tlaGNC"> 57 : bestFeature = feature;</span></span>
<span id="L27"><span class="lineNum"> 27</span> <span class="tlaGNC"> 1601 : auto meritNew = computeMeritCFS();</span></span>
<span id="L28"><span class="lineNum"> 28</span> <span class="tlaGNC"> 1601 : if (meritNew &gt; merit) {</span></span>
<span id="L29"><span class="lineNum"> 29</span> <span class="tlaGNC"> 663 : merit = meritNew;</span></span>
<span id="L30"><span class="lineNum"> 30</span> <span class="tlaGNC"> 663 : bestFeature = feature;</span></span>
<span id="L31"><span class="lineNum"> 31</span> : }</span>
<span id="L32"><span class="lineNum"> 32</span> <span class="tlaGNC"> 136 : selectedFeatures.pop_back();</span></span>
<span id="L32"><span class="lineNum"> 32</span> <span class="tlaGNC"> 1601 : selectedFeatures.pop_back();</span></span>
<span id="L33"><span class="lineNum"> 33</span> : }</span>
<span id="L34"><span class="lineNum"> 34</span> <span class="tlaGNC"> 28 : if (bestFeature == -1) {</span></span>
<span id="L34"><span class="lineNum"> 34</span> <span class="tlaGNC"> 328 : if (bestFeature == -1) {</span></span>
<span id="L35"><span class="lineNum"> 35</span> : // meritNew has to be nan due to constant features</span>
<span id="L36"><span class="lineNum"> 36</span> <span class="tlaUNC tlaBgUNC"> 0 : break;</span></span>
<span id="L37"><span class="lineNum"> 37</span> : }</span>
<span id="L38"><span class="lineNum"> 38</span> <span class="tlaGNC tlaBgGNC"> 28 : selectedFeatures.push_back(bestFeature);</span></span>
<span id="L39"><span class="lineNum"> 39</span> <span class="tlaGNC"> 28 : selectedScores.push_back(merit);</span></span>
<span id="L40"><span class="lineNum"> 40</span> <span class="tlaGNC"> 28 : featureOrder.erase(remove(featureOrder.begin(), featureOrder.end(), bestFeature), featureOrder.end());</span></span>
<span id="L41"><span class="lineNum"> 41</span> <span class="tlaGNC"> 28 : continueCondition = computeContinueCondition(featureOrder);</span></span>
<span id="L38"><span class="lineNum"> 38</span> <span class="tlaGNC tlaBgGNC"> 328 : selectedFeatures.push_back(bestFeature);</span></span>
<span id="L39"><span class="lineNum"> 39</span> <span class="tlaGNC"> 328 : selectedScores.push_back(merit);</span></span>
<span id="L40"><span class="lineNum"> 40</span> <span class="tlaGNC"> 328 : featureOrder.erase(remove(featureOrder.begin(), featureOrder.end(), bestFeature), featureOrder.end());</span></span>
<span id="L41"><span class="lineNum"> 41</span> <span class="tlaGNC"> 328 : continueCondition = computeContinueCondition(featureOrder);</span></span>
<span id="L42"><span class="lineNum"> 42</span> : }</span>
<span id="L43"><span class="lineNum"> 43</span> <span class="tlaGNC"> 6 : fitted = true;</span></span>
<span id="L44"><span class="lineNum"> 44</span> <span class="tlaGNC"> 6 : }</span></span>
<span id="L45"><span class="lineNum"> 45</span> <span class="tlaGNC"> 28 : bool CFS::computeContinueCondition(const std::vector&lt;int&gt;&amp; featureOrder)</span></span>
<span id="L43"><span class="lineNum"> 43</span> <span class="tlaGNC"> 70 : fitted = true;</span></span>
<span id="L44"><span class="lineNum"> 44</span> <span class="tlaGNC"> 70 : }</span></span>
<span id="L45"><span class="lineNum"> 45</span> <span class="tlaGNC"> 328 : bool CFS::computeContinueCondition(const std::vector&lt;int&gt;&amp; featureOrder)</span></span>
<span id="L46"><span class="lineNum"> 46</span> : {</span>
<span id="L47"><span class="lineNum"> 47</span> <span class="tlaGNC"> 28 : if (selectedFeatures.size() == maxFeatures || featureOrder.size() == 0) {</span></span>
<span id="L48"><span class="lineNum"> 48</span> <span class="tlaGNC"> 1 : return false;</span></span>
<span id="L47"><span class="lineNum"> 47</span> <span class="tlaGNC"> 328 : if (selectedFeatures.size() == maxFeatures || featureOrder.size() == 0) {</span></span>
<span id="L48"><span class="lineNum"> 48</span> <span class="tlaGNC"> 11 : return false;</span></span>
<span id="L49"><span class="lineNum"> 49</span> : }</span>
<span id="L50"><span class="lineNum"> 50</span> <span class="tlaGNC"> 27 : if (selectedScores.size() &gt;= 5) {</span></span>
<span id="L50"><span class="lineNum"> 50</span> <span class="tlaGNC"> 317 : if (selectedScores.size() &gt;= 5) {</span></span>
<span id="L51"><span class="lineNum"> 51</span> : /*</span>
<span id="L52"><span class="lineNum"> 52</span> : &quot;To prevent the best first search from exploring the entire</span>
<span id="L53"><span class="lineNum"> 53</span> : feature subset search space, a stopping criterion is imposed.</span>
@ -117,25 +117,25 @@
<span id="L55"><span class="lineNum"> 55</span> : subsets show no improvement over the current best subset.&quot;</span>
<span id="L56"><span class="lineNum"> 56</span> : as stated in Mark A.Hall Thesis</span>
<span id="L57"><span class="lineNum"> 57</span> : */</span>
<span id="L58"><span class="lineNum"> 58</span> <span class="tlaGNC"> 10 : double item_ant = std::numeric_limits&lt;double&gt;::lowest();</span></span>
<span id="L59"><span class="lineNum"> 59</span> <span class="tlaGNC"> 10 : int num = 0;</span></span>
<span id="L60"><span class="lineNum"> 60</span> <span class="tlaGNC"> 10 : std::vector&lt;double&gt; lastFive(selectedScores.end() - 5, selectedScores.end());</span></span>
<span id="L61"><span class="lineNum"> 61</span> <span class="tlaGNC"> 40 : for (auto item : lastFive) {</span></span>
<span id="L62"><span class="lineNum"> 62</span> <span class="tlaGNC"> 35 : if (item_ant == std::numeric_limits&lt;double&gt;::lowest()) {</span></span>
<span id="L63"><span class="lineNum"> 63</span> <span class="tlaGNC"> 10 : item_ant = item;</span></span>
<span id="L58"><span class="lineNum"> 58</span> <span class="tlaGNC"> 118 : double item_ant = std::numeric_limits&lt;double&gt;::lowest();</span></span>
<span id="L59"><span class="lineNum"> 59</span> <span class="tlaGNC"> 118 : int num = 0;</span></span>
<span id="L60"><span class="lineNum"> 60</span> <span class="tlaGNC"> 118 : std::vector&lt;double&gt; lastFive(selectedScores.end() - 5, selectedScores.end());</span></span>
<span id="L61"><span class="lineNum"> 61</span> <span class="tlaGNC"> 472 : for (auto item : lastFive) {</span></span>
<span id="L62"><span class="lineNum"> 62</span> <span class="tlaGNC"> 413 : if (item_ant == std::numeric_limits&lt;double&gt;::lowest()) {</span></span>
<span id="L63"><span class="lineNum"> 63</span> <span class="tlaGNC"> 118 : item_ant = item;</span></span>
<span id="L64"><span class="lineNum"> 64</span> : }</span>
<span id="L65"><span class="lineNum"> 65</span> <span class="tlaGNC"> 35 : if (item &gt; item_ant) {</span></span>
<span id="L66"><span class="lineNum"> 66</span> <span class="tlaGNC"> 5 : break;</span></span>
<span id="L65"><span class="lineNum"> 65</span> <span class="tlaGNC"> 413 : if (item &gt; item_ant) {</span></span>
<span id="L66"><span class="lineNum"> 66</span> <span class="tlaGNC"> 59 : break;</span></span>
<span id="L67"><span class="lineNum"> 67</span> : } else {</span>
<span id="L68"><span class="lineNum"> 68</span> <span class="tlaGNC"> 30 : num++;</span></span>
<span id="L69"><span class="lineNum"> 69</span> <span class="tlaGNC"> 30 : item_ant = item;</span></span>
<span id="L68"><span class="lineNum"> 68</span> <span class="tlaGNC"> 354 : num++;</span></span>
<span id="L69"><span class="lineNum"> 69</span> <span class="tlaGNC"> 354 : item_ant = item;</span></span>
<span id="L70"><span class="lineNum"> 70</span> : }</span>
<span id="L71"><span class="lineNum"> 71</span> : }</span>
<span id="L72"><span class="lineNum"> 72</span> <span class="tlaGNC"> 10 : if (num == 5) {</span></span>
<span id="L73"><span class="lineNum"> 73</span> <span class="tlaGNC"> 5 : return false;</span></span>
<span id="L72"><span class="lineNum"> 72</span> <span class="tlaGNC"> 118 : if (num == 5) {</span></span>
<span id="L73"><span class="lineNum"> 73</span> <span class="tlaGNC"> 59 : return false;</span></span>
<span id="L74"><span class="lineNum"> 74</span> : }</span>
<span id="L75"><span class="lineNum"> 75</span> <span class="tlaGNC"> 10 : }</span></span>
<span id="L76"><span class="lineNum"> 76</span> <span class="tlaGNC"> 22 : return true;</span></span>
<span id="L75"><span class="lineNum"> 75</span> <span class="tlaGNC"> 118 : }</span></span>
<span id="L76"><span class="lineNum"> 76</span> <span class="tlaGNC"> 258 : return true;</span></span>
<span id="L77"><span class="lineNum"> 77</span> : }</span>
<span id="L78"><span class="lineNum"> 78</span> : }</span>
</pre>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -72,21 +72,21 @@
<tr>
<td class="coverFn"><a href="CFS.h.gcov.html#L18">_ZN8bayesnet3CFSD0Ev</a></td>
<td class="coverFnHi">12</td>
<td class="coverFnHi">140</td>
</tr>
<tr>
<td class="coverFnAlias"><a href="CFS.h.gcov.html#L18">_ZN8bayesnet3CFSD0Ev</a></td>
<td class="coverFnAliasHi">6</td>
<td class="coverFnAliasHi">70</td>
</tr>
<tr>
<td class="coverFnAlias"><a href="CFS.h.gcov.html#L18">_ZN8bayesnet3CFSD2Ev</a></td>
<td class="coverFnAliasHi">6</td>
<td class="coverFnAliasHi">70</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -72,21 +72,21 @@
<tr>
<td class="coverFn"><a href="CFS.h.gcov.html#L18">_ZN8bayesnet3CFSD0Ev</a></td>
<td class="coverFnHi">12</td>
<td class="coverFnHi">140</td>
</tr>
<tr>
<td class="coverFnAlias"><a href="CFS.h.gcov.html#L18">_ZN8bayesnet3CFSD0Ev</a></td>
<td class="coverFnAliasHi">6</td>
<td class="coverFnAliasHi">70</td>
</tr>
<tr>
<td class="coverFnAlias"><a href="CFS.h.gcov.html#L18">_ZN8bayesnet3CFSD2Ev</a></td>
<td class="coverFnAliasHi">6</td>
<td class="coverFnAliasHi">70</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -79,7 +79,7 @@
<span id="L17"><span class="lineNum"> 17</span> <span class="tlaGNC"> 6 : FeatureSelect(samples, features, className, maxFeatures, classNumStates, weights)</span></span>
<span id="L18"><span class="lineNum"> 18</span> : {</span>
<span id="L19"><span class="lineNum"> 19</span> <span class="tlaGNC"> 6 : }</span></span>
<span id="L20"><span class="lineNum"> 20</span> <span class="tlaGNC"> 12 : virtual ~CFS() {};</span></span>
<span id="L20"><span class="lineNum"> 20</span> <span class="tlaGNC"> 140 : virtual ~CFS() {};</span></span>
<span id="L21"><span class="lineNum"> 21</span> : void fit() override;</span>
<span id="L22"><span class="lineNum"> 22</span> : private:</span>
<span id="L23"><span class="lineNum"> 23</span> : bool computeContinueCondition(const std::vector&lt;int&gt;&amp; featureOrder);</span>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,14 +65,14 @@
<tr>
<td class="coverFn"><a href="FCBF.cc.gcov.html#L16">_ZN8bayesnet4FCBF3fitEv</a></td>
<td class="coverFnHi">5</td>
<td class="coverFnHi">56</td>
</tr>
<tr>
<td class="coverFn"><a href="FCBF.cc.gcov.html#L9">_ZN8bayesnet4FCBFC2ERKN2at6TensorERKSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaISB_EERKSB_iiS4_d</a></td>
<td class="coverFnHi">7</td>
<td class="coverFnHi">78</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,14 +65,14 @@
<tr>
<td class="coverFn"><a href="FCBF.cc.gcov.html#L16">_ZN8bayesnet4FCBF3fitEv</a></td>
<td class="coverFnHi">5</td>
<td class="coverFnHi">56</td>
</tr>
<tr>
<td class="coverFn"><a href="FCBF.cc.gcov.html#L9">_ZN8bayesnet4FCBFC2ERKN2at6TensorERKSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaISB_EERKSB_iiS4_d</a></td>
<td class="coverFnHi">7</td>
<td class="coverFnHi">78</td>
</tr>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -70,45 +70,45 @@
<span id="L8"><span class="lineNum"> 8</span> : #include &quot;FCBF.h&quot;</span>
<span id="L9"><span class="lineNum"> 9</span> : namespace bayesnet {</span>
<span id="L10"><span class="lineNum"> 10</span> : </span>
<span id="L11"><span class="lineNum"> 11</span> <span class="tlaGNC tlaBgGNC"> 7 : FCBF::FCBF(const torch::Tensor&amp; samples, const std::vector&lt;std::string&gt;&amp; features, const std::string&amp; className, const int maxFeatures, const int classNumStates, const torch::Tensor&amp; weights, const double threshold) :</span></span>
<span id="L12"><span class="lineNum"> 12</span> <span class="tlaGNC"> 7 : FeatureSelect(samples, features, className, maxFeatures, classNumStates, weights), threshold(threshold)</span></span>
<span id="L11"><span class="lineNum"> 11</span> <span class="tlaGNC tlaBgGNC"> 78 : FCBF::FCBF(const torch::Tensor&amp; samples, const std::vector&lt;std::string&gt;&amp; features, const std::string&amp; className, const int maxFeatures, const int classNumStates, const torch::Tensor&amp; weights, const double threshold) :</span></span>
<span id="L12"><span class="lineNum"> 12</span> <span class="tlaGNC"> 78 : FeatureSelect(samples, features, className, maxFeatures, classNumStates, weights), threshold(threshold)</span></span>
<span id="L13"><span class="lineNum"> 13</span> : {</span>
<span id="L14"><span class="lineNum"> 14</span> <span class="tlaGNC"> 7 : if (threshold &lt; 1e-7) {</span></span>
<span id="L15"><span class="lineNum"> 15</span> <span class="tlaGNC"> 2 : throw std::invalid_argument(&quot;Threshold cannot be less than 1e-7&quot;);</span></span>
<span id="L14"><span class="lineNum"> 14</span> <span class="tlaGNC"> 78 : if (threshold &lt; 1e-7) {</span></span>
<span id="L15"><span class="lineNum"> 15</span> <span class="tlaGNC"> 22 : throw std::invalid_argument(&quot;Threshold cannot be less than 1e-7&quot;);</span></span>
<span id="L16"><span class="lineNum"> 16</span> : }</span>
<span id="L17"><span class="lineNum"> 17</span> <span class="tlaGNC"> 7 : }</span></span>
<span id="L18"><span class="lineNum"> 18</span> <span class="tlaGNC"> 5 : void FCBF::fit()</span></span>
<span id="L17"><span class="lineNum"> 17</span> <span class="tlaGNC"> 78 : }</span></span>
<span id="L18"><span class="lineNum"> 18</span> <span class="tlaGNC"> 56 : void FCBF::fit()</span></span>
<span id="L19"><span class="lineNum"> 19</span> : {</span>
<span id="L20"><span class="lineNum"> 20</span> <span class="tlaGNC"> 5 : initialize();</span></span>
<span id="L21"><span class="lineNum"> 21</span> <span class="tlaGNC"> 5 : computeSuLabels();</span></span>
<span id="L22"><span class="lineNum"> 22</span> <span class="tlaGNC"> 5 : auto featureOrder = argsort(suLabels); // sort descending order</span></span>
<span id="L23"><span class="lineNum"> 23</span> <span class="tlaGNC"> 5 : auto featureOrderCopy = featureOrder;</span></span>
<span id="L24"><span class="lineNum"> 24</span> <span class="tlaGNC"> 42 : for (const auto&amp; feature : featureOrder) {</span></span>
<span id="L20"><span class="lineNum"> 20</span> <span class="tlaGNC"> 56 : initialize();</span></span>
<span id="L21"><span class="lineNum"> 21</span> <span class="tlaGNC"> 56 : computeSuLabels();</span></span>
<span id="L22"><span class="lineNum"> 22</span> <span class="tlaGNC"> 56 : auto featureOrder = argsort(suLabels); // sort descending order</span></span>
<span id="L23"><span class="lineNum"> 23</span> <span class="tlaGNC"> 56 : auto featureOrderCopy = featureOrder;</span></span>
<span id="L24"><span class="lineNum"> 24</span> <span class="tlaGNC"> 472 : for (const auto&amp; feature : featureOrder) {</span></span>
<span id="L25"><span class="lineNum"> 25</span> : // Don't self compare</span>
<span id="L26"><span class="lineNum"> 26</span> <span class="tlaGNC"> 37 : featureOrderCopy.erase(featureOrderCopy.begin());</span></span>
<span id="L27"><span class="lineNum"> 27</span> <span class="tlaGNC"> 37 : if (suLabels.at(feature) == 0.0) {</span></span>
<span id="L26"><span class="lineNum"> 26</span> <span class="tlaGNC"> 416 : featureOrderCopy.erase(featureOrderCopy.begin());</span></span>
<span id="L27"><span class="lineNum"> 27</span> <span class="tlaGNC"> 416 : if (suLabels.at(feature) == 0.0) {</span></span>
<span id="L28"><span class="lineNum"> 28</span> : // The feature has been removed from the list</span>
<span id="L29"><span class="lineNum"> 29</span> <span class="tlaGNC"> 16 : continue;</span></span>
<span id="L29"><span class="lineNum"> 29</span> <span class="tlaGNC"> 180 : continue;</span></span>
<span id="L30"><span class="lineNum"> 30</span> : }</span>
<span id="L31"><span class="lineNum"> 31</span> <span class="tlaGNC"> 21 : if (suLabels.at(feature) &lt; threshold) {</span></span>
<span id="L31"><span class="lineNum"> 31</span> <span class="tlaGNC"> 236 : if (suLabels.at(feature) &lt; threshold) {</span></span>
<span id="L32"><span class="lineNum"> 32</span> <span class="tlaUNC tlaBgUNC"> 0 : break;</span></span>
<span id="L33"><span class="lineNum"> 33</span> : }</span>
<span id="L34"><span class="lineNum"> 34</span> : // Remove redundant features</span>
<span id="L35"><span class="lineNum"> 35</span> <span class="tlaGNC tlaBgGNC"> 116 : for (const auto&amp; featureCopy : featureOrderCopy) {</span></span>
<span id="L36"><span class="lineNum"> 36</span> <span class="tlaGNC"> 95 : double value = computeSuFeatures(feature, featureCopy);</span></span>
<span id="L37"><span class="lineNum"> 37</span> <span class="tlaGNC"> 95 : if (value &gt;= suLabels.at(featureCopy)) {</span></span>
<span id="L35"><span class="lineNum"> 35</span> <span class="tlaGNC tlaBgGNC"> 1307 : for (const auto&amp; featureCopy : featureOrderCopy) {</span></span>
<span id="L36"><span class="lineNum"> 36</span> <span class="tlaGNC"> 1071 : double value = computeSuFeatures(feature, featureCopy);</span></span>
<span id="L37"><span class="lineNum"> 37</span> <span class="tlaGNC"> 1071 : if (value &gt;= suLabels.at(featureCopy)) {</span></span>
<span id="L38"><span class="lineNum"> 38</span> : // Remove feature from list</span>
<span id="L39"><span class="lineNum"> 39</span> <span class="tlaGNC"> 33 : suLabels[featureCopy] = 0.0;</span></span>
<span id="L39"><span class="lineNum"> 39</span> <span class="tlaGNC"> 373 : suLabels[featureCopy] = 0.0;</span></span>
<span id="L40"><span class="lineNum"> 40</span> : }</span>
<span id="L41"><span class="lineNum"> 41</span> : }</span>
<span id="L42"><span class="lineNum"> 42</span> <span class="tlaGNC"> 21 : selectedFeatures.push_back(feature);</span></span>
<span id="L43"><span class="lineNum"> 43</span> <span class="tlaGNC"> 21 : selectedScores.push_back(suLabels[feature]);</span></span>
<span id="L44"><span class="lineNum"> 44</span> <span class="tlaGNC"> 21 : if (selectedFeatures.size() == maxFeatures) {</span></span>
<span id="L42"><span class="lineNum"> 42</span> <span class="tlaGNC"> 236 : selectedFeatures.push_back(feature);</span></span>
<span id="L43"><span class="lineNum"> 43</span> <span class="tlaGNC"> 236 : selectedScores.push_back(suLabels[feature]);</span></span>
<span id="L44"><span class="lineNum"> 44</span> <span class="tlaGNC"> 236 : if (selectedFeatures.size() == maxFeatures) {</span></span>
<span id="L45"><span class="lineNum"> 45</span> <span class="tlaUNC tlaBgUNC"> 0 : break;</span></span>
<span id="L46"><span class="lineNum"> 46</span> : }</span>
<span id="L47"><span class="lineNum"> 47</span> : }</span>
<span id="L48"><span class="lineNum"> 48</span> <span class="tlaGNC tlaBgGNC"> 5 : fitted = true;</span></span>
<span id="L49"><span class="lineNum"> 49</span> <span class="tlaGNC"> 5 : }</span></span>
<span id="L48"><span class="lineNum"> 48</span> <span class="tlaGNC tlaBgGNC"> 56 : fitted = true;</span></span>
<span id="L49"><span class="lineNum"> 49</span> <span class="tlaGNC"> 56 : }</span></span>
<span id="L50"><span class="lineNum"> 50</span> : }</span>
</pre>
</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>

View File

@ -37,7 +37,7 @@
</tr>
<tr>
<td class="headerItem">Test Date:</td>
<td class="headerValue">2024-04-21 17:30:26</td>
<td class="headerValue">2024-04-29 20:48:03</td>
<td></td>
<td class="headerItem">Functions:</td>
<td class="headerCovTableEntryHi">100.0&nbsp;%</td>
@ -65,56 +65,56 @@
<tr>
<td class="coverFn"><a href="FeatureSelect.cc.gcov.html#L14">_ZN8bayesnet13FeatureSelect10initializeEv</a></td>
<td class="coverFnHi">16</td>
<td class="coverFnHi">184</td>
</tr>
<tr>
<td class="coverFn"><a href="FeatureSelect.cc.gcov.html#L34">_ZN8bayesnet13FeatureSelect15computeSuLabelsEv</a></td>
<td class="coverFnHi">16</td>
<td class="coverFnHi">184</td>
</tr>
<tr>
<td class="coverFn"><a href="FeatureSelect.cc.gcov.html#L68">_ZNK8bayesnet13FeatureSelect11getFeaturesEv</a></td>
<td class="coverFnHi">16</td>
<td class="coverFnHi">184</td>
</tr>
<tr>
<td class="coverFn"><a href="FeatureSelect.cc.gcov.html#L75">_ZNK8bayesnet13FeatureSelect9getScoresEv</a></td>
<td class="coverFnHi">16</td>
<td class="coverFnHi">184</td>
</tr>
<tr>
<td class="coverFn"><a href="FeatureSelect.cc.gcov.html#L9">_ZN8bayesnet13FeatureSelectC2ERKN2at6TensorERKSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaISB_EERKSB_iiS4_</a></td>
<td class="coverFnHi">22</td>
<td class="coverFnHi">250</td>
</tr>
<tr>
<td class="coverFn"><a href="FeatureSelect.cc.gcov.html#L55">_ZN8bayesnet13FeatureSelect15computeMeritCFSEv</a></td>
<td class="coverFnHi">158</td>
<td class="coverFnHi">1851</td>
</tr>
<tr>
<td class="coverFn"><a href="FeatureSelect.cc.gcov.html#L19">_ZN8bayesnet13FeatureSelect22symmetricalUncertaintyEii</a></td>
<td class="coverFnHi">411</td>
<td class="coverFnHi">5151</td>
</tr>
<tr>
<td class="coverFn"><a href="FeatureSelect.cc.gcov.html#L42">_ZN8bayesnet13FeatureSelect17computeSuFeaturesEii</a></td>
<td class="coverFnHi">980</td>
<td class="coverFnHi">11429</td>
</tr>

Some files were not shown because too many files have changed in this diff Show More