Update tests to reach 99,1% coverage

This commit is contained in:
2024-11-23 18:14:15 +01:00
parent 3728bcb7d3
commit 5d01eccf1b
11 changed files with 5108 additions and 206 deletions

2
.vscode/launch.json vendored
View File

@@ -16,7 +16,7 @@
"name": "test",
"program": "${workspaceFolder}/build_Debug/tests/TestBayesNet",
"args": [
"Test Node computeCPT"
"No features selected"
],
"cwd": "${workspaceFolder}/build_Debug/tests"
},

View File

@@ -7,9 +7,9 @@
[![Security Rating](https://sonarcloud.io/api/project_badges/measure?project=rmontanana_BayesNet&metric=security_rating)](https://sonarcloud.io/summary/new_code?id=rmontanana_BayesNet)
[![Reliability Rating](https://sonarcloud.io/api/project_badges/measure?project=rmontanana_BayesNet&metric=reliability_rating)](https://sonarcloud.io/summary/new_code?id=rmontanana_BayesNet)
![Gitea Last Commit](https://img.shields.io/gitea/last-commit/rmontanana/bayesnet?gitea_url=https://gitea.rmontanana.es:3000&logo=gitea)
[![Coverage Badge](https://img.shields.io/badge/Coverage-97,0%25-green)](html/index.html)
[![Coverage Badge](https://img.shields.io/badge/Coverage-99,1%25-green)](html/index.html)
Bayesian Network Classifiers using libtorch from scratch
Bayesian Network Classifiers library
## Dependencies
@@ -71,6 +71,8 @@ make sample fname=tests/data/glass.arff
#### - AODE
#### - A2DE
#### - [BoostAODE](docs/BoostAODE.md)
#### - BoostA2DE

View File

@@ -59,6 +59,9 @@ namespace bayesnet {
std::vector<int> featuresUsed;
if (selectFeatures) {
featuresUsed = initializeModels(smoothing);
if (featuresUsed.size() == 0) {
return;
}
auto ypred = predict(X_train);
std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
// Update significance of the models

View File

@@ -209,7 +209,7 @@ namespace bayesnet {
pthread_setname_np(threadName.c_str());
#endif
double numStates = static_cast<double>(node.second->getNumStates());
double smoothing_factor = 0.0;
double smoothing_factor;
switch (smoothing) {
case Smoothing_t::ORIGINAL:
smoothing_factor = 1.0 / n_samples;
@@ -221,7 +221,7 @@ namespace bayesnet {
smoothing_factor = 1 / numStates;
break;
default:
throw std::invalid_argument("Smoothing method not recognized " + std::to_string(static_cast<int>(smoothing)));
smoothing_factor = 0.0; // No smoothing
}
node.second->computeCPT(samples, features, smoothing_factor, weights);
semaphore.release();
@@ -234,16 +234,6 @@ namespace bayesnet {
for (auto& thread : threads) {
thread.join();
}
// std::fstream file;
// file.open("cpt.txt", std::fstream::out | std::fstream::app);
// file << std::string(80, '*') << std::endl;
// for (const auto& item : graph("Test")) {
// file << item << std::endl;
// }
// file << std::string(80, '-') << std::endl;
// file << dump_cpt() << std::endl;
// file << std::string(80, '=') << std::endl;
// file.close();
fitted = true;
}
torch::Tensor Network::predict_tensor(const torch::Tensor& samples, const bool proba)

View File

@@ -53,14 +53,14 @@ namespace bayesnet {
}
}
void insertElement(std::list<int>& variables, int variable)
void MST::insertElement(std::list<int>& variables, int variable)
{
if (std::find(variables.begin(), variables.end(), variable) == variables.end()) {
variables.push_front(variable);
}
}
std::vector<std::pair<int, int>> reorder(std::vector<std::pair<float, std::pair<int, int>>> T, int root_original)
std::vector<std::pair<int, int>> MST::reorder(std::vector<std::pair<float, std::pair<int, int>>> T, int root_original)
{
// Create the edges of a DAG from the MST
// replacing unordered_set with list because unordered_set cannot guarantee the order of the elements inserted

View File

@@ -14,6 +14,8 @@ namespace bayesnet {
public:
MST() = default;
MST(const std::vector<std::string>& features, const torch::Tensor& weights, const int root);
void insertElement(std::list<int>& variables, int variable);
std::vector<std::pair<int, int>> reorder(std::vector<std::pair<float, std::pair<int, int>>> T, int root_original);
std::vector<std::pair<int, int>> maximumSpanningTree();
private:
torch::Tensor weights;

View File

@@ -10,7 +10,7 @@ if(ENABLE_TESTING)
file(GLOB_RECURSE BayesNet_SOURCES "${BayesNet_SOURCE_DIR}/bayesnet/*.cc")
add_executable(TestBayesNet TestBayesNetwork.cc TestBayesNode.cc TestBayesClassifier.cc
TestBayesModels.cc TestBayesMetrics.cc TestFeatureSelection.cc TestBoostAODE.cc TestA2DE.cc
TestUtils.cc TestBayesEnsemble.cc TestModulesVersions.cc TestBoostA2DE.cc ${BayesNet_SOURCES})
TestUtils.cc TestBayesEnsemble.cc TestModulesVersions.cc TestBoostA2DE.cc TestMST.cc ${BayesNet_SOURCES})
target_link_libraries(TestBayesNet PUBLIC "${TORCH_LIBRARIES}" fimdlp PRIVATE Catch2::Catch2WithMain)
add_test(NAME BayesNetworkTest COMMAND TestBayesNet)
add_test(NAME A2DE COMMAND TestBayesNet "[A2DE]")
@@ -24,4 +24,5 @@ if(ENABLE_TESTING)
add_test(NAME Modules COMMAND TestBayesNet "[Modules]")
add_test(NAME Network COMMAND TestBayesNet "[Network]")
add_test(NAME Node COMMAND TestBayesNet "[Node]")
add_test(NAME MST COMMAND TestBayesNet "[MST]")
endif(ENABLE_TESTING)

View File

@@ -257,9 +257,9 @@ TEST_CASE("Test Bayesian Network", "[Network]")
REQUIRE(node->getCPT().equal(node2->getCPT()));
}
}
SECTION("Test oddities")
SECTION("Network oddities")
{
INFO("Test oddities");
INFO("Network oddities");
buildModel(net, raw.features, raw.className);
// predict without fitting
std::vector<std::vector<int>> test = { {1, 2, 0, 1, 1}, {0, 1, 2, 0, 1}, {0, 0, 0, 0, 1}, {2, 2, 2, 2, 1} };
@@ -329,6 +329,14 @@ TEST_CASE("Test Bayesian Network", "[Network]")
std::string invalid_state = "Feature sepallength not found in states";
REQUIRE_THROWS_AS(net4.fit(raw.Xv, raw.yv, raw.weightsv, raw.features, raw.className, std::map<std::string, std::vector<int>>(), raw.smoothing), std::invalid_argument);
REQUIRE_THROWS_WITH(net4.fit(raw.Xv, raw.yv, raw.weightsv, raw.features, raw.className, std::map<std::string, std::vector<int>>(), raw.smoothing), invalid_state);
// Try to add node or edge to a fitted network
auto net5 = bayesnet::Network();
buildModel(net5, raw.features, raw.className);
net5.fit(raw.Xv, raw.yv, raw.weightsv, raw.features, raw.className, raw.states, raw.smoothing);
REQUIRE_THROWS_AS(net5.addNode("A"), std::logic_error);
REQUIRE_THROWS_WITH(net5.addNode("A"), "Cannot add node to a fitted network. Initialize first.");
REQUIRE_THROWS_AS(net5.addEdge("A", "B"), std::logic_error);
REQUIRE_THROWS_WITH(net5.addEdge("A", "B"), "Cannot add edge to a fitted network. Initialize first.");
}
}
@@ -525,6 +533,7 @@ TEST_CASE("Test Smoothing A", "[Network]")
}
}
}
TEST_CASE("Test Smoothing B", "[Network]")
{
auto net = bayesnet::Network();
@@ -577,4 +586,13 @@ TEST_CASE("Test Smoothing B", "[Network]")
REQUIRE(cestnik_score.at(i).at(j) == Catch::Approx(cestnik_values.at(i).at(j)).margin(threshold));
}
}
INFO("Test Smoothing B - No smoothing");
net.fit(Data, C, weights, { "X", "Y", "Z" }, "C", states, bayesnet::Smoothing_t::NONE);
auto nosmooth_values = std::vector<std::vector<float>>({ {0.342465753, 0.65753424}, {0.0, 1.0} });
auto nosmooth_score = net.predict_proba({ {0, 1}, {1, 2}, {2, 3} });
for (auto i = 0; i < 2; ++i) {
for (auto j = 0; j < 2; ++j) {
REQUIRE(nosmooth_score.at(i).at(j) == Catch::Approx(nosmooth_values.at(i).at(j)).margin(threshold));
}
}
}

View File

@@ -27,189 +27,192 @@ TEST_CASE("Build basic model", "[BoostA2DE]")
auto score = clf.score(raw.Xv, raw.yv);
REQUIRE(score == Catch::Approx(0.919271).epsilon(raw.epsilon));
}
// TEST_CASE("Feature_select IWSS", "[BoostAODE]")
// {
// auto raw = RawDatasets("glass", true);
// auto clf = bayesnet::BoostAODE();
// clf.setHyperparameters({ {"select_features", "IWSS"}, {"threshold", 0.5 } });
// clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing);
// REQUIRE(clf.getNumberOfNodes() == 90);
// REQUIRE(clf.getNumberOfEdges() == 153);
// REQUIRE(clf.getNotes().size() == 2);
// REQUIRE(clf.getNotes()[0] == "Used features in initialization: 4 of 9 with IWSS");
// REQUIRE(clf.getNotes()[1] == "Number of models: 9");
// }
// TEST_CASE("Feature_select FCBF", "[BoostAODE]")
// {
// auto raw = RawDatasets("glass", true);
// auto clf = bayesnet::BoostAODE();
// clf.setHyperparameters({ {"select_features", "FCBF"}, {"threshold", 1e-7 } });
// clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing);
// REQUIRE(clf.getNumberOfNodes() == 90);
// REQUIRE(clf.getNumberOfEdges() == 153);
// REQUIRE(clf.getNotes().size() == 2);
// REQUIRE(clf.getNotes()[0] == "Used features in initialization: 4 of 9 with FCBF");
// REQUIRE(clf.getNotes()[1] == "Number of models: 9");
// }
// TEST_CASE("Test used features in train note and score", "[BoostAODE]")
// {
// auto raw = RawDatasets("diabetes", true);
// auto clf = bayesnet::BoostAODE(true);
// clf.setHyperparameters({
// {"order", "asc"},
// {"convergence", true},
// {"select_features","CFS"},
// });
// clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing);
// REQUIRE(clf.getNumberOfNodes() == 72);
// REQUIRE(clf.getNumberOfEdges() == 120);
// REQUIRE(clf.getNotes().size() == 2);
// REQUIRE(clf.getNotes()[0] == "Used features in initialization: 6 of 8 with CFS");
// REQUIRE(clf.getNotes()[1] == "Number of models: 8");
// auto score = clf.score(raw.Xv, raw.yv);
// auto scoret = clf.score(raw.Xt, raw.yt);
// REQUIRE(score == Catch::Approx(0.809895813).epsilon(raw.epsilon));
// REQUIRE(scoret == Catch::Approx(0.809895813).epsilon(raw.epsilon));
// }
// TEST_CASE("Voting vs proba", "[BoostAODE]")
// {
// auto raw = RawDatasets("iris", true);
// auto clf = bayesnet::BoostAODE(false);
// clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing);
// auto score_proba = clf.score(raw.Xv, raw.yv);
// auto pred_proba = clf.predict_proba(raw.Xv);
// clf.setHyperparameters({
// {"predict_voting",true},
// });
// auto score_voting = clf.score(raw.Xv, raw.yv);
// auto pred_voting = clf.predict_proba(raw.Xv);
// REQUIRE(score_proba == Catch::Approx(0.97333).epsilon(raw.epsilon));
// REQUIRE(score_voting == Catch::Approx(0.98).epsilon(raw.epsilon));
// REQUIRE(pred_voting[83][2] == Catch::Approx(1.0).epsilon(raw.epsilon));
// REQUIRE(pred_proba[83][2] == Catch::Approx(0.86121525).epsilon(raw.epsilon));
// REQUIRE(clf.dump_cpt() == "");
// REQUIRE(clf.topological_order() == std::vector<std::string>());
// }
// TEST_CASE("Order asc, desc & random", "[BoostAODE]")
// {
// auto raw = RawDatasets("glass", true);
// std::map<std::string, double> scores{
// {"asc", 0.83645f }, { "desc", 0.84579f }, { "rand", 0.84112 }
// };
// for (const std::string& order : { "asc", "desc", "rand" }) {
// auto clf = bayesnet::BoostAODE();
// clf.setHyperparameters({
// {"order", order},
// {"bisection", false},
// {"maxTolerance", 1},
// {"convergence", false},
// });
// clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing);
// auto score = clf.score(raw.Xv, raw.yv);
// auto scoret = clf.score(raw.Xt, raw.yt);
// INFO("BoostAODE order: " + order);
// REQUIRE(score == Catch::Approx(scores[order]).epsilon(raw.epsilon));
// REQUIRE(scoret == Catch::Approx(scores[order]).epsilon(raw.epsilon));
// }
// }
// TEST_CASE("Oddities", "[BoostAODE]")
// {
// auto clf = bayesnet::BoostAODE();
// auto raw = RawDatasets("iris", true);
// auto bad_hyper = nlohmann::json{
// { { "order", "duck" } },
// { { "select_features", "duck" } },
// { { "maxTolerance", 0 } },
// { { "maxTolerance", 5 } },
// };
// for (const auto& hyper : bad_hyper.items()) {
// INFO("BoostAODE hyper: " + hyper.value().dump());
// REQUIRE_THROWS_AS(clf.setHyperparameters(hyper.value()), std::invalid_argument);
// }
// REQUIRE_THROWS_AS(clf.setHyperparameters({ {"maxTolerance", 0 } }), std::invalid_argument);
// auto bad_hyper_fit = nlohmann::json{
// { { "select_features","IWSS" }, { "threshold", -0.01 } },
// { { "select_features","IWSS" }, { "threshold", 0.51 } },
// { { "select_features","FCBF" }, { "threshold", 1e-8 } },
// { { "select_features","FCBF" }, { "threshold", 1.01 } },
// };
// for (const auto& hyper : bad_hyper_fit.items()) {
// INFO("BoostAODE hyper: " + hyper.value().dump());
// clf.setHyperparameters(hyper.value());
// REQUIRE_THROWS_AS(clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing, std::invalid_argument);
// }
// }
// TEST_CASE("Bisection Best", "[BoostAODE]")
// {
// auto clf = bayesnet::BoostAODE();
// auto raw = RawDatasets("kdd_JapaneseVowels", true, 1200, true, false);
// clf.setHyperparameters({
// {"bisection", true},
// {"maxTolerance", 3},
// {"convergence", true},
// {"block_update", false},
// {"convergence_best", false},
// });
// clf.fit(raw.X_train, raw.y_train, raw.features, raw.className, raw.states, raw.smoothing);
// REQUIRE(clf.getNumberOfNodes() == 210);
// REQUIRE(clf.getNumberOfEdges() == 378);
// REQUIRE(clf.getNotes().size() == 1);
// REQUIRE(clf.getNotes().at(0) == "Number of models: 14");
// auto score = clf.score(raw.X_test, raw.y_test);
// auto scoret = clf.score(raw.X_test, raw.y_test);
// REQUIRE(score == Catch::Approx(0.991666675f).epsilon(raw.epsilon));
// REQUIRE(scoret == Catch::Approx(0.991666675f).epsilon(raw.epsilon));
// }
// TEST_CASE("Bisection Best vs Last", "[BoostAODE]")
// {
// auto raw = RawDatasets("kdd_JapaneseVowels", true, 1500, true, false);
// auto clf = bayesnet::BoostAODE(true);
// auto hyperparameters = nlohmann::json{
// {"bisection", true},
// {"maxTolerance", 3},
// {"convergence", true},
// {"convergence_best", true},
// };
// clf.setHyperparameters(hyperparameters);
// clf.fit(raw.X_train, raw.y_train, raw.features, raw.className, raw.states, raw.smoothing);
// auto score_best = clf.score(raw.X_test, raw.y_test);
// REQUIRE(score_best == Catch::Approx(0.980000019f).epsilon(raw.epsilon));
// // Now we will set the hyperparameter to use the last accuracy
// hyperparameters["convergence_best"] = false;
// clf.setHyperparameters(hyperparameters);
// clf.fit(raw.X_train, raw.y_train, raw.features, raw.className, raw.states, raw.smoothing);
// auto score_last = clf.score(raw.X_test, raw.y_test);
// REQUIRE(score_last == Catch::Approx(0.976666689f).epsilon(raw.epsilon));
// }
// TEST_CASE("Block Update", "[BoostAODE]")
// {
// auto clf = bayesnet::BoostAODE();
// auto raw = RawDatasets("mfeat-factors", true, 500);
// clf.setHyperparameters({
// {"bisection", true},
// {"block_update", true},
// {"maxTolerance", 3},
// {"convergence", true},
// });
// clf.fit(raw.X_train, raw.y_train, raw.features, raw.className, raw.states, raw.smoothing);
// REQUIRE(clf.getNumberOfNodes() == 868);
// REQUIRE(clf.getNumberOfEdges() == 1724);
// REQUIRE(clf.getNotes().size() == 3);
// REQUIRE(clf.getNotes()[0] == "Convergence threshold reached & 15 models eliminated");
// REQUIRE(clf.getNotes()[1] == "Used features in train: 19 of 216");
// REQUIRE(clf.getNotes()[2] == "Number of models: 4");
// auto score = clf.score(raw.X_test, raw.y_test);
// auto scoret = clf.score(raw.X_test, raw.y_test);
// REQUIRE(score == Catch::Approx(0.99f).epsilon(raw.epsilon));
// REQUIRE(scoret == Catch::Approx(0.99f).epsilon(raw.epsilon));
// //
// // std::cout << "Number of nodes " << clf.getNumberOfNodes() << std::endl;
// // std::cout << "Number of edges " << clf.getNumberOfEdges() << std::endl;
// // std::cout << "Notes size " << clf.getNotes().size() << std::endl;
// // for (auto note : clf.getNotes()) {
// // std::cout << note << std::endl;
// // }
// // std::cout << "Score " << score << std::endl;
// Build a BoostA2DE ensemble on glass with IWSS feature selection (threshold 0.5)
// and pin the resulting model shape and training notes as a regression fingerprint.
// Update these constants only if the training algorithm intentionally changes.
TEST_CASE("Feature_select IWSS", "[BoostA2DE]")
{
    auto raw = RawDatasets("glass", true);
    auto clf = bayesnet::BoostA2DE();
    // IWSS selection with threshold 0.5, as reported back in the first note below.
    clf.setHyperparameters({ {"select_features", "IWSS"}, {"threshold", 0.5 } });
    clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing);
    REQUIRE(clf.getNumberOfNodes() == 140);
    REQUIRE(clf.getNumberOfEdges() == 294);
    // Check the note count first so the indexed accesses below are known to be in range.
    REQUIRE(clf.getNotes().size() == 4);
    REQUIRE(clf.getNotes()[0] == "Used features in initialization: 4 of 9 with IWSS");
    REQUIRE(clf.getNotes()[1] == "Convergence threshold reached & 15 models eliminated");
    REQUIRE(clf.getNotes()[2] == "Pairs not used in train: 2");
    REQUIRE(clf.getNotes()[3] == "Number of models: 14");
}
// Build a BoostA2DE ensemble on glass with FCBF feature selection (threshold 1e-7)
// and pin the resulting model shape and training notes as a regression fingerprint.
TEST_CASE("Feature_select FCBF", "[BoostA2DE]")
{
    auto raw = RawDatasets("glass", true);
    auto clf = bayesnet::BoostA2DE();
    clf.setHyperparameters({ {"select_features", "FCBF"}, {"threshold", 1e-7 } });
    clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing);
    REQUIRE(clf.getNumberOfNodes() == 110);
    REQUIRE(clf.getNumberOfEdges() == 231);
    // Check the note count before indexing, as the sibling IWSS test does; without
    // this guard an unexpected note count would read out of range instead of
    // failing with a clear assertion.
    REQUIRE(clf.getNotes().size() == 4);
    REQUIRE(clf.getNotes()[0] == "Used features in initialization: 4 of 9 with FCBF");
    REQUIRE(clf.getNotes()[1] == "Convergence threshold reached & 15 models eliminated");
    REQUIRE(clf.getNotes()[2] == "Pairs not used in train: 2");
    REQUIRE(clf.getNotes()[3] == "Number of models: 11");
}
// Train BoostA2DE on diabetes with CFS initialization and verify both the
// "used features" note and the score (computed from vector and tensor inputs,
// which must agree).
TEST_CASE("Test used features in train note and score", "[BoostA2DE]")
{
    auto raw = RawDatasets("diabetes", true);
    auto clf = bayesnet::BoostA2DE(true);
    clf.setHyperparameters({
        {"order", "asc"},
        {"convergence", true},
        {"select_features","CFS"},
        });
    clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing);
    REQUIRE(clf.getNumberOfNodes() == 144);
    REQUIRE(clf.getNumberOfEdges() == 288);
    REQUIRE(clf.getNotes().size() == 2);
    REQUIRE(clf.getNotes()[0] == "Used features in initialization: 6 of 8 with CFS");
    REQUIRE(clf.getNotes()[1] == "Number of models: 16");
    // Same data through the std::vector (Xv/yv) and torch::Tensor (Xt/yt) APIs
    // must yield the same accuracy.
    auto score = clf.score(raw.Xv, raw.yv);
    auto scoret = clf.score(raw.Xt, raw.yt);
    REQUIRE(score == Catch::Approx(0.856771).epsilon(raw.epsilon));
    REQUIRE(scoret == Catch::Approx(0.856771).epsilon(raw.epsilon));
}
// Compare probability-averaging prediction against majority voting on iris:
// the two modes must produce the pinned (different) scores and probabilities.
TEST_CASE("Voting vs proba", "[BoostA2DE]")
{
    auto raw = RawDatasets("iris", true);
    auto clf = bayesnet::BoostA2DE(false);
    clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing);
    auto score_proba = clf.score(raw.Xv, raw.yv);
    auto pred_proba = clf.predict_proba(raw.Xv);
    // Switch the already-fitted classifier to voting mode; no refit is needed.
    clf.setHyperparameters({
        {"predict_voting",true},
        });
    auto score_voting = clf.score(raw.Xv, raw.yv);
    auto pred_voting = clf.predict_proba(raw.Xv);
    REQUIRE(score_proba == Catch::Approx(0.98).epsilon(raw.epsilon));
    REQUIRE(score_voting == Catch::Approx(0.946667).epsilon(raw.epsilon));
    // Sample 83, class 2 is a borderline case that separates the two modes.
    REQUIRE(pred_voting[83][2] == Catch::Approx(0.53508).epsilon(raw.epsilon));
    REQUIRE(pred_proba[83][2] == Catch::Approx(0.48394).epsilon(raw.epsilon));
    // NOTE(review): for this ensemble dump_cpt() and topological_order() are
    // expected to return empty results — confirm against the Ensemble API.
    REQUIRE(clf.dump_cpt() == "");
    REQUIRE(clf.topological_order() == std::vector<std::string>());
}
// Fit BoostA2DE on glass with each model-ordering strategy and pin the
// expected accuracy for every order; vector and tensor scoring must agree.
TEST_CASE("Order asc, desc & random", "[BoostA2DE]")
{
    auto raw = RawDatasets("glass", true);
    // Expected accuracy per ordering strategy.
    std::map<std::string, double> scores{
        {"asc", 0.752336f }, { "desc", 0.813084f }, { "rand", 0.850467 }
    };
    for (const std::string& order : { "asc", "desc", "rand" }) {
        auto clf = bayesnet::BoostA2DE();
        clf.setHyperparameters({
            {"order", order},
            {"bisection", false},
            {"maxTolerance", 1},
            {"convergence", false},
            });
        clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing);
        auto score = clf.score(raw.Xv, raw.yv);
        auto scoret = clf.score(raw.Xt, raw.yt);
        // Tag the assertions with the ordering being tested for clearer failures.
        INFO("BoostA2DE order: " + order);
        REQUIRE(score == Catch::Approx(scores[order]).epsilon(raw.epsilon));
        REQUIRE(scoret == Catch::Approx(scores[order]).epsilon(raw.epsilon));
    }
}
// Exercise the error paths: invalid hyperparameter values must throw
// std::invalid_argument, either at setHyperparameters() time or at fit() time.
TEST_CASE("Oddities2", "[BoostA2DE]")
{
    auto clf = bayesnet::BoostA2DE();
    auto raw = RawDatasets("iris", true);
    // Each entry is a hyperparameter set that is rejected immediately.
    auto bad_hyper = nlohmann::json{
        { { "order", "duck" } },
        { { "select_features", "duck" } },
        { { "maxTolerance", 0 } },
        { { "maxTolerance", 5 } },
    };
    for (const auto& hyper : bad_hyper.items()) {
        INFO("BoostA2DE hyper: " + hyper.value().dump());
        REQUIRE_THROWS_AS(clf.setHyperparameters(hyper.value()), std::invalid_argument);
    }
    REQUIRE_THROWS_AS(clf.setHyperparameters({ {"maxTolerance", 0 } }), std::invalid_argument);
    // These thresholds are accepted by setHyperparameters() but are out of the
    // valid range for their selector, so the failure surfaces at fit() time.
    auto bad_hyper_fit = nlohmann::json{
        { { "select_features","IWSS" }, { "threshold", -0.01 } },
        { { "select_features","IWSS" }, { "threshold", 0.51 } },
        { { "select_features","FCBF" }, { "threshold", 1e-8 } },
        { { "select_features","FCBF" }, { "threshold", 1.01 } },
    };
    for (const auto& hyper : bad_hyper_fit.items()) {
        INFO("BoostA2DE hyper: " + hyper.value().dump());
        clf.setHyperparameters(hyper.value());
        REQUIRE_THROWS_AS(clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing), std::invalid_argument);
    }
}
// With an FCBF threshold of 1 no feature passes selection; training must stop
// early and leave exactly one explanatory note instead of building models.
TEST_CASE("No features selected", "[BoostA2DE]")
{
    // Check that the note "No features selected in initialization" is added
    //
    auto raw = RawDatasets("iris", true);
    auto clf = bayesnet::BoostA2DE();
    clf.setHyperparameters({ {"select_features","FCBF"}, {"threshold", 1 } });
    clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing);
    REQUIRE(clf.getNotes().size() == 1);
    REQUIRE(clf.getNotes()[0] == "No features selected in initialization");
}
// Train with bisection enabled (keeping the last model set rather than the
// best, via convergence_best=false) on a 1200-sample slice of the Japanese
// Vowels dataset and pin the ensemble shape, notes and held-out score.
TEST_CASE("Bisection Best", "[BoostA2DE]")
{
    auto clf = bayesnet::BoostA2DE();
    auto raw = RawDatasets("kdd_JapaneseVowels", true, 1200, true, false);
    clf.setHyperparameters({
        {"bisection", true},
        {"maxTolerance", 3},
        {"convergence", true},
        {"block_update", false},
        {"convergence_best", false},
    });
    clf.fit(raw.X_train, raw.y_train, raw.features, raw.className, raw.states, raw.smoothing);
    REQUIRE(clf.getNumberOfNodes() == 480);
    REQUIRE(clf.getNumberOfEdges() == 1152);
    REQUIRE(clf.getNotes().size() == 3);
    REQUIRE(clf.getNotes().at(0) == "Convergence threshold reached & 15 models eliminated");
    REQUIRE(clf.getNotes().at(1) == "Pairs not used in train: 83");
    REQUIRE(clf.getNotes().at(2) == "Number of models: 32");
    // Scoring twice on the same split must be deterministic.
    auto score = clf.score(raw.X_test, raw.y_test);
    auto scoret = clf.score(raw.X_test, raw.y_test);
    REQUIRE(score == Catch::Approx(0.966667f).epsilon(raw.epsilon));
    REQUIRE(scoret == Catch::Approx(0.966667f).epsilon(raw.epsilon));
}
// Train with block-wise weight updates on a 500-sample slice of spambase and
// pin the resulting ensemble shape, training notes and held-out score as a
// regression fingerprint of the block_update code path.
TEST_CASE("Block Update", "[BoostA2DE]")
{
    auto clf = bayesnet::BoostA2DE();
    auto raw = RawDatasets("spambase", true, 500);
    clf.setHyperparameters({
        {"bisection", true},
        {"block_update", true},
        {"maxTolerance", 3},
        {"convergence", true},
    });
    clf.fit(raw.X_train, raw.y_train, raw.features, raw.className, raw.states, raw.smoothing);
    REQUIRE(clf.getNumberOfNodes() == 58);
    REQUIRE(clf.getNumberOfEdges() == 165);
    REQUIRE(clf.getNotes().size() == 3);
    REQUIRE(clf.getNotes()[0] == "Convergence threshold reached & 15 models eliminated");
    REQUIRE(clf.getNotes()[1] == "Pairs not used in train: 1588");
    REQUIRE(clf.getNotes()[2] == "Number of models: 1");
    // Scoring twice on the same split must be deterministic.
    auto score = clf.score(raw.X_test, raw.y_test);
    auto scoret = clf.score(raw.X_test, raw.y_test);
    REQUIRE(score == Catch::Approx(1.0f).epsilon(raw.epsilon));
    REQUIRE(scoret == Catch::Approx(1.0f).epsilon(raw.epsilon));
    // Removed the commented-out std::cout debugging dump that trailed this
    // test; failure diagnostics are already covered by the REQUIRE messages.
}
// Verify the Graphviz export of a fitted BoostA2DE on iris: overall size plus
// the exact header line and the class-node line of the generated digraph.
TEST_CASE("Test graph b2a2de", "[BoostA2DE]")
{
    auto raw = RawDatasets("iris", true);
    auto clf = bayesnet::BoostA2DE();
    clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing);
    auto graph = clf.graph();
    REQUIRE(graph.size() == 26);
    REQUIRE(graph[0] == "digraph BayesNet {\nlabel=<BayesNet BoostA2DE_0>\nfontsize=30\nfontcolor=blue\nlabelloc=t\nlayout=circo\n");
    REQUIRE(graph[1] == "\"class\" [shape=circle, fontcolor=red, fillcolor=lightblue, style=filled ] \n");
}

72
tests/TestMST.cc Normal file
View File

@@ -0,0 +1,72 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#include <catch2/catch_test_macros.hpp>
#include <catch2/catch_approx.hpp>
#include <catch2/generators/catch_generators.hpp>
#include <catch2/matchers/catch_matchers.hpp>
#include <string>
#include <vector>
#include "TestUtils.h"
#include "bayesnet/utils/Mst.h"
// Unit tests for MST::insertElement: elements are pushed to the FRONT of the
// list and duplicates are ignored, preserving the existing order.
TEST_CASE("MST::insertElement tests", "[MST]")
{
    // The MST instance state is irrelevant here; insertElement only touches
    // the list passed in, so an empty MST is enough.
    bayesnet::MST mst({}, torch::tensor({}), 0);
    SECTION("Insert into an empty list")
    {
        std::list<int> variables;
        mst.insertElement(variables, 5);
        REQUIRE(variables == std::list<int>{5});
    }
    SECTION("Insert a non-duplicate element")
    {
        std::list<int> variables = { 1, 2, 3 };
        mst.insertElement(variables, 4);
        // New elements go to the front.
        REQUIRE(variables == std::list<int>{4, 1, 2, 3});
    }
    SECTION("Insert a duplicate element")
    {
        std::list<int> variables = { 1, 2, 3 };
        mst.insertElement(variables, 2);
        // Duplicates leave the list untouched.
        REQUIRE(variables == std::list<int>{1, 2, 3});
    }
}
// Unit tests for MST::reorder: turn a weighted MST edge list into directed
// (parent, child) pairs rooted at root_original.
TEST_CASE("MST::reorder tests", "[MST]")
{
    // reorder only uses its arguments, so an empty MST instance suffices.
    bayesnet::MST mst({}, torch::tensor({}), 0);
    SECTION("Reorder simple graph")
    {
        // Connected chain 0-1-2 rooted at 0 comes out parent-first.
        std::vector<std::pair<float, std::pair<int, int>>> T = { {2.0, {1, 2}}, {1.0, {0, 1}} };
        auto result = mst.reorder(T, 0);
        REQUIRE(result == std::vector<std::pair<int, int>>{{0, 1}, { 1, 2 }});
    }
    SECTION("Reorder with disconnected graph")
    {
        // Fixed: this section previously reused the CONNECTED input of the
        // section above ({1-2, 0-1}) while asserting {{0,1},{2,3}} — a result
        // that contradicts the previous section on identical input. Use a
        // genuinely disconnected edge set, components {0,1} and {2,3}, which
        // matches both the section name and the expected output.
        std::vector<std::pair<float, std::pair<int, int>>> T = { {2.0, {2, 3}}, {1.0, {0, 1}} };
        auto result = mst.reorder(T, 0);
        REQUIRE(result == std::vector<std::pair<int, int>>{{0, 1}, { 2, 3 }});
    }
}
// Unit test for MST::maximumSpanningTree on a small complete graph with a
// symmetric weight matrix (diagonal zero).
TEST_CASE("MST::maximumSpanningTree tests", "[MST]")
{
    std::vector<std::string> features = { "A", "B", "C" };
    // Symmetric pairwise weights; the heaviest edges are B-C (3.0) and A-C (2.0).
    auto weights = torch::tensor({
        {0.0, 1.0, 2.0},
        {1.0, 0.0, 3.0},
        {2.0, 3.0, 0.0}
        });
    bayesnet::MST mst(features, weights, 0);
    SECTION("MST of a complete graph")
    {
        auto result = mst.maximumSpanningTree();
        REQUIRE(result.size() == 2); // An MST over 3 nodes has exactly 2 edges
    }
}

4811
tests/data/spambase.arff Executable file

File diff suppressed because it is too large Load Diff