Begin Test Folding

This commit is contained in:
2023-10-06 17:08:54 +02:00
parent b9e0028e9d
commit 17e079edd5
10 changed files with 250 additions and 55 deletions

View File

@@ -5,7 +5,7 @@ if(ENABLE_TESTING)
include_directories(${BayesNet_SOURCE_DIR}/lib/Files)
include_directories(${BayesNet_SOURCE_DIR}/lib/mdlp)
include_directories(${BayesNet_SOURCE_DIR}/lib/json/include)
set(TEST_SOURCES TestBayesModels.cc TestBayesNetwork.cc TestBayesMetrics.cc TestUtils.cc ${BayesNet_SOURCE_DIR}/src/Platform/Folding.cc ${BayesNet_SOURCES})
set(TEST_SOURCES TestBayesModels.cc TestBayesNetwork.cc TestBayesMetrics.cc TestFolding.cc TestUtils.cc ${BayesNet_SOURCE_DIR}/src/Platform/Folding.cc ${BayesNet_SOURCES})
add_executable(${TEST_MAIN} ${TEST_SOURCES})
target_link_libraries(${TEST_MAIN} PUBLIC "${TORCH_LIBRARIES}" ArffFiles mdlp Catch2::Catch2WithMain)
add_test(NAME ${TEST_MAIN} COMMAND ${TEST_MAIN})

View File

@@ -22,19 +22,13 @@ TEST_CASE("Metrics Test", "[BayesNet]")
{"diabetes", 0.0345470614}
};
map<string, vector<pair<int, int>>> resultsMST = {
{"glass", {{0,6}, {0,5}, {0,3}, {5,1}, {5,8}, {6,2}, {6,7}, {7,4}}},
{"glass", {{0,6}, {0,5}, {0,3}, {6,2}, {6,7}, {5,1}, {5,8}, {5,4}}},
{"iris", {{0,1},{0,2},{1,3}}},
{"ecoli", {{0,1}, {0,2}, {1,5}, {1,3}, {5,6}, {5,4}}},
{"diabetes", {{0,7}, {0,2}, {0,6}, {2,3}, {3,4}, {3,5}, {4,1}}}
};
auto [XDisc, yDisc, featuresDisc, classNameDisc, statesDisc] = loadDataset(file_name, true, true);
int classNumStates = statesDisc.at(classNameDisc).size();
auto yresized = torch::transpose(yDisc.view({ yDisc.size(0), 1 }), 0, 1);
torch::Tensor dataset = torch::cat({ XDisc, yresized }, 0);
int nSamples = dataset.size(1);
double epsilon = 1e-5;
torch::Tensor weights = torch::full({ nSamples }, 1.0 / nSamples, torch::kDouble);
bayesnet::Metrics metrics(dataset, featuresDisc, classNameDisc, classNumStates);
auto raw = RawDatasets(file_name, true);
bayesnet::Metrics metrics(raw.dataset, raw.featurest, raw.classNamet, raw.classNumStates);
SECTION("Test Constructor")
{
@@ -43,21 +37,21 @@ TEST_CASE("Metrics Test", "[BayesNet]")
SECTION("Test SelectKBestWeighted")
{
vector<int> kBest = metrics.SelectKBestWeighted(weights, true, resultsKBest.at(file_name).first);
vector<int> kBest = metrics.SelectKBestWeighted(raw.weights, true, resultsKBest.at(file_name).first);
REQUIRE(kBest.size() == resultsKBest.at(file_name).first);
REQUIRE(kBest == resultsKBest.at(file_name).second);
}
SECTION("Test Mutual Information")
{
auto result = metrics.mutualInformation(dataset.index({ 1, "..." }), dataset.index({ 2, "..." }), weights);
REQUIRE(result == Catch::Approx(resultsMI.at(file_name)).epsilon(epsilon));
auto result = metrics.mutualInformation(raw.dataset.index({ 1, "..." }), raw.dataset.index({ 2, "..." }), raw.weights);
REQUIRE(result == Catch::Approx(resultsMI.at(file_name)).epsilon(raw.epsilon));
}
SECTION("Test Maximum Spanning Tree")
{
auto weights_matrix = metrics.conditionalEdge(weights);
auto result = metrics.maximumSpanningTree(featuresDisc, weights_matrix, 0);
auto weights_matrix = metrics.conditionalEdge(raw.weights);
auto result = metrics.maximumSpanningTree(raw.featurest, weights_matrix, 0);
REQUIRE(result == resultsMST.at(file_name));
}
}

View File

@@ -34,83 +34,81 @@ TEST_CASE("Test Bayesian Classifiers score", "[BayesNet]")
};
string file_name = GENERATE("glass", "iris", "ecoli", "diabetes");
auto [XCont, yCont, featuresCont, classNameCont, statesCont] = loadDataset(file_name, true, false);
auto [XDisc, yDisc, featuresDisc, classNameDisc, statesDisc] = loadFile(file_name);
double epsilon = 1e-5;
auto raw = RawDatasets(file_name, false);
SECTION("Test TAN classifier (" + file_name + ")")
{
auto clf = bayesnet::TAN();
clf.fit(XDisc, yDisc, featuresDisc, classNameDisc, statesDisc);
auto score = clf.score(XDisc, yDisc);
clf.fit(raw.Xv, raw.yv, raw.featuresv, raw.classNamev, raw.statesv);
auto score = clf.score(raw.Xv, raw.yv);
//scores[{file_name, "TAN"}] = score;
REQUIRE(score == Catch::Approx(scores[{file_name, "TAN"}]).epsilon(epsilon));
REQUIRE(score == Catch::Approx(scores[{file_name, "TAN"}]).epsilon(raw.epsilon));
}
SECTION("Test TANLd classifier (" + file_name + ")")
{
auto clf = bayesnet::TANLd();
clf.fit(XCont, yCont, featuresCont, classNameCont, statesCont);
auto score = clf.score(XCont, yCont);
clf.fit(raw.Xt, raw.yt, raw.featurest, raw.classNamet, raw.statest);
auto score = clf.score(raw.Xt, raw.yt);
//scores[{file_name, "TANLd"}] = score;
REQUIRE(score == Catch::Approx(scores[{file_name, "TANLd"}]).epsilon(epsilon));
REQUIRE(score == Catch::Approx(scores[{file_name, "TANLd"}]).epsilon(raw.epsilon));
}
SECTION("Test KDB classifier (" + file_name + ")")
{
auto clf = bayesnet::KDB(2);
clf.fit(XDisc, yDisc, featuresDisc, classNameDisc, statesDisc);
auto score = clf.score(XDisc, yDisc);
clf.fit(raw.Xv, raw.yv, raw.featuresv, raw.classNamev, raw.statesv);
auto score = clf.score(raw.Xv, raw.yv);
//scores[{file_name, "KDB"}] = score;
REQUIRE(score == Catch::Approx(scores[{file_name, "KDB"
}]).epsilon(epsilon));
}]).epsilon(raw.epsilon));
}
SECTION("Test KDBLd classifier (" + file_name + ")")
{
auto clf = bayesnet::KDBLd(2);
clf.fit(XCont, yCont, featuresCont, classNameCont, statesCont);
auto score = clf.score(XCont, yCont);
clf.fit(raw.Xt, raw.yt, raw.featurest, raw.classNamet, raw.statest);
auto score = clf.score(raw.Xt, raw.yt);
//scores[{file_name, "KDBLd"}] = score;
REQUIRE(score == Catch::Approx(scores[{file_name, "KDBLd"
}]).epsilon(epsilon));
}]).epsilon(raw.epsilon));
}
SECTION("Test SPODE classifier (" + file_name + ")")
{
auto clf = bayesnet::SPODE(1);
clf.fit(XDisc, yDisc, featuresDisc, classNameDisc, statesDisc);
auto score = clf.score(XDisc, yDisc);
clf.fit(raw.Xv, raw.yv, raw.featuresv, raw.classNamev, raw.statesv);
auto score = clf.score(raw.Xv, raw.yv);
// scores[{file_name, "SPODE"}] = score;
REQUIRE(score == Catch::Approx(scores[{file_name, "SPODE"}]).epsilon(epsilon));
REQUIRE(score == Catch::Approx(scores[{file_name, "SPODE"}]).epsilon(raw.epsilon));
}
SECTION("Test SPODELd classifier (" + file_name + ")")
{
auto clf = bayesnet::SPODELd(1);
clf.fit(XCont, yCont, featuresCont, classNameCont, statesCont);
auto score = clf.score(XCont, yCont);
clf.fit(raw.Xt, raw.yt, raw.featurest, raw.classNamet, raw.statest);
auto score = clf.score(raw.Xt, raw.yt);
// scores[{file_name, "SPODELd"}] = score;
REQUIRE(score == Catch::Approx(scores[{file_name, "SPODELd"}]).epsilon(epsilon));
REQUIRE(score == Catch::Approx(scores[{file_name, "SPODELd"}]).epsilon(raw.epsilon));
}
SECTION("Test AODE classifier (" + file_name + ")")
{
auto clf = bayesnet::AODE();
clf.fit(XDisc, yDisc, featuresDisc, classNameDisc, statesDisc);
auto score = clf.score(XDisc, yDisc);
clf.fit(raw.Xv, raw.yv, raw.featuresv, raw.classNamev, raw.statesv);
auto score = clf.score(raw.Xv, raw.yv);
// scores[{file_name, "AODE"}] = score;
REQUIRE(score == Catch::Approx(scores[{file_name, "AODE"}]).epsilon(epsilon));
REQUIRE(score == Catch::Approx(scores[{file_name, "AODE"}]).epsilon(raw.epsilon));
}
SECTION("Test AODELd classifier (" + file_name + ")")
{
auto clf = bayesnet::AODELd();
clf.fit(XCont, yCont, featuresCont, classNameCont, statesCont);
auto score = clf.score(XCont, yCont);
clf.fit(raw.Xt, raw.yt, raw.featurest, raw.classNamet, raw.statest);
auto score = clf.score(raw.Xt, raw.yt);
// scores[{file_name, "AODELd"}] = score;
REQUIRE(score == Catch::Approx(scores[{file_name, "AODELd"}]).epsilon(epsilon));
REQUIRE(score == Catch::Approx(scores[{file_name, "AODELd"}]).epsilon(raw.epsilon));
}
SECTION("Test BoostAODE classifier (" + file_name + ")")
{
auto clf = bayesnet::BoostAODE();
clf.fit(XDisc, yDisc, featuresDisc, classNameDisc, statesDisc);
auto score = clf.score(XDisc, yDisc);
clf.fit(raw.Xv, raw.yv, raw.featuresv, raw.classNamev, raw.statesv);
auto score = clf.score(raw.Xv, raw.yv);
// scores[{file_name, "BoostAODE"}] = score;
REQUIRE(score == Catch::Approx(scores[{file_name, "BoostAODE"}]).epsilon(epsilon));
REQUIRE(score == Catch::Approx(scores[{file_name, "BoostAODE"}]).epsilon(raw.epsilon));
}
// for (auto scores : scores) {
// cout << "{{\"" << scores.first.first << "\", \"" << scores.first.second << "\"}, " << scores.second << "}, ";
@@ -125,10 +123,9 @@ TEST_CASE("Models features", "[BayesNet]")
"sepallength -> sepalwidth", "sepalwidth [shape=circle] \n", "sepalwidth -> petalwidth", "}\n"
}
);
auto raw = RawDatasets("iris", true);
auto clf = bayesnet::TAN();
auto [XDisc, yDisc, featuresDisc, classNameDisc, statesDisc] = loadFile("iris");
clf.fit(XDisc, yDisc, featuresDisc, classNameDisc, statesDisc);
clf.fit(raw.Xv, raw.yv, raw.featuresv, raw.classNamev, raw.statesv);
REQUIRE(clf.getNumberOfNodes() == 6);
REQUIRE(clf.getNumberOfEdges() == 7);
REQUIRE(clf.show() == vector<string>{"class -> sepallength, sepalwidth, petallength, petalwidth, ", "petallength -> sepallength, ", "petalwidth -> ", "sepallength -> sepalwidth, ", "sepalwidth -> petalwidth, "});
@@ -136,9 +133,9 @@ TEST_CASE("Models features", "[BayesNet]")
}
TEST_CASE("Get num features & num edges", "[BayesNet]")
{
auto [XDisc, yDisc, featuresDisc, classNameDisc, statesDisc] = loadFile("iris");
auto raw = RawDatasets("iris", true);
auto clf = bayesnet::KDB(2);
clf.fit(XDisc, yDisc, featuresDisc, classNameDisc, statesDisc);
clf.fit(raw.Xv, raw.yv, raw.featuresv, raw.classNamev, raw.statesv);
REQUIRE(clf.getNumberOfNodes() == 6);
REQUIRE(clf.getNumberOfEdges() == 8);
}

98
tests/TestFolding.cc Normal file
View File

@@ -0,0 +1,98 @@
#include <catch2/catch_test_macros.hpp>
#include <catch2/catch_approx.hpp>
#include <catch2/generators/catch_generators.hpp>
#include "TestUtils.h"
#include "Folding.h"
TEST_CASE("KFold Test", "[KFold]")
{
    // Initialize a KFold object with k=5 and a seed of 19.
    string file_name = GENERATE("glass", "iris", "ecoli", "diabetes");
    auto raw = RawDatasets(file_name, true);
    int nFolds = 5;
    // NOTE(review): original line ended in a stray "s" ("...19);s") — removed, it would not compile.
    platform::KFold kfold(nFolds, raw.nSamples, 19);
    // Minimum size of a training partition: (k-1)/k of the samples, rounded down.
    // When nSamples is not divisible by nFolds, some folds hold one extra sample.
    int number = raw.nSamples * (kfold.getNumberOfFolds() - 1) / kfold.getNumberOfFolds();
    SECTION("Number of Folds")
    {
        REQUIRE(kfold.getNumberOfFolds() == nFolds);
    }
    SECTION("Fold Test")
    {
        // Each fold's train partition is `number` or `number + 1` samples,
        // and train + test must always cover the whole dataset exactly.
        for (int i = 0; i < nFolds; ++i) {
            auto [train_indices, test_indices] = kfold.getFold(i);
            bool result = train_indices.size() == number || train_indices.size() == number + 1;
            REQUIRE(result);
            REQUIRE(train_indices.size() + test_indices.size() == raw.nSamples);
        }
    }
}
// Tally the class labels selected by `indices`: result[label] = number of
// positions i for which y[indices[i]] == label.
// Takes both vectors by const reference (the originals were copied by value)
// and uses a range-for instead of a signed/unsigned index loop.
std::map<int, int> counts(const std::vector<int>& y, const std::vector<int>& indices)
{
    std::map<int, int> result;
    for (int idx : indices) {
        // operator[] value-initializes a missing key to 0 before ++.
        ++result[y[idx]];
    }
    return result;
}
TEST_CASE("StratifiedKFold Test", "[StratifiedKFold]")
{
    int nFolds = 3;
    // Initialize a StratifiedKFold object with k=3, using the y vector, and a seed of 17.
    string file_name = GENERATE("glass", "iris", "ecoli", "diabetes");
    auto raw = RawDatasets(file_name, true);
    // Two equivalent constructions: one from the torch tensor labels, one from
    // the STL vector labels. Their folds must be identical.
    platform::StratifiedKFold stratified_kfoldt(nFolds, raw.yt, 17);
    platform::StratifiedKFold stratified_kfoldv(nFolds, raw.yv, 17);
    // Minimum size of a training partition: (k-1)/k of the samples, rounded down.
    // NOTE(review): original referenced an undefined `stratified_kfold`; use the tensor variant.
    int number = raw.nSamples * (stratified_kfoldt.getNumberOfFolds() - 1) / stratified_kfoldt.getNumberOfFolds();
    SECTION("Number of Folds")
    {
        REQUIRE(stratified_kfoldt.getNumberOfFolds() == nFolds);
        REQUIRE(stratified_kfoldv.getNumberOfFolds() == nFolds);
    }
    SECTION("Fold Test")
    {
        // Test each fold's size and contents.
        for (int i = 0; i < nFolds; ++i) {
            // NOTE(review): original referenced undefined `train_indices`/`test_indices`;
            // the structured bindings below are the names actually in scope.
            auto [train_indicest, test_indicest] = stratified_kfoldt.getFold(i);
            auto [train_indicesv, test_indicesv] = stratified_kfoldv.getFold(i);
            // Tensor-built and vector-built folds must agree exactly.
            REQUIRE(train_indicest == train_indicesv);
            REQUIRE(test_indicest == test_indicesv);
            bool result = train_indicest.size() == number || train_indicest.size() == number + 1;
            REQUIRE(result);
            REQUIRE(train_indicest.size() + test_indicest.size() == raw.nSamples);
            // Check that every class label is represented in the training partition
            // (stratification must not starve any class).
            // Renamed from `counts` to avoid shadowing the counts() helper above.
            auto label_counts = counts(raw.yv, train_indicest);
            REQUIRE(label_counts.size() == raw.classNumStates);
            for (const auto& [label, count] : label_counts) {
                REQUIRE(count > 0);
            }
        }
    }
}

View File

@@ -14,6 +14,29 @@ pair<vector<mdlp::labels_t>, map<string, int>> discretize(vector<mdlp::samples_t
vector<mdlp::labels_t> discretizeDataset(vector<mdlp::samples_t>& X, mdlp::labels_t& y);
tuple<vector<vector<int>>, vector<int>, vector<string>, string, map<string, vector<int>>> loadFile(const string& name);
tuple<torch::Tensor, torch::Tensor, vector<string>, string, map<string, vector<int>>> loadDataset(const string& name, bool class_last, bool discretize_dataset);
#endif //TEST_UTILS_H
#
// Test fixture bundling one dataset in the two parallel representations the
// test suites need: torch tensors (suffix "t": Xt, yt, featurest, ...) and
// STL vectors (suffix "v": Xv, yv, featuresv, ...), plus the stacked dataset
// tensor, uniform sample weights, and class cardinality.
class RawDatasets {
public:
    // file_name: dataset name passed through to loadDataset()/loadFile().
    // discretize: whether the tensor representation (Xt) is discretized;
    //             the vector representation (Xv) is always discretized.
    RawDatasets(const string& file_name, bool discretize)
    {
        // Xt can be either discretized or not
        tie(Xt, yt, featurest, classNamet, statest) = loadDataset(file_name, true, discretize);
        // Xv is always discretized
        tie(Xv, yv, featuresv, classNamev, statesv) = loadFile(file_name);
        // Append the labels as the last row of the dataset tensor:
        // reshape y to a 1 x nSamples row and concatenate along dim 0.
        auto yresized = torch::transpose(yt.view({ yt.size(0), 1 }), 0, 1);
        dataset = torch::cat({ Xt, yresized }, 0);
        nSamples = dataset.size(1);
        // Uniform per-sample weights summing to 1.
        weights = torch::full({ nSamples }, 1.0 / nSamples, torch::kDouble);
        // Number of class states is only meaningful when discretized; it is 0
        // otherwise. NOTE(review): size() is size_t narrowed into int here —
        // fine for test datasets, but worth confirming intent.
        classNumStates = discretize ? statest.at(classNamet).size() : 0;
    }
    torch::Tensor Xt, yt, dataset, weights;   // tensor data, stacked dataset, sample weights
    vector<vector<int>> Xv;                   // discretized feature vectors
    vector<int> yv;                           // discretized labels
    vector<string> featurest, featuresv;      // feature names (tensor / vector loads)
    map<string, vector<int>> statest, statesv; // per-feature state values
    string classNamet, classNamev;            // class attribute name
    int nSamples, classNumStates;
    double epsilon = 1e-5;                    // shared tolerance for Catch::Approx checks
};
#endif //TEST_UTILS_H