Compare commits: 2e3e0e0fc2 ... 80043d5181
3 Commits: 80043d5181, 677ec5613d, cccaa6e0af
@@ -50,120 +50,115 @@ namespace bayesnet {
// loguru::g_stderr_verbosity = loguru::Verbosity_OFF;
// loguru::add_file("boostA2DE.log", loguru::Truncate, loguru::Verbosity_MAX);

// // Algorithm based on the adaboost algorithm for classification
// // as explained in Ensemble methods (Zhi-Hua Zhou, 2012)
// fitted = true;
// double alpha_t = 0;
// torch::Tensor weights_ = torch::full({ m }, 1.0 / m, torch::kFloat64);
// bool finished = false;
// std::vector<int> featuresUsed;
// if (selectFeatures) {
// featuresUsed = initializeModels();
// auto ypred = predict(X_train);
// std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
// // Update significance of the models
// for (int i = 0; i < n_models; ++i) {
// significanceModels[i] = alpha_t;
// }
// if (finished) {
// return;
// }
// }
// int numItemsPack = 0; // The counter of the models inserted in the current pack
// // Variables to control the accuracy finish condition
// double priorAccuracy = 0.0;
// double improvement = 1.0;
// double convergence_threshold = 1e-4;
// int tolerance = 0; // number of times the accuracy is lower than the convergence_threshold
// // Step 0: Set the finish condition
// // epsilon sub t > 0.5 => inverse the weights policy
// // validation error is not decreasing
// // run out of features
// bool ascending = order_algorithm == Orders.ASC;
// std::mt19937 g{ 173 };
// while (!finished) {
// // Step 1: Build ranking with mutual information
// auto pairSelection = metrics.SelectKBestWeighted(weights_, ascending, n); // Get all the features sorted
// if (order_algorithm == Orders.RAND) {
// std::shuffle(featureSelection.begin(), featureSelection.end(), g);
// }
// // Remove used features
// featureSelection.erase(remove_if(begin(featureSelection), end(featureSelection), [&](auto x)
// { return std::find(begin(featuresUsed), end(featuresUsed), x) != end(featuresUsed);}),
// end(featureSelection)
// );
// int k = bisection ? pow(2, tolerance) : 1;
// int counter = 0; // The model counter of the current pack
// VLOG_SCOPE_F(1, "counter=%d k=%d featureSelection.size: %zu", counter, k, featureSelection.size());
// while (counter++ < k && featureSelection.size() > 0) {
// auto feature = featureSelection[0];
// featureSelection.erase(featureSelection.begin());
// std::unique_ptr<Classifier> model;
// model = std::make_unique<SPODE>(feature);
// model->fit(dataset, features, className, states, weights_);
// alpha_t = 0.0;
// if (!block_update) {
// auto ypred = model->predict(X_train);
// // Step 3.1: Compute the classifier amout of say
// std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
// }
// // Step 3.4: Store classifier and its accuracy to weigh its future vote
// numItemsPack++;
// featuresUsed.push_back(feature);
// models.push_back(std::move(model));
// significanceModels.push_back(alpha_t);
// n_models++;
// VLOG_SCOPE_F(2, "numItemsPack: %d n_models: %d featuresUsed: %zu", numItemsPack, n_models, featuresUsed.size());
// }
// if (block_update) {
// std::tie(weights_, alpha_t, finished) = update_weights_block(k, y_train, weights_);
// }
// if (convergence && !finished) {
// auto y_val_predict = predict(X_test);
// double accuracy = (y_val_predict == y_test).sum().item<double>() / (double)y_test.size(0);
// if (priorAccuracy == 0) {
// priorAccuracy = accuracy;
// } else {
// improvement = accuracy - priorAccuracy;
// }
// if (improvement < convergence_threshold) {
// VLOG_SCOPE_F(3, " (improvement<threshold) tolerance: %d numItemsPack: %d improvement: %f prior: %f current: %f", tolerance, numItemsPack, improvement, priorAccuracy, accuracy);
// tolerance++;
// } else {
// VLOG_SCOPE_F(3, "* (improvement>=threshold) Reset. tolerance: %d numItemsPack: %d improvement: %f prior: %f current: %f", tolerance, numItemsPack, improvement, priorAccuracy, accuracy);
// tolerance = 0; // Reset the counter if the model performs better
// numItemsPack = 0;
// }
// if (convergence_best) {
// // Keep the best accuracy until now as the prior accuracy
// priorAccuracy = std::max(accuracy, priorAccuracy);
// } else {
// // Keep the last accuray obtained as the prior accuracy
// priorAccuracy = accuracy;
// }
// }
// VLOG_SCOPE_F(1, "tolerance: %d featuresUsed.size: %zu features.size: %zu", tolerance, featuresUsed.size(), features.size());
// finished = finished || tolerance > maxTolerance || featuresUsed.size() == features.size();
// }
// if (tolerance > maxTolerance) {
// if (numItemsPack < n_models) {
// notes.push_back("Convergence threshold reached & " + std::to_string(numItemsPack) + " models eliminated");
// VLOG_SCOPE_F(4, "Convergence threshold reached & %d models eliminated of %d", numItemsPack, n_models);
// for (int i = 0; i < numItemsPack; ++i) {
// significanceModels.pop_back();
// models.pop_back();
// n_models--;
// }
// } else {
// notes.push_back("Convergence threshold reached & 0 models eliminated");
// VLOG_SCOPE_F(4, "Convergence threshold reached & 0 models eliminated n_models=%d numItemsPack=%d", n_models, numItemsPack);
// }
// }
// if (featuresUsed.size() != features.size()) {
// notes.push_back("Used features in train: " + std::to_string(featuresUsed.size()) + " of " + std::to_string(features.size()));
// status = WARNING;
// }
// notes.push_back("Number of models: " + std::to_string(n_models));
    // Algorithm based on the adaboost algorithm for classification
    // as explained in Ensemble methods (Zhi-Hua Zhou, 2012)
    fitted = true;
    double alpha_t = 0;
    torch::Tensor weights_ = torch::full({ m }, 1.0 / m, torch::kFloat64);
    bool finished = false;
    std::vector<int> featuresUsed;
    if (selectFeatures) {
        featuresUsed = initializeModels();
        auto ypred = predict(X_train);
        std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
        // Update significance of the models
        for (int i = 0; i < n_models; ++i) {
            significanceModels[i] = alpha_t;
        }
        if (finished) {
            return;
        }
    }
    int numItemsPack = 0; // The counter of the models inserted in the current pack
    // Variables to control the accuracy finish condition
    double priorAccuracy = 0.0;
    double improvement = 1.0;
    double convergence_threshold = 1e-4;
    int tolerance = 0; // number of times the accuracy is lower than the convergence_threshold
    // Step 0: Set the finish condition
    // epsilon sub t > 0.5 => inverse the weights policy
    // validation error is not decreasing
    // run out of features
    bool ascending = order_algorithm == Orders.ASC;
    std::mt19937 g{ 173 };
    std::vector<std::pair<int, int>> pairSelection;
    while (!finished) {
        // Step 1: Build ranking with mutual information
        pairSelection = metrics.SelectKPairs(weights_, featuresUsed, ascending, 0); // Get all the pairs sorted
        if (order_algorithm == Orders.RAND) {
            std::shuffle(pairSelection.begin(), pairSelection.end(), g);
        }
        int k = bisection ? pow(2, tolerance) : 1;
        int counter = 0; // The model counter of the current pack
        // VLOG_SCOPE_F(1, "counter=%d k=%d featureSelection.size: %zu", counter, k, featureSelection.size());
        while (counter++ < k && pairSelection.size() > 0) {
            auto feature_pair = pairSelection[0];
            pairSelection.erase(pairSelection.begin());
            std::unique_ptr<Classifier> model;
            model = std::make_unique<SPnDE>(std::vector<int>({ feature_pair.first, feature_pair.second }));
            model->fit(dataset, features, className, states, weights_);
            alpha_t = 0.0;
            if (!block_update) {
                auto ypred = model->predict(X_train);
                // Step 3.1: Compute the classifier amout of say
                std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
            }
            // Step 3.4: Store classifier and its accuracy to weigh its future vote
            numItemsPack++;
            models.push_back(std::move(model));
            significanceModels.push_back(alpha_t);
            n_models++;
            // VLOG_SCOPE_F(2, "numItemsPack: %d n_models: %d featuresUsed: %zu", numItemsPack, n_models, featuresUsed.size());
        }
        if (block_update) {
            std::tie(weights_, alpha_t, finished) = update_weights_block(k, y_train, weights_);
        }
        if (convergence && !finished) {
            auto y_val_predict = predict(X_test);
            double accuracy = (y_val_predict == y_test).sum().item<double>() / (double)y_test.size(0);
            if (priorAccuracy == 0) {
                priorAccuracy = accuracy;
            } else {
                improvement = accuracy - priorAccuracy;
            }
            if (improvement < convergence_threshold) {
                // VLOG_SCOPE_F(3, " (improvement<threshold) tolerance: %d numItemsPack: %d improvement: %f prior: %f current: %f", tolerance, numItemsPack, improvement, priorAccuracy, accuracy);
                tolerance++;
            } else {
                // VLOG_SCOPE_F(3, "* (improvement>=threshold) Reset. tolerance: %d numItemsPack: %d improvement: %f prior: %f current: %f", tolerance, numItemsPack, improvement, priorAccuracy, accuracy);
                tolerance = 0; // Reset the counter if the model performs better
                numItemsPack = 0;
            }
            if (convergence_best) {
                // Keep the best accuracy until now as the prior accuracy
                priorAccuracy = std::max(accuracy, priorAccuracy);
            } else {
                // Keep the last accuray obtained as the prior accuracy
                priorAccuracy = accuracy;
            }
        }
        // VLOG_SCOPE_F(1, "tolerance: %d featuresUsed.size: %zu features.size: %zu", tolerance, featuresUsed.size(), features.size());
        finished = finished || tolerance > maxTolerance || pairSelection.size() == 0;
    }
    if (tolerance > maxTolerance) {
        if (numItemsPack < n_models) {
            notes.push_back("Convergence threshold reached & " + std::to_string(numItemsPack) + " models eliminated");
            // VLOG_SCOPE_F(4, "Convergence threshold reached & %d models eliminated of %d", numItemsPack, n_models);
            for (int i = 0; i < numItemsPack; ++i) {
                significanceModels.pop_back();
                models.pop_back();
                n_models--;
            }
        } else {
            notes.push_back("Convergence threshold reached & 0 models eliminated");
            // VLOG_SCOPE_F(4, "Convergence threshold reached & 0 models eliminated n_models=%d numItemsPack=%d", n_models, numItemsPack);
        }
    }
    if (pairSelection.size() > 0) {
        notes.push_back("Used pairs not used in train: " + std::to_string(pairSelection.size()));
        status = WARNING;
    }
    notes.push_back("Number of models: " + std::to_string(n_models));
}
std::vector<std::string> BoostA2DE::graph(const std::string& title) const
{
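For readers unfamiliar with the boosting loop above: `update_weights` is expected to implement the standard AdaBoost weight update referenced by the comment (Zhi-Hua Zhou, Ensemble Methods, 2012). The sketch below is only an illustration of the classic binary recipe with hypothetical names (`adaboost_step`, `y_true`, `y_pred`); it is not the library's actual implementation, which may differ in details such as multi-class handling or how the epsilon >= 0.5 case is treated.

    #include <cmath>
    #include <tuple>
    #include <torch/torch.h>

    // Illustrative AdaBoost step: returns the renormalized sample weights,
    // the model's amount of say alpha_t, and whether boosting should stop
    // because the weighted error is degenerate.
    std::tuple<torch::Tensor, double, bool> adaboost_step(const torch::Tensor& y_true,
                                                          const torch::Tensor& y_pred,
                                                          torch::Tensor weights)
    {
        auto wrong = (y_pred != y_true).to(torch::kFloat64);
        double epsilon_t = (weights * wrong).sum().item<double>();   // weighted error
        if (epsilon_t <= 0.0 || epsilon_t >= 0.5) {
            // perfect or worse-than-chance classifier: stop here
            // (the library's comment mentions inverting the weight policy instead)
            return { weights, 0.0, true };
        }
        double alpha_t = 0.5 * std::log((1.0 - epsilon_t) / epsilon_t);
        // misclassified samples gain weight, correctly classified ones lose weight
        auto factor = torch::where(wrong.to(torch::kBool),
                                   torch::full_like(weights, std::exp(alpha_t)),
                                   torch::full_like(weights, std::exp(-alpha_t)));
        weights = weights * factor;
        weights = weights / weights.sum();                            // renormalize
        return { weights, alpha_t, false };
    }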
@@ -30,42 +30,51 @@ namespace bayesnet {
    }
    samples.index_put_({ -1, "..." }, torch::tensor(labels, torch::kInt32));
}
std::vector<std::pair<int, int>> Metrics::SelectKPairs(const torch::Tensor& weights, bool ascending, unsigned k)
std::vector<std::pair<int, int>> Metrics::SelectKPairs(const torch::Tensor& weights, std::vector<int>& featuresExcluded, bool ascending, unsigned k)
{
    // Return the K Best features
    auto n = features.size();
    if (k == 0) {
        k = n;
    }
    // compute scores
    scoresKPairs.clear();
    pairsKBest.clear();
    auto label = samples.index({ -1, "..." });
    // for (int i = 0; i < n; ++i) {
    // for (int j = i + 1; j < n; ++j) {
    // scoresKBest.push_back(mutualInformation(samples.index({ i, "..." }), samples.index({ j, "..." }), weights));
    // featuresKBest.push_back(i);
    // featuresKBest.push_back(j);
    // }
    // }
    // // sort & reduce scores and features
    // if (ascending) {
    // sort(featuresKBest.begin(), featuresKBest.end(), [&](int i, int j)
    // { return scoresKBest[i] < scoresKBest[j]; });
    // sort(scoresKBest.begin(), scoresKBest.end(), std::less<double>());
    // if (k < n) {
    // for (int i = 0; i < n - k; ++i) {
    // featuresKBest.erase(featuresKBest.begin());
    // scoresKBest.erase(scoresKBest.begin());
    // }
    // }
    // } else {
    // sort(featuresKBest.begin(), featuresKBest.end(), [&](int i, int j)
    // { return scoresKBest[i] > scoresKBest[j]; });
    // sort(scoresKBest.begin(), scoresKBest.end(), std::greater<double>());
    // featuresKBest.resize(k);
    // scoresKBest.resize(k);
    // }
    auto labels = samples.index({ -1, "..." });
    for (int i = 0; i < n - 1; ++i) {
        if (std::find(featuresExcluded.begin(), featuresExcluded.end(), i) != featuresExcluded.end()) {
            continue;
        }
        for (int j = i + 1; j < n; ++j) {
            if (std::find(featuresExcluded.begin(), featuresExcluded.end(), j) != featuresExcluded.end()) {
                continue;
            }
            auto key = std::make_pair(i, j);
            auto value = conditionalMutualInformation(samples.index({ i, "..." }), samples.index({ j, "..." }), labels, weights);
            scoresKPairs.push_back({ key, value });
        }
    }
    // sort scores
    if (ascending) {
        sort(scoresKPairs.begin(), scoresKPairs.end(), [](auto& a, auto& b)
            { return a.second < b.second; });

    } else {
        sort(scoresKPairs.begin(), scoresKPairs.end(), [](auto& a, auto& b)
            { return a.second > b.second; });
    }
    for (auto& [pairs, score] : scoresKPairs) {
        pairsKBest.push_back(pairs);
    }
    if (k != 0 && k < pairsKBest.size()) {
        if (ascending) {
            int limit = pairsKBest.size() - k;
            for (int i = 0; i < limit; i++) {
                pairsKBest.erase(pairsKBest.begin());
                scoresKPairs.erase(scoresKPairs.begin());
            }
        } else {
            pairsKBest.resize(k);
            scoresKPairs.resize(k);
        }
    }
    return pairsKBest;
}
std::vector<int> Metrics::SelectKBestWeighted(const torch::Tensor& weights, bool ascending, unsigned k)
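For reference (this identity is not part of the diff): the pair score computed above is the class-conditional mutual information, which in its usual form is I(X_i; X_j | C) = H(X_i | C) + H(X_j | C) - H(X_i, X_j | C), so a high score marks a pair of features that carry strongly overlapping-but-complementary information once the class is known.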
@@ -107,7 +116,10 @@ namespace bayesnet {
{
    return scoresKBest;
}

std::vector<std::pair<std::pair<int, int>, double>> Metrics::getScoresKPairs() const
{
    return scoresKPairs;
}
torch::Tensor Metrics::conditionalEdge(const torch::Tensor& weights)
{
    auto result = std::vector<double>();
@@ -16,8 +16,9 @@ namespace bayesnet {
    Metrics(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int classNumStates);
    Metrics(const std::vector<std::vector<int>>& vsamples, const std::vector<int>& labels, const std::vector<std::string>& features, const std::string& className, const int classNumStates);
    std::vector<int> SelectKBestWeighted(const torch::Tensor& weights, bool ascending = false, unsigned k = 0);
    std::vector<std::pair<int, int>> SelectKPairs(const torch::Tensor& weights, bool ascending = false, unsigned k = 0);
    std::vector<std::pair<int, int>> SelectKPairs(const torch::Tensor& weights, std::vector<int>& featuresExcluded, bool ascending = false, unsigned k = 0);
    std::vector<double> getScoresKBest() const;
    std::vector<std::pair<std::pair<int, int>, double>> getScoresKPairs() const;
    double mutualInformation(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& weights);
    double conditionalMutualInformation(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& labels, const torch::Tensor& weights);
    torch::Tensor conditionalEdge(const torch::Tensor& weights);
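The new `SelectKPairs` overload declared above takes the list of features to exclude (those already consumed by earlier models) and, with k = 0, returns every remaining pair sorted by score. A minimal usage sketch follows; the function name `rankRemainingPairs`, the caller-supplied arguments, and the include path are illustrative assumptions, not part of this diff.

    #include <utility>
    #include <vector>
    #include <torch/torch.h>
    #include "bayesnet/utils/BayesMetrics.h"  // assumed header location

    // Sketch: rank the feature pairs still available for the next SPnDE models.
    std::vector<std::pair<int, int>> rankRemainingPairs(bayesnet::Metrics& metrics,
                                                        const torch::Tensor& weights,
                                                        std::vector<int>& featuresUsed)
    {
        // ascending = false -> best-scoring pairs first; k = 0 -> return every pair
        return metrics.SelectKPairs(weights, featuresUsed, false, 0);
    }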
@@ -34,7 +35,7 @@ namespace bayesnet {
    std::vector<std::pair<T, T>> doCombinations(const std::vector<T>& source)
    {
        std::vector<std::pair<T, T>> result;
        for (int i = 0; i < source.size(); ++i) {
        for (int i = 0; i < source.size() - 1; ++i) {
            T temp = source[i];
            for (int j = i + 1; j < source.size(); ++j) {
                result.push_back({ temp, source[j] });
@@ -42,7 +43,7 @@ namespace bayesnet {
        }
        return result;
    }
    template <class T>
    template <class T>
    T pop_first(std::vector<T>& v)
    {
        T temp = v[0];
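The only substantive change in `doCombinations` is the outer loop bound: since the inner loop starts at i + 1, the last index can never contribute a pair, so stopping at size() - 1 just skips an empty iteration. A standalone sketch of the same idea is shown below (the `combinations` name and the demo in main are hypothetical; the bound is written as i + 1 < size() here only to stay safe for an empty vector, because size() - 1 wraps around on an unsigned size_t).

    #include <cstdio>
    #include <utility>
    #include <vector>

    // Every unordered pair (i, j) with i < j, in input order.
    // Stopping the outer loop before the last index skips an iteration whose
    // inner loop would be empty anyway.
    template <class T>
    std::vector<std::pair<T, T>> combinations(const std::vector<T>& source)
    {
        std::vector<std::pair<T, T>> result;
        for (size_t i = 0; i + 1 < source.size(); ++i) {
            for (size_t j = i + 1; j < source.size(); ++j) {
                result.push_back({ source[i], source[j] });
            }
        }
        return result;
    }

    int main()
    {
        auto pairs = combinations<int>({ 0, 1, 2, 3 });
        for (const auto& [a, b] : pairs) {
            std::printf("(%d, %d)\n", a, b);  // prints the 6 pairs of 4 features
        }
    }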
@@ -54,7 +55,7 @@ namespace bayesnet {
    std::vector<double> scoresKBest;
    std::vector<int> featuresKBest; // sorted indices of the features
    std::vector<std::pair<int, int>> pairsKBest; // sorted indices of the pairs
    std::map<std::pair<int, int>, double> scoresKPairs;
    std::vector<std::pair<std::pair<int, int>, double>> scoresKPairs;
    double conditionalEntropy(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& weights);
};
}
@@ -137,3 +137,132 @@ TEST_CASE("Conditional Mutual Information", "[Metrics]")
        }
    }
}
TEST_CASE("Select K Pairs descending", "[Metrics]")
{
    auto raw = RawDatasets("iris", true);
    bayesnet::Metrics metrics(raw.dataset, raw.features, raw.className, raw.classNumStates);
    std::vector<int> empty;
    auto results = metrics.SelectKPairs(raw.weights, empty, false);
    auto expected = std::vector<std::pair<std::pair<int, int>, double>>{
        { { 1, 3 }, 1.31852 },
        { { 1, 2 }, 1.17112 },
        { { 0, 3 }, 0.403749 },
        { { 0, 2 }, 0.287696 },
        { { 2, 3 }, 0.210068 },
        { { 0, 1 }, 0.0 },
    };
    auto scores = metrics.getScoresKPairs();
    for (int i = 0; i < results.size(); ++i) {
        auto result = results[i];
        auto expect = expected[i];
        auto score = scores[i];
        REQUIRE(result.first == expect.first.first);
        REQUIRE(result.second == expect.first.second);
        REQUIRE(score.first.first == expect.first.first);
        REQUIRE(score.first.second == expect.first.second);
        REQUIRE(score.second == Catch::Approx(expect.second).epsilon(raw.epsilon));
    }
    REQUIRE(results.size() == 6);
    REQUIRE(scores.size() == 6);
}
TEST_CASE("Select K Pairs ascending", "[Metrics]")
{
    auto raw = RawDatasets("iris", true);
    bayesnet::Metrics metrics(raw.dataset, raw.features, raw.className, raw.classNumStates);
    std::vector<int> empty;
    auto results = metrics.SelectKPairs(raw.weights, empty, true);
    auto expected = std::vector<std::pair<std::pair<int, int>, double>>{
        { { 0, 1 }, 0.0 },
        { { 2, 3 }, 0.210068 },
        { { 0, 2 }, 0.287696 },
        { { 0, 3 }, 0.403749 },
        { { 1, 2 }, 1.17112 },
        { { 1, 3 }, 1.31852 },
    };
    auto scores = metrics.getScoresKPairs();
    for (int i = 0; i < results.size(); ++i) {
        auto result = results[i];
        auto expect = expected[i];
        auto score = scores[i];
        REQUIRE(result.first == expect.first.first);
        REQUIRE(result.second == expect.first.second);
        REQUIRE(score.first.first == expect.first.first);
        REQUIRE(score.first.second == expect.first.second);
        REQUIRE(score.second == Catch::Approx(expect.second).epsilon(raw.epsilon));
    }
    REQUIRE(results.size() == 6);
    REQUIRE(scores.size() == 6);
}
TEST_CASE("Select K Pairs with features excluded", "[Metrics]")
{
    auto raw = RawDatasets("iris", true);
    bayesnet::Metrics metrics(raw.dataset, raw.features, raw.className, raw.classNumStates);
    std::vector<int> excluded = { 0, 3 };
    auto results = metrics.SelectKPairs(raw.weights, excluded, true);
    auto expected = std::vector<std::pair<std::pair<int, int>, double>>{
        { { 1, 2 }, 1.17112 },
    };
    auto scores = metrics.getScoresKPairs();
    for (int i = 0; i < results.size(); ++i) {
        auto result = results[i];
        auto expect = expected[i];
        auto score = scores[i];
        REQUIRE(result.first == expect.first.first);
        REQUIRE(result.second == expect.first.second);
        REQUIRE(score.first.first == expect.first.first);
        REQUIRE(score.first.second == expect.first.second);
        REQUIRE(score.second == Catch::Approx(expect.second).epsilon(raw.epsilon));
    }
    REQUIRE(results.size() == 1);
    REQUIRE(scores.size() == 1);
}
TEST_CASE("Select K Pairs with number of pairs descending", "[Metrics]")
{
    auto raw = RawDatasets("iris", true);
    bayesnet::Metrics metrics(raw.dataset, raw.features, raw.className, raw.classNumStates);
    std::vector<int> empty;
    auto results = metrics.SelectKPairs(raw.weights, empty, false, 3);
    auto expected = std::vector<std::pair<std::pair<int, int>, double>>{
        { { 1, 3 }, 1.31852 },
        { { 1, 2 }, 1.17112 },
        { { 0, 3 }, 0.403749 }
    };
    auto scores = metrics.getScoresKPairs();
    REQUIRE(results.size() == 3);
    REQUIRE(scores.size() == 3);
    for (int i = 0; i < results.size(); ++i) {
        auto result = results[i];
        auto expect = expected[i];
        auto score = scores[i];
        REQUIRE(result.first == expect.first.first);
        REQUIRE(result.second == expect.first.second);
        REQUIRE(score.first.first == expect.first.first);
        REQUIRE(score.first.second == expect.first.second);
        REQUIRE(score.second == Catch::Approx(expect.second).epsilon(raw.epsilon));
    }
}
TEST_CASE("Select K Pairs with number of pairs ascending", "[Metrics]")
{
    auto raw = RawDatasets("iris", true);
    bayesnet::Metrics metrics(raw.dataset, raw.features, raw.className, raw.classNumStates);
    std::vector<int> empty;
    auto results = metrics.SelectKPairs(raw.weights, empty, true, 3);
    auto expected = std::vector<std::pair<std::pair<int, int>, double>>{
        { { 0, 3 }, 0.403749 },
        { { 1, 2 }, 1.17112 },
        { { 1, 3 }, 1.31852 }
    };
    auto scores = metrics.getScoresKPairs();
    REQUIRE(results.size() == 3);
    REQUIRE(scores.size() == 3);
    for (int i = 0; i < results.size(); ++i) {
        auto result = results[i];
        auto expect = expected[i];
        auto score = scores[i];
        REQUIRE(result.first == expect.first.first);
        REQUIRE(result.second == expect.first.second);
        REQUIRE(score.first.first == expect.first.first);
        REQUIRE(score.first.second == expect.first.second);
        REQUIRE(score.second == Catch::Approx(expect.second).epsilon(raw.epsilon));
    }
}
@@ -56,14 +56,14 @@ TEST_CASE("Test Bayesian Classifiers score & version", "[Models]")
            auto raw = RawDatasets(file_name, discretize);
            clf->fit(raw.Xt, raw.yt, raw.features, raw.className, raw.states);
            auto score = clf->score(raw.Xt, raw.yt);
            INFO("Classifier: " + name + " File: " + file_name);
            INFO("Classifier: " << name << " File: " << file_name);
            REQUIRE(score == Catch::Approx(scores[{file_name, name}]).epsilon(raw.epsilon));
            REQUIRE(clf->getStatus() == bayesnet::NORMAL);
        }
    }
    SECTION("Library check version")
    {
        INFO("Checking version of " + name + " classifier");
        INFO("Checking version of " << name << " classifier");
        REQUIRE(clf->getVersion() == ACTUAL_VERSION);
    }
    delete clf;
@@ -8,6 +8,7 @@
#include <catch2/catch_test_macros.hpp>
#include <catch2/catch_approx.hpp>
#include <catch2/generators/catch_generators.hpp>
#include <catch2/matchers/catch_matchers.hpp>
#include "bayesnet/ensembles/BoostAODE.h"
#include "TestUtils.h"