Initial commit
49
bayesnet/BaseClassifier.h
Normal file
@@ -0,0 +1,49 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#pragma once
#include <vector>
#include <torch/torch.h>
#include <nlohmann/json.hpp>
#include "bayesnet/network/Network.h"

namespace bayesnet {
    enum status_t { NORMAL, WARNING, ERROR };
    class BaseClassifier {
    public:
        virtual ~BaseClassifier() = default;
        // X is an nxm std::vector, y is an nx1 std::vector
        virtual BaseClassifier& fit(std::vector<std::vector<int>>& X, std::vector<int>& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) = 0;
        // X is an nxm tensor, y is an nx1 tensor
        virtual BaseClassifier& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) = 0;
        virtual BaseClassifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) = 0;
        virtual BaseClassifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const Smoothing_t smoothing) = 0;
        virtual torch::Tensor predict(torch::Tensor& X) = 0;
        virtual std::vector<int> predict(std::vector<std::vector<int>>& X) = 0;
        virtual torch::Tensor predict_proba(torch::Tensor& X) = 0;
        virtual std::vector<std::vector<double>> predict_proba(std::vector<std::vector<int>>& X) = 0;
        virtual status_t getStatus() const = 0;
        virtual float score(std::vector<std::vector<int>>& X, std::vector<int>& y) = 0;
        virtual float score(torch::Tensor& X, torch::Tensor& y) = 0;
        virtual int getNumberOfNodes() const = 0;
        virtual int getNumberOfEdges() const = 0;
        virtual int getNumberOfStates() const = 0;
        virtual int getClassNumStates() const = 0;
        virtual std::vector<std::string> show() const = 0;
        virtual std::vector<std::string> graph(const std::string& title = "") const = 0;
        virtual std::string getVersion() = 0;
        virtual std::vector<std::string> topological_order() = 0;
        virtual std::vector<std::string> getNotes() const = 0;
        virtual std::string dump_cpt() const = 0;
        virtual void setHyperparameters(const nlohmann::json& hyperparameters) = 0;
        std::vector<std::string>& getValidHyperparameters() { return validHyperparameters; }
    protected:
        virtual void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) = 0;
        std::vector<std::string> validHyperparameters;
        std::vector<std::string> notes; // Used to store messages that occurred during the fit process
        status_t status = NORMAL;
    };
}
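A minimal usage sketch of this interface (not part of the commit; the toy data, state map, and the choice of the TAN classifier added later in this commit are illustrative assumptions):

#include <iostream>
#include <map>
#include <string>
#include <vector>
#include "bayesnet/classifiers/TAN.h"

int main()
{
    // Two discretized features (rows) by four samples (columns), plus labels.
    std::vector<std::vector<int>> X = { { 0, 1, 1, 0 }, { 1, 1, 0, 0 } };
    std::vector<int> y = { 0, 1, 1, 0 };
    std::vector<std::string> features = { "f1", "f2" };
    std::map<std::string, std::vector<int>> states = {
        { "f1", { 0, 1 } }, { "f2", { 0, 1 } }, { "class", { 0, 1 } }
    };
    bayesnet::TAN clf;
    clf.fit(X, y, features, "class", states, bayesnet::Smoothing_t::LAPLACE);
    std::cout << "accuracy: " << clf.score(X, y) << "\n"; // fraction of correct predictions
    return 0;
}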
13
bayesnet/CMakeLists.txt
Normal file
@@ -0,0 +1,13 @@
include_directories(
    ${BayesNet_SOURCE_DIR}/lib/log
    ${BayesNet_SOURCE_DIR}/lib/mdlp/src
    ${BayesNet_SOURCE_DIR}/lib/folding
    ${BayesNet_SOURCE_DIR}/lib/json/include
    ${BayesNet_SOURCE_DIR}
    ${CMAKE_BINARY_DIR}/configured_files/include
)

file(GLOB_RECURSE Sources "*.cc")

add_library(BayesNet ${Sources})
target_link_libraries(BayesNet fimdlp "${TORCH_LIBRARIES}")
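A hedged sketch of how a consumer target might link against the library (the directory layout, the demo target, and demo.cc are assumptions, not part of the commit):

# Hypothetical consumer CMakeLists.txt
add_subdirectory(bayesnet)           # the directory added by this commit
add_executable(demo demo.cc)         # assumed example source
target_link_libraries(demo BayesNet "${TORCH_LIBRARIES}")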
193
bayesnet/classifiers/Classifier.cc
Normal file
@@ -0,0 +1,193 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#include <sstream>
#include "bayesnet/utils/bayesnetUtils.h"
#include "Classifier.h"

namespace bayesnet {
    Classifier::Classifier(Network model) : model(model), m(0), n(0), metrics(Metrics()), fitted(false) {}
    Classifier& Classifier::build(const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const Smoothing_t smoothing)
    {
        this->features = features;
        this->className = className;
        this->states = states;
        m = dataset.size(1);
        n = features.size();
        checkFitParameters();
        auto n_classes = states.at(className).size();
        metrics = Metrics(dataset, features, className, n_classes);
        model.initialize();
        buildModel(weights);
        trainModel(weights, smoothing);
        fitted = true;
        return *this;
    }
    void Classifier::buildDataset(torch::Tensor& ytmp)
    {
        try {
            auto yresized = torch::transpose(ytmp.view({ ytmp.size(0), 1 }), 0, 1);
            dataset = torch::cat({ dataset, yresized }, 0);
        }
        catch (const std::exception& e) {
            std::stringstream oss;
            oss << "* Error in X and y dimensions *\n";
            oss << "X dimensions: " << dataset.sizes() << "\n";
            oss << "y dimensions: " << ytmp.sizes();
            throw std::runtime_error(oss.str());
        }
    }
    void Classifier::trainModel(const torch::Tensor& weights, Smoothing_t smoothing)
    {
        model.fit(dataset, weights, features, className, states, smoothing);
    }
    // X is nxm where n is the number of features and m the number of samples
    Classifier& Classifier::fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing)
    {
        dataset = X;
        buildDataset(y);
        const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble);
        return build(features, className, states, weights, smoothing);
    }
    // X is nxm where n is the number of features and m the number of samples
    Classifier& Classifier::fit(std::vector<std::vector<int>>& X, std::vector<int>& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing)
    {
        dataset = torch::zeros({ static_cast<int>(X.size()), static_cast<int>(X[0].size()) }, torch::kInt32);
        for (int i = 0; i < static_cast<int>(X.size()); ++i) {
            dataset.index_put_({ i, "..." }, torch::tensor(X[i], torch::kInt32));
        }
        auto ytmp = torch::tensor(y, torch::kInt32);
        buildDataset(ytmp);
        const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble);
        return build(features, className, states, weights, smoothing);
    }
    Classifier& Classifier::fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing)
    {
        this->dataset = dataset;
        const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble);
        return build(features, className, states, weights, smoothing);
    }
    Classifier& Classifier::fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const Smoothing_t smoothing)
    {
        this->dataset = dataset;
        return build(features, className, states, weights, smoothing);
    }
    void Classifier::checkFitParameters()
    {
        if (torch::is_floating_point(dataset)) {
            throw std::invalid_argument("dataset (X, y) must be of type Integer");
        }
        if (dataset.size(0) - 1 != static_cast<long>(features.size())) {
            throw std::invalid_argument("Classifier: X " + std::to_string(dataset.size(0) - 1) + " and features " + std::to_string(features.size()) + " must have the same number of features");
        }
        if (states.find(className) == states.end()) {
            throw std::invalid_argument("class name not found in states");
        }
        for (const auto& feature : features) {
            if (states.find(feature) == states.end()) {
                throw std::invalid_argument("feature [" + feature + "] not found in states");
            }
        }
    }
    torch::Tensor Classifier::predict(torch::Tensor& X)
    {
        if (!fitted) {
            throw std::logic_error(CLASSIFIER_NOT_FITTED);
        }
        return model.predict(X);
    }
    std::vector<int> Classifier::predict(std::vector<std::vector<int>>& X)
    {
        if (!fitted) {
            throw std::logic_error(CLASSIFIER_NOT_FITTED);
        }
        auto m_ = X[0].size();
        auto n_ = X.size();
        std::vector<std::vector<int>> Xd(n_, std::vector<int>(m_, 0));
        for (size_t i = 0; i < n_; i++) {
            Xd[i] = std::vector<int>(X[i].begin(), X[i].end());
        }
        auto yp = model.predict(Xd);
        return yp;
    }
    torch::Tensor Classifier::predict_proba(torch::Tensor& X)
    {
        if (!fitted) {
            throw std::logic_error(CLASSIFIER_NOT_FITTED);
        }
        return model.predict_proba(X);
    }
    std::vector<std::vector<double>> Classifier::predict_proba(std::vector<std::vector<int>>& X)
    {
        if (!fitted) {
            throw std::logic_error(CLASSIFIER_NOT_FITTED);
        }
        auto m_ = X[0].size();
        auto n_ = X.size();
        std::vector<std::vector<int>> Xd(n_, std::vector<int>(m_, 0));
        // Convert to an nxm vector
        for (size_t i = 0; i < n_; i++) {
            Xd[i] = std::vector<int>(X[i].begin(), X[i].end());
        }
        auto yp = model.predict_proba(Xd);
        return yp;
    }
    float Classifier::score(torch::Tensor& X, torch::Tensor& y)
    {
        torch::Tensor y_pred = predict(X);
        return (y_pred == y).sum().item<float>() / y.size(0);
    }
    float Classifier::score(std::vector<std::vector<int>>& X, std::vector<int>& y)
    {
        if (!fitted) {
            throw std::logic_error(CLASSIFIER_NOT_FITTED);
        }
        return model.score(X, y);
    }
    std::vector<std::string> Classifier::show() const
    {
        return model.show();
    }
    void Classifier::addNodes()
    {
        // Add all nodes to the network
        for (const auto& feature : features) {
            model.addNode(feature);
        }
        model.addNode(className);
    }
    int Classifier::getNumberOfNodes() const
    {
        // model.getFeatures() does not include the class node
        return fitted ? model.getFeatures().size() : 0;
    }
    int Classifier::getNumberOfEdges() const
    {
        return fitted ? model.getNumEdges() : 0;
    }
    int Classifier::getNumberOfStates() const
    {
        return fitted ? model.getStates() : 0;
    }
    int Classifier::getClassNumStates() const
    {
        return fitted ? model.getClassNumStates() : 0;
    }
    std::vector<std::string> Classifier::topological_order()
    {
        return model.topological_sort();
    }
    std::string Classifier::dump_cpt() const
    {
        return model.dump_cpt();
    }
    void Classifier::setHyperparameters(const nlohmann::json& hyperparameters)
    {
        if (!hyperparameters.empty()) {
            throw std::invalid_argument("Invalid hyperparameters " + hyperparameters.dump());
        }
    }
}
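For reference, a sketch of the internal dataset layout that fit()/buildDataset() above produce (values are made up): features occupy the first n rows and the class labels become the last row.

// 2 features x 3 samples; buildDataset() appends y as the last row:
torch::Tensor X = torch::tensor({ { 0, 1, 1 }, { 1, 0, 1 } }, torch::kInt32); // nxm
torch::Tensor y = torch::tensor({ 0, 1, 1 }, torch::kInt32);                  // m labels
// After Classifier::fit(X, y, ...), the protected member holds:
// dataset == [[0, 1, 1],
//             [1, 0, 1],
//             [0, 1, 1]]   // (n+1)xm, last row is y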
63
bayesnet/classifiers/Classifier.h
Normal file
@@ -0,0 +1,63 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef CLASSIFIER_H
#define CLASSIFIER_H
#include <torch/torch.h>
#include "bayesnet/utils/BayesMetrics.h"
#include "bayesnet/BaseClassifier.h"

namespace bayesnet {
    class Classifier : public BaseClassifier {
    public:
        Classifier(Network model);
        virtual ~Classifier() = default;
        Classifier& fit(std::vector<std::vector<int>>& X, std::vector<int>& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) override;
        Classifier& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) override;
        Classifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) override;
        Classifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const Smoothing_t smoothing) override;
        void addNodes();
        int getNumberOfNodes() const override;
        int getNumberOfEdges() const override;
        int getNumberOfStates() const override;
        int getClassNumStates() const override;
        torch::Tensor predict(torch::Tensor& X) override;
        std::vector<int> predict(std::vector<std::vector<int>>& X) override;
        torch::Tensor predict_proba(torch::Tensor& X) override;
        std::vector<std::vector<double>> predict_proba(std::vector<std::vector<int>>& X) override;
        status_t getStatus() const override { return status; }
        std::string getVersion() override { return { project_version.begin(), project_version.end() }; }
        float score(torch::Tensor& X, torch::Tensor& y) override;
        float score(std::vector<std::vector<int>>& X, std::vector<int>& y) override;
        std::vector<std::string> show() const override;
        std::vector<std::string> topological_order() override;
        std::vector<std::string> getNotes() const override { return notes; }
        std::string dump_cpt() const override;
        void setHyperparameters(const nlohmann::json& hyperparameters) override; // For classifiers that don't have hyperparameters
    protected:
        bool fitted;
        unsigned int m, n; // m: number of samples, n: number of features
        Network model;
        Metrics metrics;
        std::vector<std::string> features;
        std::string className;
        std::map<std::string, std::vector<int>> states;
        torch::Tensor dataset; // (n+1)xm tensor
        void checkFitParameters();
        virtual void buildModel(const torch::Tensor& weights) = 0;
        void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) override;
        void buildDataset(torch::Tensor& y);
        const std::string CLASSIFIER_NOT_FITTED = "Classifier has not been fitted";
    private:
        Classifier& build(const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const Smoothing_t smoothing);
    };
}
#endif
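A hedged sketch of what a concrete subclass looks like (a hypothetical naive-Bayes-style structure, not part of the commit): the subclass only has to supply buildModel() and graph().

#include "bayesnet/classifiers/Classifier.h"

class NaiveBayesLike : public bayesnet::Classifier {
public:
    NaiveBayesLike() : Classifier(bayesnet::Network()) {}
    std::vector<std::string> graph(const std::string& title = "NB") const override
    {
        return model.graph(title);
    }
protected:
    void buildModel(const torch::Tensor& weights) override
    {
        addNodes();                            // one node per feature, plus the class
        for (const auto& feature : features) {
            model.addEdge(className, feature); // the class is the sole parent of every feature
        }
    }
};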
111
bayesnet/classifiers/KDB.cc
Normal file
@@ -0,0 +1,111 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#include "bayesnet/utils/bayesnetUtils.h"
#include "KDB.h"

namespace bayesnet {
    KDB::KDB(int k, float theta) : Classifier(Network()), k(k), theta(theta)
    {
        validHyperparameters = { "k", "theta" };
    }
    void KDB::setHyperparameters(const nlohmann::json& hyperparameters_)
    {
        auto hyperparameters = hyperparameters_;
        if (hyperparameters.contains("k")) {
            k = hyperparameters["k"];
            hyperparameters.erase("k");
        }
        if (hyperparameters.contains("theta")) {
            theta = hyperparameters["theta"];
            hyperparameters.erase("theta");
        }
        Classifier::setHyperparameters(hyperparameters);
    }
    void KDB::buildModel(const torch::Tensor& weights)
    {
        /*
        1. For each feature Xi, compute mutual information, I(Xi;C),
        where C is the class.
        2. Compute class conditional mutual information I(Xi;Xj|C), for each
        pair of features Xi and Xj, where i != j.
        3. Let the used variable list, S, be empty.
        4. Let the DAG network being constructed, BN, begin with a single
        class node, C.
        5. Repeat until S includes all domain features
        5.1. Select feature Xmax which is not in S and has the largest value
        I(Xmax;C).
        5.2. Add a node to BN representing Xmax.
        5.3. Add an arc from C to Xmax in BN.
        5.4. Add m = min(|S|, k) arcs from m distinct features Xj in S with
        the highest value for I(Xmax;Xj|C).
        5.5. Add Xmax to S.
        Compute the conditional probability inferred by the structure of BN by
        using counts from DB, and output BN.
        */
        // 1. For each feature Xi, compute mutual information, I(Xi;C),
        // where C is the class.
        addNodes();
        const torch::Tensor& y = dataset.index({ -1, "..." });
        std::vector<double> mi;
        for (auto i = 0; i < features.size(); i++) {
            torch::Tensor firstFeature = dataset.index({ i, "..." });
            mi.push_back(metrics.mutualInformation(firstFeature, y, weights));
        }
        // 2. Compute class conditional mutual information I(Xi;Xj|C) for each pair
        auto conditionalEdgeWeights = metrics.conditionalEdge(weights);
        // 3. Let the used variable list, S, be empty.
        std::vector<int> S;
        // 4. Let the DAG network being constructed, BN, begin with a single
        // class node, C.
        // 5. Repeat until S includes all domain features
        // 5.1. Select feature Xmax which is not in S and has the largest value
        // I(Xmax;C).
        auto order = argsort(mi);
        for (auto idx : order) {
            // 5.2. Add a node to BN representing Xmax.
            // 5.3. Add an arc from C to Xmax in BN.
            model.addEdge(className, features[idx]);
            // 5.4. Add m = min(|S|, k) arcs from m distinct features Xj in S with
            // the highest value for I(Xmax;Xj|C).
            add_m_edges(idx, S, conditionalEdgeWeights);
            // 5.5. Add Xmax to S.
            S.push_back(idx);
        }
    }
    void KDB::add_m_edges(int idx, std::vector<int>& S, torch::Tensor& weights)
    {
        auto n_edges = std::min(k, static_cast<int>(S.size()));
        auto cond_w = clone(weights);
        bool exit_cond = k == 0;
        int num = 0;
        while (!exit_cond) {
            auto max_minfo = argmax(cond_w.index({ idx, "..." })).item<int>();
            auto belongs = find(S.begin(), S.end(), max_minfo) != S.end();
            if (belongs && cond_w.index({ idx, max_minfo }).item<float>() > theta) {
                try {
                    model.addEdge(features[max_minfo], features[idx]);
                    num++;
                }
                catch (const std::invalid_argument& e) {
                    // Loops are not allowed
                }
            }
            cond_w.index_put_({ idx, max_minfo }, -1);
            auto candidates_mask = cond_w.index({ idx, "..." }).gt(theta);
            auto candidates = candidates_mask.nonzero();
            exit_cond = num == n_edges || candidates.size(0) == 0;
        }
    }
    std::vector<std::string> KDB::graph(const std::string& title) const
    {
        std::string header{ title };
        if (title == "KDB") {
            header += " (k=" + std::to_string(k) + ", theta=" + std::to_string(theta) + ")";
        }
        return model.graph(header);
    }
}
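For instance, this is how the hyperparameter handling above would be driven (an assumed configuration, mirroring the code):

bayesnet::KDB kdb(2); // k = 2, default theta
kdb.setHyperparameters(nlohmann::json{ { "k", 3 }, { "theta", 0.1 } });
// Any unrecognized key falls through to Classifier::setHyperparameters,
// which throws std::invalid_argument.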
26
bayesnet/classifiers/KDB.h
Normal file
@@ -0,0 +1,26 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef KDB_H
#define KDB_H
#include <torch/torch.h>
#include "Classifier.h"
namespace bayesnet {
    class KDB : public Classifier {
    private:
        int k;
        float theta;
    protected:
        void add_m_edges(int idx, std::vector<int>& S, torch::Tensor& weights);
        void buildModel(const torch::Tensor& weights) override;
    public:
        explicit KDB(int k, float theta = 0.03);
        virtual ~KDB() = default;
        void setHyperparameters(const nlohmann::json& hyperparameters_) override;
        std::vector<std::string> graph(const std::string& name = "KDB") const override;
    };
}
#endif
35
bayesnet/classifiers/KDBLd.cc
Normal file
@@ -0,0 +1,35 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#include "KDBLd.h"

namespace bayesnet {
    KDBLd::KDBLd(int k) : KDB(k), Proposal(dataset, features, className) {}
    KDBLd& KDBLd::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_, const Smoothing_t smoothing)
    {
        checkInput(X_, y_);
        features = features_;
        className = className_;
        Xf = X_;
        y = y_;
        // Fills std::vectors Xv & yv with the data from the tensors X_ (discretized) & y
        states = fit_local_discretization(y);
        // We have discretized the input data
        // 1st we need to fit the model to build the normal KDB structure; KDB::fit initializes the base Bayesian network
        KDB::fit(dataset, features, className, states, smoothing);
        states = localDiscretizationProposal(states, model);
        return *this;
    }
    torch::Tensor KDBLd::predict(torch::Tensor& X)
    {
        auto Xt = prepareX(X);
        return KDB::predict(Xt);
    }
    std::vector<std::string> KDBLd::graph(const std::string& name) const
    {
        return KDB::graph(name);
    }
}
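A usage sketch for the locally discretized variant (the random data is an assumption; KDBLd requires continuous float features, which it discretizes internally):

torch::Tensor Xf = torch::rand({ 2, 100 });                      // 2 continuous features, 100 samples
torch::Tensor yt = torch::randint(0, 2, { 100 }, torch::kInt32); // binary labels
std::map<std::string, std::vector<int>> states;                  // filled by fit_local_discretization
bayesnet::KDBLd clf(2);                                          // k = 2
clf.fit(Xf, yt, { "f1", "f2" }, "class", states, bayesnet::Smoothing_t::ORIGINAL);
auto y_pred = clf.predict(Xf);                                   // discretizes Xf internally via prepareX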
24
bayesnet/classifiers/KDBLd.h
Normal file
@@ -0,0 +1,24 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef KDBLD_H
#define KDBLD_H
#include "Proposal.h"
#include "KDB.h"

namespace bayesnet {
    class KDBLd : public KDB, public Proposal {
    private:
    public:
        explicit KDBLd(int k);
        virtual ~KDBLd() = default;
        KDBLd& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) override;
        std::vector<std::string> graph(const std::string& name = "KDB") const override;
        torch::Tensor predict(torch::Tensor& X) override;
        static inline std::string version() { return "0.0.1"; }
    };
}
#endif // !KDBLD_H
129
bayesnet/classifiers/Proposal.cc
Normal file
@@ -0,0 +1,129 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#include "Proposal.h"

namespace bayesnet {
    Proposal::Proposal(torch::Tensor& dataset_, std::vector<std::string>& features_, std::string& className_) : pDataset(dataset_), pFeatures(features_), pClassName(className_) {}
    Proposal::~Proposal()
    {
        for (auto& [key, value] : discretizers) {
            delete value;
        }
    }
    void Proposal::checkInput(const torch::Tensor& X, const torch::Tensor& y)
    {
        if (!torch::is_floating_point(X)) {
            throw std::invalid_argument("X must be a floating point tensor");
        }
        if (torch::is_floating_point(y)) {
            throw std::invalid_argument("y must be an integer tensor");
        }
    }
    map<std::string, std::vector<int>> Proposal::localDiscretizationProposal(const map<std::string, std::vector<int>>& oldStates, Network& model)
    {
        // The order of local discretization is important: a plain 0, 1, 2... pass is not good enough,
        // even though we re-discretize the affected features after the local discretization of every feature
        auto order = model.topological_sort();
        auto& nodes = model.getNodes();
        map<std::string, std::vector<int>> states = oldStates;
        std::vector<int> indicesToReDiscretize;
        bool upgrade = false; // Flag to check if we need to upgrade the model
        for (const auto& feature : order) {
            auto nodeParents = nodes[feature]->getParents();
            if (nodeParents.size() < 2) continue; // Only has the class as parent
            upgrade = true;
            int index = find(pFeatures.begin(), pFeatures.end(), feature) - pFeatures.begin();
            indicesToReDiscretize.push_back(index); // We need to re-discretize this feature
            std::vector<std::string> parents;
            transform(nodeParents.begin(), nodeParents.end(), back_inserter(parents), [](const auto& p) { return p->getName(); });
            // Remove the class as parent, as it will be added later
            parents.erase(remove(parents.begin(), parents.end(), pClassName), parents.end());
            // Get the indices of the parents
            std::vector<int> indices;
            indices.push_back(-1); // Add the class index
            transform(parents.begin(), parents.end(), back_inserter(indices), [&](const auto& p) { return find(pFeatures.begin(), pFeatures.end(), p) - pFeatures.begin(); });
            // Now we fit the discretizer of the feature, conditioned on its parents and the class,
            // i.e. discretizer.fit(X[index], X[indices] + y)
            std::vector<std::string> yJoinParents(Xf.size(1));
            for (auto idx : indices) {
                for (int i = 0; i < Xf.size(1); ++i) {
                    yJoinParents[i] += to_string(pDataset.index({ idx, i }).item<int>());
                }
            }
            auto yxv = factorize(yJoinParents);
            auto xvf_ptr = Xf.index({ index }).data_ptr<float>();
            auto xvf = std::vector<mdlp::precision_t>(xvf_ptr, xvf_ptr + Xf.size(1));
            discretizers[feature]->fit(xvf, yxv);
        }
        if (upgrade) {
            // Discretize X again (only the affected indices) with the newly fitted discretizers
            for (auto index : indicesToReDiscretize) {
                auto Xt_ptr = Xf.index({ index }).data_ptr<float>();
                auto Xt = std::vector<float>(Xt_ptr, Xt_ptr + Xf.size(1));
                pDataset.index_put_({ index, "..." }, torch::tensor(discretizers[pFeatures[index]]->transform(Xt)));
                auto xStates = std::vector<int>(discretizers[pFeatures[index]]->getCutPoints().size() + 1);
                iota(xStates.begin(), xStates.end(), 0);
                // Update the new states of the feature/node
                states[pFeatures[index]] = xStates;
            }
            const torch::Tensor weights = torch::full({ pDataset.size(1) }, 1.0 / pDataset.size(1), torch::kDouble);
            model.fit(pDataset, weights, pFeatures, pClassName, states, Smoothing_t::ORIGINAL);
        }
        return states;
    }
    map<std::string, std::vector<int>> Proposal::fit_local_discretization(const torch::Tensor& y)
    {
        // Discretize the continuous input data and build pDataset (Classifier::dataset)
        int m = Xf.size(1);
        int n = Xf.size(0);
        map<std::string, std::vector<int>> states;
        pDataset = torch::zeros({ n + 1, m }, torch::kInt32);
        auto yv = std::vector<int>(y.data_ptr<int>(), y.data_ptr<int>() + y.size(0));
        // Discretize the input data feature by feature (row by row)
        for (auto i = 0; i < pFeatures.size(); ++i) {
            auto* discretizer = new mdlp::CPPFImdlp();
            auto Xt_ptr = Xf.index({ i }).data_ptr<float>();
            auto Xt = std::vector<float>(Xt_ptr, Xt_ptr + Xf.size(1));
            discretizer->fit(Xt, yv);
            pDataset.index_put_({ i, "..." }, torch::tensor(discretizer->transform(Xt)));
            auto xStates = std::vector<int>(discretizer->getCutPoints().size() + 1);
            iota(xStates.begin(), xStates.end(), 0);
            states[pFeatures[i]] = xStates;
            discretizers[pFeatures[i]] = discretizer;
        }
        int n_classes = torch::max(y).item<int>() + 1;
        auto yStates = std::vector<int>(n_classes);
        iota(yStates.begin(), yStates.end(), 0);
        states[pClassName] = yStates;
        pDataset.index_put_({ n, "..." }, y);
        return states;
    }
    torch::Tensor Proposal::prepareX(torch::Tensor& X)
    {
        auto Xtd = torch::zeros_like(X, torch::kInt32);
        for (int i = 0; i < X.size(0); ++i) {
            auto Xt = std::vector<float>(X[i].data_ptr<float>(), X[i].data_ptr<float>() + X.size(1));
            auto Xd = discretizers[pFeatures[i]]->transform(Xt);
            Xtd.index_put_({ i }, torch::tensor(Xd, torch::kInt32));
        }
        return Xtd;
    }
    std::vector<int> Proposal::factorize(const std::vector<std::string>& labels_t)
    {
        std::vector<int> yy;
        yy.reserve(labels_t.size());
        std::map<std::string, int> labelMap;
        int i = 0;
        for (const std::string& label : labels_t) {
            if (labelMap.find(label) == labelMap.end()) {
                labelMap[label] = i++;
            }
            yy.push_back(labelMap[label]);
        }
        return yy;
    }
}
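For reference, a small example of what factorize() above computes (made-up labels): joint parent/class value strings are mapped to dense integer codes in first-seen order.

std::vector<std::string> joined = { "00", "01", "00", "11" };
// factorize(joined) == { 0, 1, 0, 2 }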
37
bayesnet/classifiers/Proposal.h
Normal file
@@ -0,0 +1,37 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef PROPOSAL_H
#define PROPOSAL_H
#include <string>
#include <map>
#include <torch/torch.h>
#include <fimdlp/CPPFImdlp.h>
#include "bayesnet/network/Network.h"
#include "Classifier.h"

namespace bayesnet {
    class Proposal {
    public:
        Proposal(torch::Tensor& pDataset, std::vector<std::string>& features_, std::string& className_);
        virtual ~Proposal();
    protected:
        void checkInput(const torch::Tensor& X, const torch::Tensor& y);
        torch::Tensor prepareX(torch::Tensor& X);
        map<std::string, std::vector<int>> localDiscretizationProposal(const map<std::string, std::vector<int>>& states, Network& model);
        map<std::string, std::vector<int>> fit_local_discretization(const torch::Tensor& y);
        torch::Tensor Xf; // X continuous nxm tensor
        torch::Tensor y; // y discrete nx1 tensor
        map<std::string, mdlp::CPPFImdlp*> discretizers;
    private:
        std::vector<int> factorize(const std::vector<std::string>& labels_t);
        torch::Tensor& pDataset; // (n+1)xm tensor
        std::vector<std::string>& pFeatures;
        std::string& pClassName;
    };
}

#endif
46
bayesnet/classifiers/SPODE.cc
Normal file
@@ -0,0 +1,46 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#include "SPODE.h"

namespace bayesnet {

    SPODE::SPODE(int root) : Classifier(Network()), root(root)
    {
        validHyperparameters = { "parent" };
    }

    void SPODE::setHyperparameters(const nlohmann::json& hyperparameters_)
    {
        auto hyperparameters = hyperparameters_;
        if (hyperparameters.contains("parent")) {
            root = hyperparameters["parent"];
            hyperparameters.erase("parent");
        }
        Classifier::setHyperparameters(hyperparameters);
    }
    void SPODE::buildModel(const torch::Tensor& weights)
    {
        // 0. Add all nodes to the model
        addNodes();
        // 1. Add edges from the class node to all other nodes
        // 2. Add edges from the root node to all other nodes
        if (root >= static_cast<int>(features.size())) {
            throw std::invalid_argument("The parent node is not in the dataset");
        }
        for (int i = 0; i < static_cast<int>(features.size()); ++i) {
            model.addEdge(className, features[i]);
            if (i != root) {
                model.addEdge(features[root], features[i]);
            }
        }
    }
    std::vector<std::string> SPODE::graph(const std::string& name) const
    {
        return model.graph(name);
    }

}
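The resulting structure, for an assumed three-feature dataset with root = 1:

// buildModel() adds, for features { f0, f1, f2 } and root = 1:
//   class -> f0, class -> f1, class -> f2   (step 1)
//   f1 -> f0,    f1 -> f2                   (step 2, skipping the root itself)
bayesnet::SPODE spode(1);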
24
bayesnet/classifiers/SPODE.h
Normal file
@@ -0,0 +1,24 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef SPODE_H
#define SPODE_H
#include "Classifier.h"

namespace bayesnet {
    class SPODE : public Classifier {
    public:
        explicit SPODE(int root);
        virtual ~SPODE() = default;
        void setHyperparameters(const nlohmann::json& hyperparameters_) override;
        std::vector<std::string> graph(const std::string& name = "SPODE") const override;
    protected:
        void buildModel(const torch::Tensor& weights) override;
    private:
        int root;
    };
}
#endif
50
bayesnet/classifiers/SPODELd.cc
Normal file
@@ -0,0 +1,50 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#include "SPODELd.h"

namespace bayesnet {
    SPODELd::SPODELd(int root) : SPODE(root), Proposal(dataset, features, className) {}
    SPODELd& SPODELd::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_, const Smoothing_t smoothing)
    {
        checkInput(X_, y_);
        Xf = X_;
        y = y_;
        return commonFit(features_, className_, states_, smoothing);
    }

    SPODELd& SPODELd::fit(torch::Tensor& dataset, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_, const Smoothing_t smoothing)
    {
        if (!torch::is_floating_point(dataset)) {
            throw std::runtime_error("Dataset must be a floating point tensor");
        }
        Xf = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), "..." }).clone();
        y = dataset.index({ -1, "..." }).clone().to(torch::kInt32);
        return commonFit(features_, className_, states_, smoothing);
    }

    SPODELd& SPODELd::commonFit(const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_, const Smoothing_t smoothing)
    {
        features = features_;
        className = className_;
        // Fills std::vectors Xv & yv with the data from the tensors X_ (discretized) & y
        states = fit_local_discretization(y);
        // We have discretized the input data
        // 1st we need to fit the model to build the normal SPODE structure; SPODE::fit initializes the base Bayesian network
        SPODE::fit(dataset, features, className, states, smoothing);
        states = localDiscretizationProposal(states, model);
        return *this;
    }
    torch::Tensor SPODELd::predict(torch::Tensor& X)
    {
        auto Xt = prepareX(X);
        return SPODE::predict(Xt);
    }
    std::vector<std::string> SPODELd::graph(const std::string& name) const
    {
        return SPODE::graph(name);
    }
}
25
bayesnet/classifiers/SPODELd.h
Normal file
@@ -0,0 +1,25 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef SPODELD_H
#define SPODELD_H
#include "SPODE.h"
#include "Proposal.h"

namespace bayesnet {
    class SPODELd : public SPODE, public Proposal {
    public:
        explicit SPODELd(int root);
        virtual ~SPODELd() = default;
        SPODELd& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) override;
        SPODELd& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) override;
        SPODELd& commonFit(const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states, const Smoothing_t smoothing);
        std::vector<std::string> graph(const std::string& name = "SPODELd") const override;
        torch::Tensor predict(torch::Tensor& X) override;
        static inline std::string version() { return "0.0.1"; }
    };
}
#endif // !SPODELD_H
38
bayesnet/classifiers/SPnDE.cc
Normal file
@@ -0,0 +1,38 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#include "SPnDE.h"

namespace bayesnet {

    SPnDE::SPnDE(std::vector<int> parents) : Classifier(Network()), parents(parents) {}

    void SPnDE::buildModel(const torch::Tensor& weights)
    {
        // 0. Add all nodes to the model
        addNodes();
        std::vector<int> attributes;
        for (int i = 0; i < static_cast<int>(features.size()); ++i) {
            if (std::find(parents.begin(), parents.end(), i) == parents.end()) {
                attributes.push_back(i);
            }
        }
        // 1. Add edges from the class node to all other nodes
        // 2. Add edges from the parent nodes to all other nodes
        for (const auto& attribute : attributes) {
            model.addEdge(className, features[attribute]);
            for (const auto& root : parents) {
                model.addEdge(features[root], features[attribute]);
            }
        }
    }
    std::vector<std::string> SPnDE::graph(const std::string& name) const
    {
        return model.graph(name);
    }

}
26
bayesnet/classifiers/SPnDE.h
Normal file
@@ -0,0 +1,26 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef SPnDE_H
#define SPnDE_H
#include <vector>
#include "Classifier.h"

namespace bayesnet {
    class SPnDE : public Classifier {
    public:
        explicit SPnDE(std::vector<int> parents);
        virtual ~SPnDE() = default;
        std::vector<std::string> graph(const std::string& name = "SPnDE") const override;
    protected:
        void buildModel(const torch::Tensor& weights) override;
    private:
        std::vector<int> parents;
    };
}
#endif
60
bayesnet/classifiers/TAN.cc
Normal file
@@ -0,0 +1,60 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#include "TAN.h"

namespace bayesnet {
    TAN::TAN() : Classifier(Network())
    {
        validHyperparameters = { "parent" };
    }

    void TAN::setHyperparameters(const nlohmann::json& hyperparameters_)
    {
        auto hyperparameters = hyperparameters_;
        if (hyperparameters.contains("parent")) {
            parent = hyperparameters["parent"];
            hyperparameters.erase("parent");
        }
        Classifier::setHyperparameters(hyperparameters);
    }
    void TAN::buildModel(const torch::Tensor& weights)
    {
        // 0. Add all nodes to the model
        addNodes();
        // 1. Compute the mutual information between each feature and the class, and set the
        // root node to the feature with the highest mutual information with the class
        auto mi = std::vector<std::pair<int, float>>();
        torch::Tensor class_dataset = dataset.index({ -1, "..." });
        for (int i = 0; i < static_cast<int>(features.size()); ++i) {
            torch::Tensor feature_dataset = dataset.index({ i, "..." });
            auto mi_value = metrics.mutualInformation(class_dataset, feature_dataset, weights);
            mi.push_back({ i, mi_value });
        }
        sort(mi.begin(), mi.end(), [](const auto& left, const auto& right) { return left.second < right.second; });
        auto root = parent == -1 ? mi[mi.size() - 1].first : parent;
        if (root >= static_cast<int>(features.size())) {
            throw std::invalid_argument("The parent node is not in the dataset");
        }
        // 2. Compute the class conditional mutual information between every pair of features
        auto weights_matrix = metrics.conditionalEdge(weights);
        // 3. Compute the maximum spanning tree
        auto mst = metrics.maximumSpanningTree(features, weights_matrix, root);
        // 4. Add edges from the maximum spanning tree to the model
        for (auto i = 0; i < mst.size(); ++i) {
            auto [from, to] = mst[i];
            model.addEdge(features[from], features[to]);
        }
        // 5. Add edges from the class to all features
        for (const auto& feature : features) {
            model.addEdge(className, feature);
        }
    }
    std::vector<std::string> TAN::graph(const std::string& title) const
    {
        return model.graph(title);
    }
}
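A configuration sketch (the explicit root choice is an assumption; by default the feature with maximal I(Xi;C) is chosen, since mi is sorted ascending and the last entry is taken):

bayesnet::TAN tan;
tan.setHyperparameters(nlohmann::json{ { "parent", 0 } }); // force feature 0 as the MST root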
23
bayesnet/classifiers/TAN.h
Normal file
@@ -0,0 +1,23 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef TAN_H
#define TAN_H
#include "Classifier.h"
namespace bayesnet {
    class TAN : public Classifier {
    public:
        TAN();
        virtual ~TAN() = default;
        void setHyperparameters(const nlohmann::json& hyperparameters_) override;
        std::vector<std::string> graph(const std::string& name = "TAN") const override;
    protected:
        void buildModel(const torch::Tensor& weights) override;
    private:
        int parent = -1;
    };
}
#endif
36
bayesnet/classifiers/TANLd.cc
Normal file
@@ -0,0 +1,36 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#include "TANLd.h"

namespace bayesnet {
    TANLd::TANLd() : TAN(), Proposal(dataset, features, className) {}
    TANLd& TANLd::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_, const Smoothing_t smoothing)
    {
        checkInput(X_, y_);
        features = features_;
        className = className_;
        Xf = X_;
        y = y_;
        // Fills std::vectors Xv & yv with the data from the tensors X_ (discretized) & y
        states = fit_local_discretization(y);
        // We have discretized the input data
        // 1st we need to fit the model to build the normal TAN structure; TAN::fit initializes the base Bayesian network
        TAN::fit(dataset, features, className, states, smoothing);
        states = localDiscretizationProposal(states, model);
        return *this;
    }
    torch::Tensor TANLd::predict(torch::Tensor& X)
    {
        auto Xt = prepareX(X);
        return TAN::predict(Xt);
    }
    std::vector<std::string> TANLd::graph(const std::string& name) const
    {
        return TAN::graph(name);
    }
}
23
bayesnet/classifiers/TANLd.h
Normal file
@@ -0,0 +1,23 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef TANLD_H
#define TANLD_H
#include "TAN.h"
#include "Proposal.h"

namespace bayesnet {
    class TANLd : public TAN, public Proposal {
    private:
    public:
        TANLd();
        virtual ~TANLd() = default;
        TANLd& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) override;
        std::vector<std::string> graph(const std::string& name = "TANLd") const override;
        torch::Tensor predict(torch::Tensor& X) override;
    };
}
#endif // !TANLD_H
575
bayesnet/classifiers/XSP2DE.cc
Normal file
@@ -0,0 +1,575 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#include "XSP2DE.h"
#include <pthread.h> // for pthread_setname_np on Linux
#include <cassert>
#include <cmath>
#include <limits>
#include <numeric> // for std::accumulate
#include <stdexcept>
#include <iostream>
#include "bayesnet/utils/TensorUtils.h"

namespace bayesnet {

    // --------------------------------------
    // Constructor
    // --------------------------------------
    XSp2de::XSp2de(int spIndex1, int spIndex2)
        : Classifier(Network())
        , superParent1_{ spIndex1 }
        , superParent2_{ spIndex2 }
        , nFeatures_{ 0 }
        , statesClass_{ 0 }
        , alpha_{ 1.0 }
        , initializer_{ 1.0 }
        , semaphore_{ CountingSemaphore::getInstance() }
    {
        validHyperparameters = { "parent1", "parent2" };
    }

    // --------------------------------------
    // setHyperparameters
    // --------------------------------------
    void XSp2de::setHyperparameters(const nlohmann::json& hyperparameters_)
    {
        auto hyperparameters = hyperparameters_;
        if (hyperparameters.contains("parent1")) {
            superParent1_ = hyperparameters["parent1"];
            hyperparameters.erase("parent1");
        }
        if (hyperparameters.contains("parent2")) {
            superParent2_ = hyperparameters["parent2"];
            hyperparameters.erase("parent2");
        }
        // Hand off anything else to the base Classifier
        Classifier::setHyperparameters(hyperparameters);
    }

    // --------------------------------------
    // fitx
    // --------------------------------------
    void XSp2de::fitx(torch::Tensor& X, torch::Tensor& y, torch::Tensor& weights_, const Smoothing_t smoothing)
    {
        m = X.size(1); // number of samples
        n = X.size(0); // number of features
        dataset = X;

        // Append y as the last row of the dataset
        buildDataset(y);

        // Construct the data structures needed for counting
        buildModel(weights_);

        // Accumulate counts & convert to probabilities
        trainModel(weights_, smoothing);
        fitted = true;
    }

    // --------------------------------------
    // buildModel
    // --------------------------------------
    void XSp2de::buildModel(const torch::Tensor& weights)
    {
        nFeatures_ = n;

        // Derive the number of states for each feature from the dataset:
        // states_[f] = max value in dataset[f] + 1.
        states_.resize(nFeatures_);
        for (int f = 0; f < nFeatures_; f++) {
            // This is naive: we take the max in feature f. You might adapt this for real data.
            states_[f] = dataset[f].max().item<int>() + 1;
        }
        // Class states:
        statesClass_ = dataset[-1].max().item<int>() + 1;

        // Initialize the class counts
        classCounts_.resize(statesClass_, 0.0);

        // For sp1 -> p(sp1Val | c)
        sp1FeatureCounts_.resize(states_[superParent1_] * statesClass_, 0.0);

        // For sp2 -> p(sp2Val | c)
        sp2FeatureCounts_.resize(states_[superParent2_] * statesClass_, 0.0);

        // For child features, we store p(childVal | c, sp1Val, sp2Val).
        // childCounts_ holds the raw counts, gathered in one flat vector,
        // so we need an offset for each feature.
        childOffsets_.resize(nFeatures_, -1);

        int totalSize = 0;
        for (int f = 0; f < nFeatures_; f++) {
            if (f == superParent1_ || f == superParent2_) {
                // skip the superparents
                childOffsets_[f] = -1;
                continue;
            }
            childOffsets_[f] = totalSize;
            // Block size for a single child f:
            //   states_[f] * statesClass_ * states_[superParent1_] * states_[superParent2_]
            totalSize += (states_[f] * statesClass_
                          * states_[superParent1_]
                          * states_[superParent2_]);
        }
        childCounts_.resize(totalSize, 0.0);
    }

    // --------------------------------------
    // trainModel
    // --------------------------------------
    void XSp2de::trainModel(const torch::Tensor& weights, const bayesnet::Smoothing_t smoothing)
    {
        // Accumulate raw counts
        for (int i = 0; i < m; i++) {
            std::vector<int> instance(nFeatures_ + 1);
            for (int f = 0; f < nFeatures_; f++) {
                instance[f] = dataset[f][i].item<int>();
            }
            instance[nFeatures_] = dataset[-1][i].item<int>(); // class
            double w = weights[i].item<double>();
            addSample(instance, w);
        }

        // Choose alpha based on smoothing:
        switch (smoothing) {
            case bayesnet::Smoothing_t::ORIGINAL:
                alpha_ = 1.0 / m;
                break;
            case bayesnet::Smoothing_t::LAPLACE:
                alpha_ = 1.0;
                break;
            default:
                alpha_ = 0.0; // no smoothing
        }

        // Large initializer factor for numerical stability
        initializer_ = std::numeric_limits<double>::max() / (nFeatures_ * nFeatures_);

        // Convert raw counts to probabilities
        computeProbabilities();
    }

    // --------------------------------------
    // addSample
    // --------------------------------------
    void XSp2de::addSample(const std::vector<int>& instance, double weight)
    {
        if (weight <= 0.0)
            return;

        int c = instance.back();
        // Increment the class counts
        classCounts_[c] += weight;

        int sp1Val = instance[superParent1_];
        int sp2Val = instance[superParent2_];

        // p(sp1|c)
        sp1FeatureCounts_[sp1Val * statesClass_ + c] += weight;

        // p(sp2|c)
        sp2FeatureCounts_[sp2Val * statesClass_ + c] += weight;

        // p(childVal | c, sp1Val, sp2Val)
        for (int f = 0; f < nFeatures_; f++) {
            if (f == superParent1_ || f == superParent2_)
                continue;

            int childVal = instance[f];
            int offset = childOffsets_[f];
            // Block layout:
            //   offset + sp1Val * (states_[superParent2_] * states_[f] * statesClass_)
            //          + sp2Val * (states_[f] * statesClass_)
            //          + childVal * statesClass_
            //          + c
            int blockSizeSp2 = states_[superParent2_]
                               * states_[f]
                               * statesClass_;
            int blockSizeChild = states_[f] * statesClass_;

            int idx = offset
                      + sp1Val * blockSizeSp2
                      + sp2Val * blockSizeChild
                      + childVal * statesClass_
                      + c;
            childCounts_[idx] += weight;
        }
    }
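    // Worked example of the flat index above (illustrative numbers, an assumption,
    // not from the commit): with states_ = { 3, 2, 4 }, statesClass_ = 2,
    // superParent1_ = 0, superParent2_ = 1, child f = 2 at childOffsets_[2] = 0,
    // and a sample with (sp1Val = 2, sp2Val = 1, childVal = 3, c = 1):
    //   blockSizeSp2   = states_[1] * states_[2] * statesClass_ = 2 * 4 * 2 = 16
    //   blockSizeChild = states_[2] * statesClass_              = 4 * 2     = 8
    //   idx = 0 + 2*16 + 1*8 + 3*2 + 1 = 47   (the last cell of f2's 48-entry block)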
||||
|
||||
    // --------------------------------------
    // computeProbabilities
    // --------------------------------------
    void XSp2de::computeProbabilities()
    {
        double totalCount = std::accumulate(classCounts_.begin(),
            classCounts_.end(), 0.0);

        // classPriors_
        classPriors_.resize(statesClass_, 0.0);
        if (totalCount <= 0.0) {
            // fallback => uniform
            double unif = 1.0 / static_cast<double>(statesClass_);
            for (int c = 0; c < statesClass_; c++) {
                classPriors_[c] = unif;
            }
        } else {
            for (int c = 0; c < statesClass_; c++) {
                classPriors_[c] = (classCounts_[c] + alpha_)
                    / (totalCount + alpha_ * statesClass_);
            }
        }

        // p(sp1Val | c)
        sp1FeatureProbs_.resize(sp1FeatureCounts_.size());
        int sp1Card = states_[superParent1_];
        for (int spVal = 0; spVal < sp1Card; spVal++) {
            for (int c = 0; c < statesClass_; c++) {
                double denom = classCounts_[c] + alpha_ * sp1Card;
                double num = sp1FeatureCounts_[spVal * statesClass_ + c] + alpha_;
                sp1FeatureProbs_[spVal * statesClass_ + c] =
                    (denom <= 0.0 ? 0.0 : num / denom);
            }
        }

        // p(sp2Val | c)
        sp2FeatureProbs_.resize(sp2FeatureCounts_.size());
        int sp2Card = states_[superParent2_];
        for (int spVal = 0; spVal < sp2Card; spVal++) {
            for (int c = 0; c < statesClass_; c++) {
                double denom = classCounts_[c] + alpha_ * sp2Card;
                double num = sp2FeatureCounts_[spVal * statesClass_ + c] + alpha_;
                sp2FeatureProbs_[spVal * statesClass_ + c] =
                    (denom <= 0.0 ? 0.0 : num / denom);
            }
        }

        // p(childVal | c, sp1Val, sp2Val)
        childProbs_.resize(childCounts_.size());
        int offset = 0;
        for (int f = 0; f < nFeatures_; f++) {
            if (f == superParent1_ || f == superParent2_)
                continue;

            int fCard = states_[f];
            int sp1Card_ = states_[superParent1_];
            int sp2Card_ = states_[superParent2_];
            int childBlockSizeSp2 = sp2Card_ * fCard * statesClass_;
            int childBlockSizeF = fCard * statesClass_;

            int blockSize = fCard * sp1Card_ * sp2Card_ * statesClass_;
            for (int sp1Val = 0; sp1Val < sp1Card_; sp1Val++) {
                for (int sp2Val = 0; sp2Val < sp2Card_; sp2Val++) {
                    for (int c = 0; c < statesClass_; c++) {
                        // The denominator is the count of (sp1Val, sp2Val, c)
                        // plus alpha_ * fCard. sp1 and sp2 act jointly as the
                        // child's parents, so we obtain it by summing the child
                        // dimension of this block once per (sp1Val, sp2Val, c).
                        double sumSp1Sp2C = 0.0;
                        for (int cv = 0; cv < fCard; cv++) {
                            int idx2 = offset
                                + sp1Val * childBlockSizeSp2
                                + sp2Val * childBlockSizeF
                                + cv * statesClass_ + c;
                            sumSp1Sp2C += childCounts_[idx2];
                        }
                        double denom = sumSp1Sp2C + alpha_ * fCard;
                        for (int childVal = 0; childVal < fCard; childVal++) {
                            int idx = offset
                                + sp1Val * childBlockSizeSp2
                                + sp2Val * childBlockSizeF
                                + childVal * statesClass_
                                + c;
                            double num = childCounts_[idx] + alpha_;
                            childProbs_[idx] = (denom <= 0.0 ? 0.0 : num / denom);
                        }
                    }
                }
            }
            offset += blockSize;
        }
    }

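All the estimates above are standard Laplace-style smoothed frequencies. A minimal standalone sketch of the same estimate in isolation (counts invented, not from the source):

    #include <iostream>
    // Hypothetical counts: 30 samples of class c, 12 of them with sp1Val,
    // sp1 has 3 states, alpha = 1.0:
    //   p(sp1Val | c) = (12 + 1) / (30 + 1 * 3) = 13 / 33 ~ 0.394
    int main() {
        double alpha = 1.0;
        double classCount = 30.0, jointCount = 12.0;
        int sp1Card = 3;
        std::cout << (jointCount + alpha) / (classCount + alpha * sp1Card) << "\n";
    }
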
    // --------------------------------------
    // predict_proba (single instance)
    // --------------------------------------
    std::vector<double> XSp2de::predict_proba(const std::vector<int> &instance) const
    {
        if (!fitted) {
            throw std::logic_error(CLASSIFIER_NOT_FITTED);
        }
        std::vector<double> probs(statesClass_, 0.0);

        int sp1Val = instance[superParent1_];
        int sp2Val = instance[superParent2_];

        // Start with p(c) * p(sp1Val|c) * p(sp2Val|c)
        for (int c = 0; c < statesClass_; c++) {
            double pC = classPriors_[c];
            double pSp1C = sp1FeatureProbs_[sp1Val * statesClass_ + c];
            double pSp2C = sp2FeatureProbs_[sp2Val * statesClass_ + c];
            probs[c] = pC * pSp1C * pSp2C * initializer_;
        }

        // Multiply by each child feature f
        int offset = 0;
        for (int f = 0; f < nFeatures_; f++) {
            if (f == superParent1_ || f == superParent2_)
                continue;

            int valF = instance[f];
            int fCard = states_[f];
            int sp1Card = states_[superParent1_];
            int sp2Card = states_[superParent2_];
            int blockSizeSp2 = sp2Card * fCard * statesClass_;
            int blockSizeF = fCard * statesClass_;

            // base index into childProbs_ for this child and (sp1Val, sp2Val)
            int base = offset
                + sp1Val * blockSizeSp2
                + sp2Val * blockSizeF
                + valF * statesClass_;
            for (int c = 0; c < statesClass_; c++) {
                probs[c] *= childProbs_[base + c];
            }
            offset += (fCard * sp1Card * sp2Card * statesClass_);
        }

        // Normalize
        normalize(probs);
        return probs;
    }

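This implements the SP2DE factorization P(c | x) ∝ P(c) · P(x_sp1 | c) · P(x_sp2 | c) · ∏_{f ≠ sp1, sp2} P(x_f | c, x_sp1, x_sp2). A hedged call sketch (the member functions are the ones defined in this file; the model variable and instance values are invented):

    // Sketch only: assumes a fitted XSp2de named clf and a 4-feature instance.
    std::vector<int> instance = { 1, 0, 2, 1 };                   // invented values
    std::vector<double> posterior = clf.predict_proba(instance);  // sums to 1
    int label = clf.predict(instance);                            // argmax over classes
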
    // --------------------------------------
    // predict_proba (batch)
    // --------------------------------------
    std::vector<std::vector<double>> XSp2de::predict_proba(std::vector<std::vector<int>> &test_data)
    {
        int test_size = test_data[0].size();  // each feature is test_data[f], size = #samples
        int sample_size = test_data.size();   // = nFeatures_
        std::vector<std::vector<double>> probabilities(
            test_size, std::vector<double>(statesClass_, 0.0));

        // same concurrency approach as the other classifiers
        int chunk_size = std::min(150, int(test_size / semaphore_.getMaxCount()) + 1);
        std::vector<std::thread> threads;

        auto worker = [&](const std::vector<std::vector<int>> &samples,
            int begin,
            int chunk,
            int sample_size,
            std::vector<std::vector<double>> &predictions) {
                std::string threadName =
                    "XSp2de-" + std::to_string(begin) + "-" + std::to_string(chunk);
#if defined(__linux__)
                pthread_setname_np(pthread_self(), threadName.c_str());
#else
                pthread_setname_np(threadName.c_str());
#endif
                std::vector<int> instance(sample_size);
                for (int sample = begin; sample < begin + chunk; ++sample) {
                    for (int feature = 0; feature < sample_size; ++feature) {
                        instance[feature] = samples[feature][sample];
                    }
                    predictions[sample] = predict_proba(instance);
                }
                semaphore_.release();
            };

        for (int begin = 0; begin < test_size; begin += chunk_size) {
            int chunk = std::min(chunk_size, test_size - begin);
            semaphore_.acquire();
            threads.emplace_back(worker, test_data, begin, chunk, sample_size,
                std::ref(probabilities));
        }
        for (auto &th : threads) {
            th.join();
        }
        return probabilities;
    }

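The batch path splits the samples into fixed-size chunks and caps concurrency with the counting semaphore. A standalone sketch of the same partitioning logic (thread cap and sample count invented):

    #include <algorithm>
    #include <iostream>
    int main() {
        int test_size = 1000, max_threads = 8;                        // invented
        int chunk_size = std::min(150, test_size / max_threads + 1);  // as above
        for (int begin = 0; begin < test_size; begin += chunk_size) {
            int chunk = std::min(chunk_size, test_size - begin);
            std::cout << "worker range [" << begin << ", " << begin + chunk << ")\n";
        }
    }
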
    // --------------------------------------
    // predict (single instance)
    // --------------------------------------
    int XSp2de::predict(const std::vector<int> &instance) const
    {
        auto p = predict_proba(instance);
        return static_cast<int>(
            std::distance(p.begin(), std::max_element(p.begin(), p.end()))
        );
    }

    // --------------------------------------
    // predict (batch of data)
    // --------------------------------------
    std::vector<int> XSp2de::predict(std::vector<std::vector<int>> &test_data)
    {
        auto probabilities = predict_proba(test_data);
        std::vector<int> predictions(probabilities.size(), 0);

        for (size_t i = 0; i < probabilities.size(); i++) {
            predictions[i] = static_cast<int>(
                std::distance(probabilities[i].begin(),
                    std::max_element(probabilities[i].begin(),
                        probabilities[i].end()))
            );
        }
        return predictions;
    }

    // --------------------------------------
    // predict (torch::Tensor version)
    // --------------------------------------
    torch::Tensor XSp2de::predict(torch::Tensor &X)
    {
        auto X_ = TensorUtils::to_matrix(X);
        auto result_v = predict(X_);
        return torch::tensor(result_v, torch::kInt32);
    }

    // --------------------------------------
    // predict_proba (torch::Tensor version)
    // --------------------------------------
    torch::Tensor XSp2de::predict_proba(torch::Tensor &X)
    {
        auto X_ = TensorUtils::to_matrix(X);
        auto result_v = predict_proba(X_);
        int n_samples = X.size(1);
        torch::Tensor result =
            torch::zeros({ n_samples, statesClass_ }, torch::kDouble);
        for (int i = 0; i < (int)result_v.size(); ++i) {
            result.index_put_({ i, "..." }, torch::tensor(result_v[i]));
        }
        return result;
    }

    // --------------------------------------
    // score (torch::Tensor version)
    // --------------------------------------
    float XSp2de::score(torch::Tensor &X, torch::Tensor &y)
    {
        torch::Tensor y_pred = predict(X);
        return (y_pred == y).sum().item<float>() / y.size(0);
    }

    // --------------------------------------
    // score (vector version)
    // --------------------------------------
    float XSp2de::score(std::vector<std::vector<int>> &X, std::vector<int> &y)
    {
        auto y_pred = predict(X);
        int correct = 0;
        for (size_t i = 0; i < y_pred.size(); ++i) {
            if (y_pred[i] == y[i]) {
                correct++;
            }
        }
        return static_cast<float>(correct) / static_cast<float>(y_pred.size());
    }

    // --------------------------------------
    // Utility: normalize
    // --------------------------------------
    void XSp2de::normalize(std::vector<double> &v) const
    {
        double sum = 0.0;
        for (auto &val : v) {
            sum += val;
        }
        if (sum > 0.0) {
            for (auto &val : v) {
                val /= sum;
            }
        }
    }

    // --------------------------------------
    // to_string
    // --------------------------------------
    std::string XSp2de::to_string() const
    {
        std::ostringstream oss;
        oss << "----- XSp2de Model -----\n"
            << "nFeatures_ = " << nFeatures_ << "\n"
            << "superParent1_ = " << superParent1_ << "\n"
            << "superParent2_ = " << superParent2_ << "\n"
            << "statesClass_ = " << statesClass_ << "\n\n";

        oss << "States: [";
        for (auto s : states_) oss << s << " ";
        oss << "]\n";

        oss << "classCounts_:\n";
        for (auto v : classCounts_) oss << v << " ";
        oss << "\nclassPriors_:\n";
        for (auto v : classPriors_) oss << v << " ";
        oss << "\nsp1FeatureCounts_ (size=" << sp1FeatureCounts_.size() << ")\n";
        for (auto v : sp1FeatureCounts_) oss << v << " ";
        oss << "\nsp2FeatureCounts_ (size=" << sp2FeatureCounts_.size() << ")\n";
        for (auto v : sp2FeatureCounts_) oss << v << " ";
        oss << "\nchildCounts_ (size=" << childCounts_.size() << ")\n";
        for (auto v : childCounts_) oss << v << " ";

        oss << "\nchildOffsets_:\n";
        for (auto c : childOffsets_) oss << c << " ";

        oss << "\n----------------------------------------\n";
        return oss.str();
    }

    // --------------------------------------
    // Some introspection about the graph
    // --------------------------------------
    int XSp2de::getNumberOfNodes() const
    {
        // nFeatures + 1 class node
        return nFeatures_ + 1;
    }

    int XSp2de::getClassNumStates() const
    {
        return statesClass_;
    }

    int XSp2de::getNFeatures() const
    {
        return nFeatures_;
    }

    int XSp2de::getNumberOfStates() const
    {
        // Heuristic: the sum of the feature cardinalities, scaled by the
        // number of features. Adapt if a different state count is needed.
        return std::accumulate(states_.begin(), states_.end(), 0) * nFeatures_;
    }

    int XSp2de::getNumberOfEdges() const
    {
        // In an SPnDE with n = 2, each child feature has edges from the class
        // and from both superparents, and the superparents hang from the class:
        //   class->sp1, class->sp2      => 2 edges
        //   class->child                => (nFeatures - 2) edges
        //   sp1->child, sp2->child      => 2 * (nFeatures - 2) edges
        //   total = 2 + 3 * (nFeatures - 2) = 3 * nFeatures - 4
        return 3 * nFeatures_ - 4;
    }
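The formula above gives 2 + 3·(nFeatures − 2) = 3·nFeatures − 4 edges. A quick standalone numeric check (feature count invented):

    #include <iostream>
    int main() {
        int nFeatures = 5;                    // invented
        int edges = 2 + 3 * (nFeatures - 2);  // class->sp1/sp2, plus 3 per child
        std::cout << edges << " == " << 3 * nFeatures - 4 << "\n";  // 11 == 11
    }
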
} // namespace bayesnet
75
bayesnet/classifiers/XSP2DE.h
Normal file
@@ -0,0 +1,75 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef XSP2DE_H
#define XSP2DE_H

#include "Classifier.h"
#include "bayesnet/utils/CountingSemaphore.h"
#include <torch/torch.h>
#include <vector>

namespace bayesnet {

    class XSp2de : public Classifier {
    public:
        XSp2de(int spIndex1, int spIndex2);
        void setHyperparameters(const nlohmann::json &hyperparameters_) override;
        void fitx(torch::Tensor &X, torch::Tensor &y, torch::Tensor &weights_, const Smoothing_t smoothing);
        std::vector<double> predict_proba(const std::vector<int> &instance) const;
        std::vector<std::vector<double>> predict_proba(std::vector<std::vector<int>> &test_data) override;
        int predict(const std::vector<int> &instance) const;
        std::vector<int> predict(std::vector<std::vector<int>> &test_data) override;
        torch::Tensor predict(torch::Tensor &X) override;
        torch::Tensor predict_proba(torch::Tensor &X) override;

        float score(torch::Tensor &X, torch::Tensor &y) override;
        float score(std::vector<std::vector<int>> &X, std::vector<int> &y) override;
        std::string to_string() const;
        std::vector<std::string> graph(const std::string &title) const override {
            return std::vector<std::string>({ title });
        }

        int getNumberOfNodes() const override;
        int getNumberOfEdges() const override;
        int getNFeatures() const;
        int getClassNumStates() const override;
        int getNumberOfStates() const override;

    protected:
        void buildModel(const torch::Tensor &weights) override;
        void trainModel(const torch::Tensor &weights, const bayesnet::Smoothing_t smoothing) override;

    private:
        void addSample(const std::vector<int> &instance, double weight);
        void normalize(std::vector<double> &v) const;
        void computeProbabilities();

        int superParent1_;
        int superParent2_;
        int nFeatures_;
        int statesClass_;
        double alpha_;
        double initializer_;

        std::vector<int> states_;
        std::vector<double> classCounts_;
        std::vector<double> classPriors_;
        std::vector<double> sp1FeatureCounts_, sp1FeatureProbs_;
        std::vector<double> sp2FeatureCounts_, sp2FeatureProbs_;
        // childOffsets_[f] is the offset into childCounts_ for feature f.
        // If f is either superParent1 or superParent2, childOffsets_[f] = -1.
        std::vector<int> childOffsets_;
        // For each child f, we store p(x_f | c, sp1Val, sp2Val): raw counts in
        // childCounts_ and probabilities in childProbs_, in blocks of size
        // states_[f] * statesClass_ * states_[sp1] * states_[sp2].
        std::vector<double> childCounts_;
        std::vector<double> childProbs_;
        CountingSemaphore &semaphore_;
    };

} // namespace bayesnet
#endif // XSP2DE_H
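A hedged end-to-end sketch of driving this classifier through the interface above (the nFeatures x nSamples tensor layout follows fitx in XSP2DE.cc; the data and variable names here are invented):

    // Sketch only: X holds discretized ints, one row per feature; y holds labels.
    auto X = torch::randint(0, 3, { 4, 100 }, torch::kInt32);   // invented data
    auto y = torch::randint(0, 2, { 100 }, torch::kInt32);
    auto w = torch::full({ 100 }, 1.0 / 100, torch::kFloat64);  // uniform weights
    bayesnet::XSp2de clf(0, 1);             // superparents: features 0 and 1
    clf.fitx(X, y, w, bayesnet::Smoothing_t::LAPLACE);
    float acc = clf.score(X, y);            // training accuracy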
450
bayesnet/classifiers/XSPODE.cc
Normal file
@@ -0,0 +1,450 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#include <algorithm>
#include <cmath>
#include <limits>
#include <numeric>
#include <sstream>
#include <stdexcept>
#include "XSPODE.h"
#include "bayesnet/utils/TensorUtils.h"

namespace bayesnet {

    // --------------------------------------
    // Constructor
    // --------------------------------------
    XSpode::XSpode(int spIndex)
        : superParent_{ spIndex }, nFeatures_{ 0 }, statesClass_{ 0 }, alpha_{ 1.0 },
        initializer_{ 1.0 }, semaphore_{ CountingSemaphore::getInstance() },
        Classifier(Network())
    {
        validHyperparameters = { "parent" };
    }

    void XSpode::setHyperparameters(const nlohmann::json& hyperparameters_)
    {
        auto hyperparameters = hyperparameters_;
        if (hyperparameters.contains("parent")) {
            superParent_ = hyperparameters["parent"];
            hyperparameters.erase("parent");
        }
        Classifier::setHyperparameters(hyperparameters);
    }

    void XSpode::fitx(torch::Tensor& X, torch::Tensor& y, torch::Tensor& weights_, const Smoothing_t smoothing)
    {
        m = X.size(1);
        n = X.size(0);
        dataset = X;
        buildDataset(y);
        buildModel(weights_);
        trainModel(weights_, smoothing);
        fitted = true;
    }

    // --------------------------------------
    // buildModel
    // --------------------------------------
    // Initialize storage needed for the super-parent and child features counts
    // and probs.
    // --------------------------------------
    void XSpode::buildModel(const torch::Tensor& weights)
    {
        nFeatures_ = n;

        // Derive the number of states for each feature and for the class.
        // (This is just one approach; adapt to match your environment.)
        // We reconstruct the integer states_ array directly from the data:
        // states_[f] = max value observed in X[f] + 1.
        states_.resize(nFeatures_);
        for (int f = 0; f < nFeatures_; f++) {
            states_[f] = dataset[f].max().item<int>() + 1;
        }
        // For the class: statesClass_ = max(y) + 1
        statesClass_ = dataset[-1].max().item<int>() + 1;

        // Initialize counts
        classCounts_.resize(statesClass_, 0.0);
        // p(x_sp = spVal | c) is stored in spFeatureCounts_[spVal * statesClass_ + c].
        spFeatureCounts_.resize(states_[superParent_] * statesClass_, 0.0);

        // For each child ≠ sp, we store p(childVal | c, spVal) in a separate
        // block of childCounts_, sized sum_{child≠sp}(states_[child] *
        // statesClass_ * states_[sp]). childOffsets_[f] indexes the start of
        // each child's block.
        childOffsets_.resize(nFeatures_, -1);
        int totalSize = 0;
        for (int f = 0; f < nFeatures_; f++) {
            if (f == superParent_)
                continue; // skip sp
            childOffsets_[f] = totalSize;
            // block size for this child's counts:
            // states_[f] * statesClass_ * states_[superParent_]
            totalSize += (states_[f] * statesClass_ * states_[superParent_]);
        }
        childCounts_.resize(totalSize, 0.0);
    }
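Cardinalities are derived directly from the data as max value + 1 per feature row. A standalone sketch of that derivation (tensor contents invented):

    #include <torch/torch.h>
    #include <iostream>
    int main() {
        // Invented data: one feature row with values in {0,1,2} => cardinality 3.
        auto row = torch::tensor({ 0, 2, 1, 2, 0 }, torch::kInt32);
        int card = row.max().item<int>() + 1;
        std::cout << "states = " << card << "\n";  // prints 3
    }
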
    // --------------------------------------
    // trainModel
    // --------------------------------------
    //
    // We only store conditional probabilities for:
    //   p(x_sp | c)          (the super-parent feature)
    //   p(x_child | c, x_sp) for all child ≠ sp
    //
    // --------------------------------------
    void XSpode::trainModel(const torch::Tensor& weights,
        const bayesnet::Smoothing_t smoothing)
    {
        // Accumulate raw counts
        for (int i = 0; i < m; i++) {
            std::vector<int> instance(nFeatures_ + 1);
            for (int f = 0; f < nFeatures_; f++) {
                instance[f] = dataset[f][i].item<int>();
            }
            instance[nFeatures_] = dataset[-1][i].item<int>();
            addSample(instance, weights[i].item<double>());
        }
        switch (smoothing) {
            case bayesnet::Smoothing_t::ORIGINAL:
                alpha_ = 1.0 / m;
                break;
            case bayesnet::Smoothing_t::LAPLACE:
                alpha_ = 1.0;
                break;
            default:
                alpha_ = 0.0; // No smoothing
        }
        initializer_ = std::numeric_limits<double>::max() /
            (nFeatures_ * nFeatures_); // for numerical stability
        // Convert raw counts to probabilities
        computeProbabilities();
    }

    // --------------------------------------
    // addSample
    // --------------------------------------
    //
    // instance has size nFeatures_ + 1, with the class at the end.
    // We add weight to the appropriate counters for each (c, superParentVal, childVal).
    //
    void XSpode::addSample(const std::vector<int>& instance, double weight)
    {
        if (weight <= 0.0)
            return;

        int c = instance.back();
        // (A) increment classCounts
        classCounts_[c] += weight;

        // (B) increment super-parent counts => p(x_sp | c)
        int spVal = instance[superParent_];
        spFeatureCounts_[spVal * statesClass_ + c] += weight;

        // (C) increment child counts => p(childVal | c, x_sp)
        for (int f = 0; f < nFeatures_; f++) {
            if (f == superParent_)
                continue;
            int childVal = instance[f];
            int offset = childOffsets_[f];
            // Compute index in childCounts_.
            // Layout: [ offset + (spVal * states_[f] + childVal) * statesClass_ + c ]
            int blockSize = states_[f] * statesClass_;
            int idx = offset + spVal * blockSize + childVal * statesClass_ + c;
            childCounts_[idx] += weight;
        }
    }
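A worked example of the flat childCounts_ layout for the single-superparent case (illustrative cardinalities, not from the source):

    // Illustrative only: states_[f] = 4, statesClass_ = 2, states_[sp] = 3,
    // childOffsets_[f] = 0. For spVal = 2, childVal = 1, c = 1:
    //   blockSize = states_[f] * statesClass_ = 8
    //   idx = 0 + 2 * 8 + 1 * 2 + 1 = 19
    // One contiguous block of size states_[sp] * states_[f] * statesClass_
    // per child feature.
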
    // --------------------------------------
    // computeProbabilities
    // --------------------------------------
    //
    // Once all samples are added in COUNTS mode, call this to compute:
    //   p(c)
    //   p(x_sp = spVal | c)
    //   p(x_child = v | c, x_sp = s_sp)
    //
    // --------------------------------------
    void XSpode::computeProbabilities()
    {
        double totalCount =
            std::accumulate(classCounts_.begin(), classCounts_.end(), 0.0);

        // p(c) => classPriors_
        classPriors_.resize(statesClass_, 0.0);
        if (totalCount <= 0.0) {
            // fallback => uniform
            double unif = 1.0 / static_cast<double>(statesClass_);
            for (int c = 0; c < statesClass_; c++) {
                classPriors_[c] = unif;
            }
        } else {
            for (int c = 0; c < statesClass_; c++) {
                classPriors_[c] =
                    (classCounts_[c] + alpha_) / (totalCount + alpha_ * statesClass_);
            }
        }

        // p(x_sp | c)
        spFeatureProbs_.resize(spFeatureCounts_.size());
        // denominator for spVal * statesClass_ + c is just
        // classCounts_[c] + alpha_ * (#states of sp)
        int spCard = states_[superParent_];
        for (int spVal = 0; spVal < spCard; spVal++) {
            for (int c = 0; c < statesClass_; c++) {
                double denom = classCounts_[c] + alpha_ * spCard;
                double num = spFeatureCounts_[spVal * statesClass_ + c] + alpha_;
                spFeatureProbs_[spVal * statesClass_ + c] = (denom <= 0.0 ? 0.0 : num / denom);
            }
        }

        // p(x_child | c, x_sp)
        childProbs_.resize(childCounts_.size());
        for (int f = 0; f < nFeatures_; f++) {
            if (f == superParent_)
                continue;
            int offset = childOffsets_[f];
            int childCard = states_[f];

            // For each spVal, childVal, c in childCounts_:
            for (int spVal = 0; spVal < spCard; spVal++) {
                for (int childVal = 0; childVal < childCard; childVal++) {
                    for (int c = 0; c < statesClass_; c++) {
                        int idx = offset + spVal * (childCard * statesClass_) +
                            childVal * statesClass_ + c;

                        double num = childCounts_[idx] + alpha_;
                        // denominator = spFeatureCounts_[spVal * statesClass_ + c]
                        //             + alpha_ * (#states of child)
                        double denom =
                            spFeatureCounts_[spVal * statesClass_ + c] + alpha_ * childCard;
                        childProbs_[idx] = (denom <= 0.0 ? 0.0 : num / denom);
                    }
                }
            }
        }
    }

    // --------------------------------------
    // predict_proba
    // --------------------------------------
    //
    // For a single instance x of dimension nFeatures_:
    //   P(c | x) ∝ p(c) × p(x_sp | c) × ∏(child ≠ sp) p(x_child | c, x_sp).
    //
    // --------------------------------------
    std::vector<double> XSpode::predict_proba(const std::vector<int>& instance) const
    {
        if (!fitted) {
            throw std::logic_error(CLASSIFIER_NOT_FITTED);
        }
        std::vector<double> probs(statesClass_, 0.0);
        // Multiply p(c) × p(x_sp | c)
        int spVal = instance[superParent_];
        for (int c = 0; c < statesClass_; c++) {
            double pc = classPriors_[c];
            double pSpC = spFeatureProbs_[spVal * statesClass_ + c];
            probs[c] = pc * pSpC * initializer_;
        }

        // Multiply by each child's probability p(x_child | c, x_sp)
        for (int feature = 0; feature < nFeatures_; feature++) {
            if (feature == superParent_)
                continue; // skip sp
            int sf = instance[feature];
            int offset = childOffsets_[feature];
            int childCard = states_[feature]; // stride of the child block below
            // Index into childProbs_ = offset + spVal * (childCard * statesClass_)
            //                        + childVal * statesClass_ + c
            int base = offset + spVal * (childCard * statesClass_) + sf * statesClass_;
            for (int c = 0; c < statesClass_; c++) {
                probs[c] *= childProbs_[base + c];
            }
        }

        // Normalize
        normalize(probs);
        return probs;
    }
    std::vector<std::vector<double>> XSpode::predict_proba(std::vector<std::vector<int>>& test_data)
    {
        int test_size = test_data[0].size();
        int sample_size = test_data.size();
        auto probabilities = std::vector<std::vector<double>>(
            test_size, std::vector<double>(statesClass_));

        int chunk_size = std::min(150, int(test_size / semaphore_.getMaxCount()) + 1);
        std::vector<std::thread> threads;
        auto worker = [&](const std::vector<std::vector<int>>& samples, int begin,
            int chunk, int sample_size,
            std::vector<std::vector<double>>& predictions) {
                std::string threadName =
                    "(V)PWorker-" + std::to_string(begin) + "-" + std::to_string(chunk);
#if defined(__linux__)
                pthread_setname_np(pthread_self(), threadName.c_str());
#else
                pthread_setname_np(threadName.c_str());
#endif
                std::vector<int> instance(sample_size);
                for (int sample = begin; sample < begin + chunk; ++sample) {
                    for (int feature = 0; feature < sample_size; ++feature) {
                        instance[feature] = samples[feature][sample];
                    }
                    predictions[sample] = predict_proba(instance);
                }
                semaphore_.release();
            };
        for (int begin = 0; begin < test_size; begin += chunk_size) {
            int chunk = std::min(chunk_size, test_size - begin);
            semaphore_.acquire();
            threads.emplace_back(worker, test_data, begin, chunk, sample_size, std::ref(probabilities));
        }
        for (auto& thread : threads) {
            thread.join();
        }
        return probabilities;
    }

    // --------------------------------------
    // Utility: normalize
    // --------------------------------------
    void XSpode::normalize(std::vector<double>& v) const
    {
        double sum = 0.0;
        for (auto val : v) {
            sum += val;
        }
        if (sum <= 0.0) {
            return;
        }
        for (auto& val : v) {
            val /= sum;
        }
    }

    // --------------------------------------
    // representation of the model
    // --------------------------------------
    std::string XSpode::to_string() const
    {
        std::ostringstream oss;
        oss << "----- XSpode Model -----" << std::endl
            << "nFeatures_  = " << nFeatures_ << std::endl
            << "superParent_ = " << superParent_ << std::endl
            << "statesClass_ = " << statesClass_ << std::endl
            << std::endl;

        oss << "States: [";
        for (int s : states_)
            oss << s << " ";
        oss << "]" << std::endl;
        oss << "classCounts_: [";
        for (double c : classCounts_)
            oss << c << " ";
        oss << "]" << std::endl;
        oss << "classPriors_: [";
        for (double c : classPriors_)
            oss << c << " ";
        oss << "]" << std::endl;
        oss << "spFeatureCounts_: size = " << spFeatureCounts_.size() << std::endl << "[";
        for (double c : spFeatureCounts_)
            oss << c << " ";
        oss << "]" << std::endl;
        oss << "spFeatureProbs_: size = " << spFeatureProbs_.size() << std::endl << "[";
        for (double c : spFeatureProbs_)
            oss << c << " ";
        oss << "]" << std::endl;
        oss << "childCounts_: size = " << childCounts_.size() << std::endl << "[";
        for (double cc : childCounts_)
            oss << cc << " ";
        oss << "]" << std::endl;
        oss << "childProbs_: size = " << childProbs_.size() << std::endl << "[";
        for (double cp : childProbs_)
            oss << cp << " ";
        oss << "]" << std::endl;
        oss << "childOffsets_: [";
        for (int co : childOffsets_)
            oss << co << " ";
        oss << "]" << std::endl;
        oss << std::string(40, '-') << std::endl;
        return oss.str();
    }
    int XSpode::getNumberOfNodes() const { return nFeatures_ + 1; }
    int XSpode::getClassNumStates() const { return statesClass_; }
    int XSpode::getNFeatures() const { return nFeatures_; }
    int XSpode::getNumberOfStates() const
    {
        return std::accumulate(states_.begin(), states_.end(), 0) * nFeatures_;
    }
    int XSpode::getNumberOfEdges() const
    {
        return 2 * nFeatures_ + 1;
    }

    // ------------------------------------------------------
    // Predict overrides (classifier interface)
    // ------------------------------------------------------
    int XSpode::predict(const std::vector<int>& instance) const
    {
        auto p = predict_proba(instance);
        return static_cast<int>(std::distance(p.begin(), std::max_element(p.begin(), p.end())));
    }
    std::vector<int> XSpode::predict(std::vector<std::vector<int>>& test_data)
    {
        auto probabilities = predict_proba(test_data);
        std::vector<int> predictions(probabilities.size(), 0);

        for (size_t i = 0; i < probabilities.size(); i++) {
            predictions[i] = std::distance(
                probabilities[i].begin(),
                std::max_element(probabilities[i].begin(), probabilities[i].end()));
        }
        return predictions;
    }
    torch::Tensor XSpode::predict(torch::Tensor& X)
    {
        auto X_ = TensorUtils::to_matrix(X);
        auto result_v = predict(X_);
        return torch::tensor(result_v, torch::kInt32);
    }
    torch::Tensor XSpode::predict_proba(torch::Tensor& X)
    {
        auto X_ = TensorUtils::to_matrix(X);
        auto result_v = predict_proba(X_);
        int n_samples = X.size(1);
        torch::Tensor result =
            torch::zeros({ n_samples, statesClass_ }, torch::kDouble);
        for (int i = 0; i < static_cast<int>(result_v.size()); ++i) {
            result.index_put_({ i, "..." }, torch::tensor(result_v[i]));
        }
        return result;
    }
    float XSpode::score(torch::Tensor& X, torch::Tensor& y)
    {
        torch::Tensor y_pred = predict(X);
        return (y_pred == y).sum().item<float>() / y.size(0);
    }
    float XSpode::score(std::vector<std::vector<int>>& X, std::vector<int>& y)
    {
        auto y_pred = this->predict(X);
        int correct = 0;
        for (size_t i = 0; i < y_pred.size(); ++i) {
            if (y_pred[i] == y[i]) {
                correct++;
            }
        }
        return static_cast<float>(correct) / static_cast<float>(y_pred.size());
    }
} // namespace bayesnet
76
bayesnet/classifiers/XSPODE.h
Normal file
@@ -0,0 +1,76 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef XSPODE_H
#define XSPODE_H

#include <vector>
#include <torch/torch.h>
#include "Classifier.h"
#include "bayesnet/utils/CountingSemaphore.h"

namespace bayesnet {

    class XSpode : public Classifier {
    public:
        explicit XSpode(int spIndex);
        std::vector<double> predict_proba(const std::vector<int>& instance) const;
        std::vector<std::vector<double>> predict_proba(std::vector<std::vector<int>>& X) override;
        int predict(const std::vector<int>& instance) const;
        void normalize(std::vector<double>& v) const;
        std::string to_string() const;
        int getNFeatures() const;
        int getNumberOfNodes() const override;
        int getNumberOfEdges() const override;
        int getNumberOfStates() const override;
        int getClassNumStates() const override;
        std::vector<int>& getStates();
        std::vector<std::string> graph(const std::string& title) const override { return std::vector<std::string>({ title }); }
        void fitx(torch::Tensor& X, torch::Tensor& y, torch::Tensor& weights_, const Smoothing_t smoothing);
        void setHyperparameters(const nlohmann::json& hyperparameters_) override;

        //
        // Classifier interface
        //
        torch::Tensor predict(torch::Tensor& X) override;
        std::vector<int> predict(std::vector<std::vector<int>>& X) override;
        torch::Tensor predict_proba(torch::Tensor& X) override;
        float score(torch::Tensor& X, torch::Tensor& y) override;
        float score(std::vector<std::vector<int>>& X, std::vector<int>& y) override;
    protected:
        void buildModel(const torch::Tensor& weights) override;
        void trainModel(const torch::Tensor& weights, const bayesnet::Smoothing_t smoothing) override;
    private:
        void addSample(const std::vector<int>& instance, double weight);
        void computeProbabilities();
        int superParent_;
        int nFeatures_;
        int statesClass_;
        std::vector<int> states_; // [states_feat0, ..., states_feat(N-1)] (class not included in this array)

        // Class counts
        std::vector<double> classCounts_; // [c], accumulative
        std::vector<double> classPriors_; // [c], after normalization

        // For p(x_sp = spVal | c)
        std::vector<double> spFeatureCounts_; // [spVal * statesClass_ + c]
        std::vector<double> spFeatureProbs_;  // same shape, after normalization

        // For p(x_child = childVal | x_sp = spVal, c)
        // childCounts_ is big enough to hold all child features except sp:
        // For each child f, we store childOffsets_[f] as the start index, then
        // childVal, spVal, c => the data.
        std::vector<double> childCounts_;
        std::vector<double> childProbs_;
        std::vector<int> childOffsets_;

        double alpha_ = 1.0;
        double initializer_; // for numerical stability
        CountingSemaphore& semaphore_;
    };
}

#endif // XSPODE_H
40
bayesnet/ensembles/A2DE.cc
Normal file
@@ -0,0 +1,40 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#include "A2DE.h"

namespace bayesnet {
    A2DE::A2DE(bool predict_voting) : Ensemble(predict_voting)
    {
        validHyperparameters = { "predict_voting" };
    }
    void A2DE::setHyperparameters(const nlohmann::json& hyperparameters_)
    {
        auto hyperparameters = hyperparameters_;
        if (hyperparameters.contains("predict_voting")) {
            predict_voting = hyperparameters["predict_voting"];
            hyperparameters.erase("predict_voting");
        }
        Classifier::setHyperparameters(hyperparameters);
    }
    void A2DE::buildModel(const torch::Tensor& weights)
    {
        models.clear();
        significanceModels.clear();
        // Use a signed count so the pair loops cannot underflow when features
        // is empty (size() - 1 on an unsigned type would wrap around).
        int n = static_cast<int>(features.size());
        for (int i = 0; i < n - 1; ++i) {
            for (int j = i + 1; j < n; ++j) {
                auto model = std::make_unique<SPnDE>(std::vector<int>({ i, j }));
                models.push_back(std::move(model));
            }
        }
        n_models = static_cast<unsigned>(models.size());
        significanceModels = std::vector<double>(n_models, 1.0);
    }
    std::vector<std::string> A2DE::graph(const std::string& title) const
    {
        return Ensemble::graph(title);
    }
}
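A2DE builds one SPnDE per unordered pair of features, i.e. C(n, 2) models. A standalone check of that count (feature count invented):

    #include <iostream>
    int main() {
        int n = 6;                        // invented feature count
        int n_models = n * (n - 1) / 2;   // one SPnDE per unordered pair
        std::cout << n_models << "\n";    // prints 15
    }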
22
bayesnet/ensembles/A2DE.h
Normal file
@@ -0,0 +1,22 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef A2DE_H
#define A2DE_H
#include "bayesnet/classifiers/SPnDE.h"
#include "Ensemble.h"
namespace bayesnet {
    class A2DE : public Ensemble {
    public:
        A2DE(bool predict_voting = false);
        virtual ~A2DE() {};
        void setHyperparameters(const nlohmann::json& hyperparameters) override;
        std::vector<std::string> graph(const std::string& title = "A2DE") const override;
    protected:
        void buildModel(const torch::Tensor& weights) override;
    };
}
#endif
38
bayesnet/ensembles/AODE.cc
Normal file
@@ -0,0 +1,38 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#include "AODE.h"

namespace bayesnet {
    AODE::AODE(bool predict_voting) : Ensemble(predict_voting)
    {
        validHyperparameters = { "predict_voting" };
    }
    void AODE::setHyperparameters(const nlohmann::json& hyperparameters_)
    {
        auto hyperparameters = hyperparameters_;
        if (hyperparameters.contains("predict_voting")) {
            predict_voting = hyperparameters["predict_voting"];
            hyperparameters.erase("predict_voting");
        }
        Classifier::setHyperparameters(hyperparameters);
    }
    void AODE::buildModel(const torch::Tensor& weights)
    {
        models.clear();
        significanceModels.clear();
        for (int i = 0; i < static_cast<int>(features.size()); ++i) {
            models.push_back(std::make_unique<SPODE>(i));
        }
        n_models = models.size();
        significanceModels = std::vector<double>(n_models, 1.0);
    }
    std::vector<std::string> AODE::graph(const std::string& title) const
    {
        return Ensemble::graph(title);
    }
}
22
bayesnet/ensembles/AODE.h
Normal file
@@ -0,0 +1,22 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef AODE_H
#define AODE_H
#include "bayesnet/classifiers/SPODE.h"
#include "Ensemble.h"
namespace bayesnet {
    class AODE : public Ensemble {
    public:
        AODE(bool predict_voting = false);
        virtual ~AODE() {};
        void setHyperparameters(const nlohmann::json& hyperparameters) override;
        std::vector<std::string> graph(const std::string& title = "AODE") const override;
    protected:
        void buildModel(const torch::Tensor& weights) override;
    };
}
#endif
48
bayesnet/ensembles/AODELd.cc
Normal file
@@ -0,0 +1,48 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#include "AODELd.h"

namespace bayesnet {
    AODELd::AODELd(bool predict_voting) : Ensemble(predict_voting), Proposal(dataset, features, className)
    {
    }
    AODELd& AODELd::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_, const Smoothing_t smoothing)
    {
        checkInput(X_, y_);
        features = features_;
        className = className_;
        Xf = X_;
        y = y_;
        // Fills std::vectors Xv & yv with the data from tensors X_ (discretized) & y
        states = fit_local_discretization(y);
        // We have discretized the input data.
        // First we need to fit the model to build the normal AODE structure;
        // Ensemble::fit calls buildModel to initialize the base models.
        Ensemble::fit(dataset, features, className, states, smoothing);
        return *this;
    }
    void AODELd::buildModel(const torch::Tensor& weights)
    {
        models.clear();
        for (int i = 0; i < static_cast<int>(features.size()); ++i) {
            models.push_back(std::make_unique<SPODELd>(i));
        }
        n_models = models.size();
        significanceModels = std::vector<double>(n_models, 1.0);
    }
    void AODELd::trainModel(const torch::Tensor& weights, const Smoothing_t smoothing)
    {
        for (const auto& model : models) {
            model->fit(Xf, y, features, className, states, smoothing);
        }
    }
    std::vector<std::string> AODELd::graph(const std::string& name) const
    {
        return Ensemble::graph(name);
    }
}
25
bayesnet/ensembles/AODELd.h
Normal file
@@ -0,0 +1,25 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef AODELD_H
#define AODELD_H
#include "bayesnet/classifiers/Proposal.h"
#include "bayesnet/classifiers/SPODELd.h"
#include "Ensemble.h"

namespace bayesnet {
    class AODELd : public Ensemble, public Proposal {
    public:
        AODELd(bool predict_voting = true);
        virtual ~AODELd() = default;
        AODELd& fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_, const Smoothing_t smoothing) override;
        std::vector<std::string> graph(const std::string& name = "AODELd") const override;
    protected:
        void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) override;
        void buildModel(const torch::Tensor& weights) override;
    };
}
#endif // !AODELD_H
268
bayesnet/ensembles/Boost.cc
Normal file
@@ -0,0 +1,268 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#include "Boost.h"
#include "bayesnet/feature_selection/CFS.h"
#include "bayesnet/feature_selection/FCBF.h"
#include "bayesnet/feature_selection/IWSS.h"
#include <folding.hpp>

namespace bayesnet {
    Boost::Boost(bool predict_voting) : Ensemble(predict_voting) {
        validHyperparameters = {"alpha_block", "order", "convergence", "convergence_best", "bisection",
                                "threshold", "maxTolerance", "predict_voting", "select_features", "block_update"};
    }
    void Boost::setHyperparameters(const nlohmann::json &hyperparameters_) {
        auto hyperparameters = hyperparameters_;
        if (hyperparameters.contains("order")) {
            std::vector<std::string> algos = {Orders.ASC, Orders.DESC, Orders.RAND};
            order_algorithm = hyperparameters["order"];
            if (std::find(algos.begin(), algos.end(), order_algorithm) == algos.end()) {
                throw std::invalid_argument("Invalid order algorithm, valid values [" + Orders.ASC + ", " + Orders.DESC +
                                            ", " + Orders.RAND + "]");
            }
            hyperparameters.erase("order");
        }
        if (hyperparameters.contains("alpha_block")) {
            alpha_block = hyperparameters["alpha_block"];
            hyperparameters.erase("alpha_block");
        }
        if (hyperparameters.contains("convergence")) {
            convergence = hyperparameters["convergence"];
            hyperparameters.erase("convergence");
        }
        if (hyperparameters.contains("convergence_best")) {
            convergence_best = hyperparameters["convergence_best"];
            hyperparameters.erase("convergence_best");
        }
        if (hyperparameters.contains("bisection")) {
            bisection = hyperparameters["bisection"];
            hyperparameters.erase("bisection");
        }
        if (hyperparameters.contains("threshold")) {
            threshold = hyperparameters["threshold"];
            hyperparameters.erase("threshold");
        }
        if (hyperparameters.contains("maxTolerance")) {
            maxTolerance = hyperparameters["maxTolerance"];
            if (maxTolerance < 1 || maxTolerance > 6)
                throw std::invalid_argument("Invalid maxTolerance value, must be in [1, 6]");
            hyperparameters.erase("maxTolerance");
        }
        if (hyperparameters.contains("predict_voting")) {
            predict_voting = hyperparameters["predict_voting"];
            hyperparameters.erase("predict_voting");
        }
        if (hyperparameters.contains("select_features")) {
            auto selectedAlgorithm = hyperparameters["select_features"];
            std::vector<std::string> algos = {SelectFeatures.IWSS, SelectFeatures.CFS, SelectFeatures.FCBF};
            selectFeatures = true;
            select_features_algorithm = selectedAlgorithm;
            if (std::find(algos.begin(), algos.end(), selectedAlgorithm) == algos.end()) {
                throw std::invalid_argument("Invalid selectFeatures value, valid values [" + SelectFeatures.IWSS + ", " +
                                            SelectFeatures.CFS + ", " + SelectFeatures.FCBF + "]");
            }
            hyperparameters.erase("select_features");
        }
        if (hyperparameters.contains("block_update")) {
            block_update = hyperparameters["block_update"];
            hyperparameters.erase("block_update");
        }
        if (block_update && alpha_block) {
            throw std::invalid_argument("alpha_block and block_update cannot be true at the same time");
        }
        if (block_update && !bisection) {
            throw std::invalid_argument("block_update needs bisection to be true");
        }
        Classifier::setHyperparameters(hyperparameters);
    }
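A hedged sketch of configuring Boost through the JSON hyperparameters it validates above (the keys are the ones accepted in setHyperparameters; all values here are invented):

    nlohmann::json hp = {
        { "order", "desc" },            // one of "asc", "desc", "rand"
        { "convergence", true },
        { "maxTolerance", 3 },          // must be in [1, 6]
        { "select_features", "CFS" },   // or "IWSS", "FCBF"
        { "bisection", true },
        { "block_update", false }
    };
    booster.setHyperparameters(hp);     // throws std::invalid_argument on bad values
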
    void Boost::add_model(std::unique_ptr<Classifier> model, double significance) {
        models.push_back(std::move(model));
        n_models++;
        significanceModels.push_back(significance);
    }
    void Boost::remove_last_model() {
        models.pop_back();
        significanceModels.pop_back();
        n_models--;
    }
    void Boost::buildModel(const torch::Tensor &weights) {
        // Models shall be built in trainModel
        models.clear();
        significanceModels.clear();
        n_models = 0;
        // Prepare the validation dataset
        auto y_ = dataset.index({-1, "..."});
        if (convergence) {
            // Prepare train & validation sets from train data
            auto fold = folding::StratifiedKFold(5, y_, 271);
            auto [train, test] = fold.getFold(0);
            auto train_t = torch::tensor(train);
            auto test_t = torch::tensor(test);
            // Get train and validation sets
            X_train = dataset.index({torch::indexing::Slice(0, dataset.size(0) - 1), train_t});
            y_train = dataset.index({-1, train_t});
            X_test = dataset.index({torch::indexing::Slice(0, dataset.size(0) - 1), test_t});
            y_test = dataset.index({-1, test_t});
            dataset = X_train;
            m = X_train.size(1);
            auto n_classes = states.at(className).size();
            // Build dataset with train data
            buildDataset(y_train);
            metrics = Metrics(dataset, features, className, n_classes);
        } else {
            // Use all data to train
            X_train = dataset.index({torch::indexing::Slice(0, dataset.size(0) - 1), "..."});
            y_train = y_;
        }
    }
    std::vector<int> Boost::featureSelection(torch::Tensor &weights_) {
        int maxFeatures = 0;
        if (select_features_algorithm == SelectFeatures.CFS) {
            featureSelector = new CFS(dataset, features, className, maxFeatures, states.at(className).size(), weights_);
        } else if (select_features_algorithm == SelectFeatures.IWSS) {
            if (threshold < 0 || threshold > 0.5) {
                throw std::invalid_argument("Invalid threshold value for " + SelectFeatures.IWSS + " [0, 0.5]");
            }
            featureSelector =
                new IWSS(dataset, features, className, maxFeatures, states.at(className).size(), weights_, threshold);
        } else if (select_features_algorithm == SelectFeatures.FCBF) {
            if (threshold < 1e-7 || threshold > 1) {
                throw std::invalid_argument("Invalid threshold value for " + SelectFeatures.FCBF + " [1e-7, 1]");
            }
            featureSelector =
                new FCBF(dataset, features, className, maxFeatures, states.at(className).size(), weights_, threshold);
        }
        featureSelector->fit();
        auto featuresUsed = featureSelector->getFeatures();
        delete featureSelector;
        return featuresUsed;
    }
    std::tuple<torch::Tensor &, double, bool> Boost::update_weights(torch::Tensor &ytrain, torch::Tensor &ypred,
                                                                    torch::Tensor &weights) {
        bool terminate = false;
        double alpha_t = 0;
        auto mask_wrong = ypred != ytrain;
        auto mask_right = ypred == ytrain;
        auto masked_weights = weights * mask_wrong.to(weights.dtype());
        double epsilon_t = masked_weights.sum().item<double>();
        // std::cout << "epsilon_t: " << epsilon_t << " count wrong: " << mask_wrong.sum().item<int>() << " count right: "
        // << mask_right.sum().item<int>() << std::endl;
        if (epsilon_t > 0.5) {
            // Stop: the current base learner is no better than random guessing.
            // "In each round of AdaBoost, there is a sanity check to ensure that the current base
            // learner is better than random guess" (Zhi-Hua Zhou, 2012)
            terminate = true;
        } else {
            double wt = (1 - epsilon_t) / epsilon_t;
            alpha_t = epsilon_t == 0 ? 1 : 0.5 * log(wt);
            // Step 3.2: Update weights for next classifier
            // Step 3.2.1: Update weights of wrong samples
            weights += mask_wrong.to(weights.dtype()) * exp(alpha_t) * weights;
            // Step 3.2.2: Update weights of right samples
            weights += mask_right.to(weights.dtype()) * exp(-alpha_t) * weights;
            // Step 3.3: Normalise the weights
            double totalWeights = torch::sum(weights).item<double>();
            weights = weights / totalWeights;
        }
        return {weights, alpha_t, terminate};
    }
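update_weights computes the AdaBoost coefficient alpha_t = 0.5 * ln((1 - epsilon_t) / epsilon_t) and then, as written above, scales wrong samples by (1 + e^alpha_t) and right ones by (1 + e^-alpha_t) before renormalizing. A standalone numeric check of those factors (epsilon_t invented):

    #include <cmath>
    #include <iostream>
    int main() {
        double epsilon_t = 0.2;  // invented weighted error rate
        double alpha_t = 0.5 * std::log((1 - epsilon_t) / epsilon_t);
        std::cout << "alpha_t = " << alpha_t << "\n";                      // ~0.6931
        std::cout << "wrong factor = " << 1 + std::exp(alpha_t) << "\n";   // ~3.0
        std::cout << "right factor = " << 1 + std::exp(-alpha_t) << "\n";  // ~1.5
    }
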
    std::tuple<torch::Tensor &, double, bool> Boost::update_weights_block(int k, torch::Tensor &ytrain,
                                                                          torch::Tensor &weights) {
        /* Update Block algorithm
            k = # of models in block
            n_models = # of models in ensemble to make predictions
            n_models_bak = # models saved
            models = vector of models to make predictions
            models_bak = models not used to make predictions
            significances_bak = backup of significances vector

            Case list
            A) k = 1, n_models = 1     => n = 0, n_models = n + k
            B) k = 1, n_models = n + 1 => n_models = n + k
            C) k > 1, n_models = k + 1 => n = 1, n_models = n + k
            D) k > 1, n_models = k     => n = 0, n_models = n + k
            E) k > 1, n_models = k + n => n_models = n + k

            A, D) n = 0, k > 0, n_models == k
            1. n_models_bak <- n_models
            2. significances_bak <- significances
            3. significances = vector(k, 1)
            4. Don’t move any classifiers out of models
            5. n_models <- k
            6. Make prediction, compute alpha, update weights
            7. Don’t restore any classifiers to models
            8. significances <- significances_bak
            9. Update last k significances
            10. n_models <- n_models_bak

            B, C, E) n > 0, k > 0, n_models == n + k
            1. n_models_bak <- n_models
            2. significances_bak <- significances
            3. significances = vector(k, 1)
            4. Move first n classifiers to models_bak
            5. n_models <- k
            6. Make prediction, compute alpha, update weights
            7. Insert classifiers in models_bak to be the first n models
            8. significances <- significances_bak
            9. Update last k significances
            10. n_models <- n_models_bak
        */
        //
        // Make predict with only the last k models
        //
        std::unique_ptr<Classifier> model;
        std::vector<std::unique_ptr<Classifier>> models_bak;
        // 1. n_models_bak <- n_models 2. significances_bak <- significances
        auto significance_bak = significanceModels;
        auto n_models_bak = n_models;
        // 3. significances = vector(k, 1)
        significanceModels = std::vector<double>(k, 1.0);
        // 4. Move first n classifiers to models_bak
        // backup the first n_models - k models (if n_models == k, don't backup any)
        for (int i = 0; i < n_models - k; ++i) {
            model = std::move(models[0]);
            models.erase(models.begin());
            models_bak.push_back(std::move(model));
        }
        assert(models.size() == k);
        // 5. n_models <- k
        n_models = k;
        // 6. Make prediction, compute alpha, update weights
        auto ypred = predict(X_train);
        //
        // Update weights
        //
        double alpha_t;
        bool terminate;
        std::tie(weights, alpha_t, terminate) = update_weights(y_train, ypred, weights);
        //
        // Restore the models if needed
        //
        // 7. Insert classifiers in models_bak to be the first n models
        // if n_models_bak == k, don't restore any, because none of them were moved
        if (k != n_models_bak) {
            // Insert in the same order as they were extracted
            int bak_size = models_bak.size();
            for (int i = 0; i < bak_size; ++i) {
                model = std::move(models_bak[bak_size - 1 - i]);
                models_bak.erase(models_bak.end() - 1);
                models.insert(models.begin(), std::move(model));
            }
        }
        // 8. significances <- significances_bak
        significanceModels = significance_bak;
        //
        // Update the significance of the last k models
        //
        // 9. Update last k significances
        for (int i = 0; i < k; ++i) {
            significanceModels[n_models_bak - k + i] = alpha_t;
        }
        // 10. n_models <- n_models_bak
        n_models = n_models_bak;
        return {weights, alpha_t, terminate};
    }
} // namespace bayesnet
57
bayesnet/ensembles/Boost.h
Normal file
@@ -0,0 +1,57 @@
// ***************************************************************
|
||||
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
|
||||
// SPDX-FileType: SOURCE
|
||||
// SPDX-License-Identifier: MIT
|
||||
// ***************************************************************
|
||||
|
||||
#ifndef BOOST_H
|
||||
#define BOOST_H
|
||||
#include <string>
|
||||
#include <tuple>
|
||||
#include <vector>
|
||||
#include <nlohmann/json.hpp>
|
||||
#include <torch/torch.h>
|
||||
#include "Ensemble.h"
|
||||
#include "bayesnet/feature_selection/FeatureSelect.h"
|
||||
namespace bayesnet {
|
||||
const struct {
|
||||
std::string CFS = "CFS";
|
||||
std::string FCBF = "FCBF";
|
||||
std::string IWSS = "IWSS";
|
||||
}SelectFeatures;
|
||||
const struct {
|
||||
std::string ASC = "asc";
|
||||
std::string DESC = "desc";
|
||||
std::string RAND = "rand";
|
||||
}Orders;
|
||||
class Boost : public Ensemble {
|
||||
public:
|
||||
explicit Boost(bool predict_voting = false);
|
||||
virtual ~Boost() override = default;
|
||||
void setHyperparameters(const nlohmann::json& hyperparameters_) override;
|
||||
protected:
|
||||
std::vector<int> featureSelection(torch::Tensor& weights_);
|
||||
void buildModel(const torch::Tensor& weights) override;
|
||||
std::tuple<torch::Tensor&, double, bool> update_weights(torch::Tensor& ytrain, torch::Tensor& ypred, torch::Tensor& weights);
|
||||
std::tuple<torch::Tensor&, double, bool> update_weights_block(int k, torch::Tensor& ytrain, torch::Tensor& weights);
|
||||
void add_model(std::unique_ptr<Classifier> model, double significance);
|
||||
void remove_last_model();
|
||||
//
|
||||
// Attributes
|
||||
//
|
||||
torch::Tensor X_train, y_train, X_test, y_test;
|
||||
// Hyperparameters
|
||||
bool bisection = true; // if true, use the bisection strategy to add k models at once to the ensemble
|
||||
int maxTolerance = 3;
|
||||
std::string order_algorithm = Orders.DESC; // order to process the KBest features asc, desc, rand
|
||||
bool convergence = true; // if true, stop when the model does not improve
bool convergence_best = false; // whether to keep the best accuracy seen so far or the last accuracy as the prior accuracy
|
||||
bool selectFeatures = false; // if true, use feature selection
|
||||
std::string select_features_algorithm; // Selected feature selection algorithm
|
||||
FeatureSelect* featureSelector = nullptr;
|
||||
double threshold = -1;
|
||||
bool block_update = false; // if true, use block update algorithm, only meaningful if bisection is true
|
||||
bool alpha_block = false; // if true, the alpha is computed with the ensemble built so far and the new model
|
||||
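// Usage sketch (illustrative only: the accepted JSON keys are defined in
// Boost::setHyperparameters, which is outside this excerpt, so the key
// names below are assumptions):
//   auto clf = bayesnet::BoostAODE();
//   clf.setHyperparameters(nlohmann::json{
//       { "bisection", true }, { "order", "desc" }, { "select_features", "CFS" } });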
};
|
||||
}
|
||||
#endif
|
165
bayesnet/ensembles/BoostA2DE.cc
Normal file
@@ -0,0 +1,165 @@
|
||||
// ***************************************************************
|
||||
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
|
||||
// SPDX-FileType: SOURCE
|
||||
// SPDX-License-Identifier: MIT
|
||||
// ***************************************************************
|
||||
|
||||
#include <limits.h>
|
||||
#include <tuple>
|
||||
#include <folding.hpp>
|
||||
#include "BoostA2DE.h"
|
||||
|
||||
namespace bayesnet {
|
||||
|
||||
BoostA2DE::BoostA2DE(bool predict_voting) : Boost(predict_voting)
|
||||
{
|
||||
}
|
||||
std::vector<int> BoostA2DE::initializeModels(const Smoothing_t smoothing)
|
||||
{
|
||||
torch::Tensor weights_ = torch::full({ m }, 1.0 / m, torch::kFloat64);
|
||||
std::vector<int> featuresSelected = featureSelection(weights_);
|
||||
if (featuresSelected.size() < 2) {
|
||||
notes.push_back("No features selected in initialization");
|
||||
status = ERROR;
|
||||
return std::vector<int>();
|
||||
}
|
||||
for (int i = 0; i < featuresSelected.size() - 1; i++) {
|
||||
for (int j = i + 1; j < featuresSelected.size(); j++) {
|
||||
auto parents = { featuresSelected[i], featuresSelected[j] };
|
||||
std::unique_ptr<Classifier> model = std::make_unique<SPnDE>(parents);
|
||||
model->fit(dataset, features, className, states, weights_, smoothing);
|
||||
models.push_back(std::move(model));
|
||||
significanceModels.push_back(1.0); // They will be updated later in trainModel
|
||||
n_models++;
|
||||
}
|
||||
}
|
||||
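// For n selected features this builds one SPnDE per unordered pair of
// parents, i.e. n * (n - 1) / 2 models (e.g. 4 features -> 6 models).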
notes.push_back("Used features in initialization: " + std::to_string(featuresSelected.size()) + " of " + std::to_string(features.size()) + " with " + select_features_algorithm);
|
||||
return featuresSelected;
|
||||
}
|
||||
void BoostA2DE::trainModel(const torch::Tensor& weights, const Smoothing_t smoothing)
|
||||
{
|
||||
//
|
||||
// Logging setup
|
||||
//
|
||||
// loguru::set_thread_name("BoostA2DE");
|
||||
// loguru::g_stderr_verbosity = loguru::Verbosity_OFF;
|
||||
// loguru::add_file("boostA2DE.log", loguru::Truncate, loguru::Verbosity_MAX);
|
||||
|
||||
// Algorithm based on the adaboost algorithm for classification
|
||||
// as explained in Ensemble methods (Zhi-Hua Zhou, 2012)
|
||||
fitted = true;
|
||||
double alpha_t = 0;
|
||||
torch::Tensor weights_ = torch::full({ m }, 1.0 / m, torch::kFloat64);
|
||||
bool finished = false;
|
||||
std::vector<int> featuresUsed;
|
||||
if (selectFeatures) {
|
||||
featuresUsed = initializeModels(smoothing);
|
||||
if (featuresUsed.size() == 0) {
|
||||
return;
|
||||
}
|
||||
auto ypred = predict(X_train);
|
||||
std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
|
||||
// Update significance of the models
|
||||
for (int i = 0; i < n_models; ++i) {
|
||||
significanceModels[i] = alpha_t;
|
||||
}
|
||||
if (finished) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
int numItemsPack = 0; // The counter of the models inserted in the current pack
|
||||
// Variables to control the accuracy finish condition
|
||||
double priorAccuracy = 0.0;
|
||||
double improvement = 1.0;
|
||||
double convergence_threshold = 1e-4;
|
||||
int tolerance = 0; // number of consecutive rounds in which the improvement falls below convergence_threshold
|
||||
// Step 0: Set the finish condition
|
||||
// epsilon_t > 0.5 => the learner is worse than chance, so the weight-update policy is inverted
|
||||
// validation error is not decreasing
|
||||
// run out of features
|
||||
bool ascending = order_algorithm == Orders.ASC;
|
||||
std::mt19937 g{ 173 };
|
||||
std::vector<std::pair<int, int>> pairSelection;
|
||||
while (!finished) {
|
||||
// Step 1: Build ranking with mutual information
|
||||
pairSelection = metrics.SelectKPairs(weights_, featuresUsed, ascending, 0); // Get all the pairs sorted
|
||||
if (order_algorithm == Orders.RAND) {
|
||||
std::shuffle(pairSelection.begin(), pairSelection.end(), g);
|
||||
}
|
||||
int k = bisection ? pow(2, tolerance) : 1;
|
||||
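// With bisection enabled the pack size doubles on every non-improving
// round: k = 1, 2, 4, 8, ... (pow(2, tolerance)); otherwise models are
// added one at a time.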
int counter = 0; // The model counter of the current pack
|
||||
// VLOG_SCOPE_F(1, "counter=%d k=%d featureSelection.size: %zu", counter, k, featureSelection.size());
|
||||
while (counter++ < k && pairSelection.size() > 0) {
|
||||
auto feature_pair = pairSelection[0];
|
||||
pairSelection.erase(pairSelection.begin());
|
||||
std::unique_ptr<Classifier> model;
|
||||
model = std::make_unique<SPnDE>(std::vector<int>({ feature_pair.first, feature_pair.second }));
|
||||
model->fit(dataset, features, className, states, weights_, smoothing);
|
||||
alpha_t = 0.0;
|
||||
if (!block_update) {
|
||||
auto ypred = model->predict(X_train);
|
||||
// Step 3.1: Compute the classifier amount of say
|
||||
std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
|
||||
}
|
||||
// Step 3.4: Store classifier and its accuracy to weigh its future vote
|
||||
numItemsPack++;
|
||||
models.push_back(std::move(model));
|
||||
significanceModels.push_back(alpha_t);
|
||||
n_models++;
|
||||
// VLOG_SCOPE_F(2, "numItemsPack: %d n_models: %d featuresUsed: %zu", numItemsPack, n_models, featuresUsed.size());
|
||||
}
|
||||
if (block_update) {
|
||||
std::tie(weights_, alpha_t, finished) = update_weights_block(k, y_train, weights_);
|
||||
}
|
||||
if (convergence && !finished) {
|
||||
auto y_val_predict = predict(X_test);
|
||||
double accuracy = (y_val_predict == y_test).sum().item<double>() / (double)y_test.size(0);
|
||||
if (priorAccuracy == 0) {
|
||||
priorAccuracy = accuracy;
|
||||
} else {
|
||||
improvement = accuracy - priorAccuracy;
|
||||
}
|
||||
if (improvement < convergence_threshold) {
|
||||
// VLOG_SCOPE_F(3, " (improvement<threshold) tolerance: %d numItemsPack: %d improvement: %f prior: %f current: %f", tolerance, numItemsPack, improvement, priorAccuracy, accuracy);
|
||||
tolerance++;
|
||||
} else {
|
||||
// VLOG_SCOPE_F(3, "* (improvement>=threshold) Reset. tolerance: %d numItemsPack: %d improvement: %f prior: %f current: %f", tolerance, numItemsPack, improvement, priorAccuracy, accuracy);
|
||||
tolerance = 0; // Reset the counter if the model performs better
|
||||
numItemsPack = 0;
|
||||
}
|
||||
if (convergence_best) {
|
||||
// Keep the best accuracy until now as the prior accuracy
|
||||
priorAccuracy = std::max(accuracy, priorAccuracy);
|
||||
} else {
|
||||
// Keep the last accuracy obtained as the prior accuracy
|
||||
priorAccuracy = accuracy;
|
||||
}
|
||||
}
|
||||
// VLOG_SCOPE_F(1, "tolerance: %d featuresUsed.size: %zu features.size: %zu", tolerance, featuresUsed.size(), features.size());
|
||||
finished = finished || tolerance > maxTolerance || pairSelection.size() == 0;
|
||||
}
|
||||
if (tolerance > maxTolerance) {
|
||||
if (numItemsPack < n_models) {
|
||||
notes.push_back("Convergence threshold reached & " + std::to_string(numItemsPack) + " models eliminated");
|
||||
// VLOG_SCOPE_F(4, "Convergence threshold reached & %d models eliminated of %d", numItemsPack, n_models);
|
||||
for (int i = 0; i < numItemsPack; ++i) {
|
||||
significanceModels.pop_back();
|
||||
models.pop_back();
|
||||
n_models--;
|
||||
}
|
||||
} else {
|
||||
notes.push_back("Convergence threshold reached & 0 models eliminated");
|
||||
// VLOG_SCOPE_F(4, "Convergence threshold reached & 0 models eliminated n_models=%d numItemsPack=%d", n_models, numItemsPack);
|
||||
}
|
||||
}
|
||||
if (pairSelection.size() > 0) {
|
||||
notes.push_back("Pairs not used in train: " + std::to_string(pairSelection.size()));
|
||||
status = WARNING;
|
||||
}
|
||||
notes.push_back("Number of models: " + std::to_string(n_models));
|
||||
}
|
||||
std::vector<std::string> BoostA2DE::graph(const std::string& title) const
|
||||
{
|
||||
return Ensemble::graph(title);
|
||||
}
|
||||
}
|
25
bayesnet/ensembles/BoostA2DE.h
Normal file
@@ -0,0 +1,25 @@
|
||||
// ***************************************************************
|
||||
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
|
||||
// SPDX-FileType: SOURCE
|
||||
// SPDX-License-Identifier: MIT
|
||||
// ***************************************************************
|
||||
|
||||
#ifndef BOOSTA2DE_H
|
||||
#define BOOSTA2DE_H
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include "bayesnet/classifiers/SPnDE.h"
|
||||
#include "Boost.h"
|
||||
namespace bayesnet {
|
||||
class BoostA2DE : public Boost {
|
||||
public:
|
||||
explicit BoostA2DE(bool predict_voting = false);
|
||||
virtual ~BoostA2DE() = default;
|
||||
std::vector<std::string> graph(const std::string& title = "BoostA2DE") const override;
|
||||
protected:
|
||||
void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) override;
|
||||
private:
|
||||
std::vector<int> initializeModels(const Smoothing_t smoothing);
|
||||
};
|
||||
}
|
||||
#endif
|
181
bayesnet/ensembles/BoostAODE.cc
Normal file
@@ -0,0 +1,181 @@
|
||||
// ***************************************************************
|
||||
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
|
||||
// SPDX-FileType: SOURCE
|
||||
// SPDX-License-Identifier: MIT
|
||||
// ***************************************************************
|
||||
|
||||
#include "BoostAODE.h"
|
||||
#include "bayesnet/classifiers/SPODE.h"
|
||||
#include <limits.h>
|
||||
// #include <loguru.cpp>
|
||||
// #include <loguru.hpp>
|
||||
#include <random>
|
||||
#include <set>
|
||||
#include <tuple>
|
||||
|
||||
namespace bayesnet {
|
||||
|
||||
BoostAODE::BoostAODE(bool predict_voting) : Boost(predict_voting)
|
||||
{
|
||||
}
|
||||
std::vector<int> BoostAODE::initializeModels(const Smoothing_t smoothing)
|
||||
{
|
||||
torch::Tensor weights_ = torch::full({ m }, 1.0 / m, torch::kFloat64);
|
||||
std::vector<int> featuresSelected = featureSelection(weights_);
|
||||
for (const int& feature : featuresSelected) {
|
||||
std::unique_ptr<Classifier> model = std::make_unique<SPODE>(feature);
|
||||
model->fit(dataset, features, className, states, weights_, smoothing);
|
||||
models.push_back(std::move(model));
|
||||
significanceModels.push_back(1.0); // They will be updated later in trainModel
|
||||
n_models++;
|
||||
}
|
||||
notes.push_back("Used features in initialization: " + std::to_string(featuresSelected.size()) + " of " + std::to_string(features.size()) + " with " + select_features_algorithm);
|
||||
return featuresSelected;
|
||||
}
|
||||
void BoostAODE::trainModel(const torch::Tensor& weights, const Smoothing_t smoothing)
|
||||
{
|
||||
//
|
||||
// Logging setup
|
||||
//
|
||||
// loguru::set_thread_name("BoostAODE");
|
||||
// loguru::g_stderr_verbosity = loguru::Verbosity_OFF;
|
||||
// loguru::add_file("boostAODE.log", loguru::Truncate, loguru::Verbosity_MAX);
|
||||
|
||||
// Algorithm based on the adaboost algorithm for classification
|
||||
// as explained in Ensemble methods (Zhi-Hua Zhou, 2012)
|
||||
fitted = true;
|
||||
double alpha_t = 0;
|
||||
torch::Tensor weights_ = torch::full({ m }, 1.0 / m, torch::kFloat64);
|
||||
bool finished = false;
|
||||
std::vector<int> featuresUsed;
|
||||
n_models = 0;
|
||||
if (selectFeatures) {
|
||||
featuresUsed = initializeModels(smoothing);
|
||||
auto ypred = predict(X_train);
|
||||
std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
|
||||
// Update significance of the models
|
||||
for (int i = 0; i < n_models; ++i) {
    significanceModels[i] = alpha_t; // assign in place: initializeModels already pushed one placeholder per model
}
|
||||
// VLOG_SCOPE_F(1, "SelectFeatures. alpha_t: %f n_models: %d", alpha_t, n_models);
|
||||
if (finished) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
int numItemsPack = 0; // The counter of the models inserted in the current pack
|
||||
// Variables to control the accuracy finish condition
|
||||
double priorAccuracy = 0.0;
|
||||
double improvement = 1.0;
|
||||
double convergence_threshold = 1e-4;
|
||||
int tolerance = 0; // number of consecutive rounds in which the improvement falls below convergence_threshold
|
||||
// Step 0: Set the finish condition
|
||||
// epsilon_t > 0.5 => the learner is worse than chance, so the weight-update policy is inverted
|
||||
// validation error is not decreasing
|
||||
// run out of features
|
||||
bool ascending = order_algorithm == Orders.ASC;
|
||||
std::mt19937 g{ 173 };
|
||||
while (!finished) {
|
||||
// Step 1: Build ranking with mutual information
|
||||
auto featureSelection = metrics.SelectKBestWeighted(weights_, ascending, n); // Get all the features sorted
|
||||
if (order_algorithm == Orders.RAND) {
|
||||
std::shuffle(featureSelection.begin(), featureSelection.end(), g);
|
||||
}
|
||||
// Remove used features
|
||||
featureSelection.erase(remove_if(begin(featureSelection), end(featureSelection), [&](auto x) { return std::find(begin(featuresUsed), end(featuresUsed), x) != end(featuresUsed); }),
|
||||
end(featureSelection));
|
||||
int k = bisection ? pow(2, tolerance) : 1;
|
||||
int counter = 0; // The model counter of the current pack
|
||||
// VLOG_SCOPE_F(1, "counter=%d k=%d featureSelection.size: %zu", counter, k, featureSelection.size());
|
||||
while (counter++ < k && featureSelection.size() > 0) {
|
||||
auto feature = featureSelection[0];
|
||||
featureSelection.erase(featureSelection.begin());
|
||||
std::unique_ptr<Classifier> model;
|
||||
model = std::make_unique<SPODE>(feature);
|
||||
model->fit(dataset, features, className, states, weights_, smoothing);
|
||||
alpha_t = 0.0;
|
||||
if (!block_update) {
|
||||
torch::Tensor ypred;
|
||||
if (alpha_block) {
|
||||
//
|
||||
// Compute the prediction with the current ensemble + model
|
||||
//
|
||||
// Add the model to the ensemble
|
||||
n_models++;
|
||||
models.push_back(std::move(model));
|
||||
significanceModels.push_back(1);
|
||||
// Compute the prediction
|
||||
ypred = predict(X_train);
|
||||
// Remove the model from the ensemble
|
||||
model = std::move(models.back());
|
||||
models.pop_back();
|
||||
significanceModels.pop_back();
|
||||
n_models--;
|
||||
} else {
|
||||
ypred = model->predict(X_train);
|
||||
}
|
||||
// Step 3.1: Compute the classifier amount of say
|
||||
std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
|
||||
}
|
||||
// Step 3.4: Store classifier and its accuracy to weigh its future vote
|
||||
numItemsPack++;
|
||||
featuresUsed.push_back(feature);
|
||||
models.push_back(std::move(model));
|
||||
significanceModels.push_back(alpha_t);
|
||||
n_models++;
|
||||
// VLOG_SCOPE_F(2, "finished: %d numItemsPack: %d n_models: %d featuresUsed: %zu", finished, numItemsPack, n_models, featuresUsed.size());
|
||||
}
|
||||
if (block_update) {
|
||||
std::tie(weights_, alpha_t, finished) = update_weights_block(k, y_train, weights_);
|
||||
}
|
||||
if (convergence && !finished) {
|
||||
auto y_val_predict = predict(X_test);
|
||||
double accuracy = (y_val_predict == y_test).sum().item<double>() / (double)y_test.size(0);
|
||||
if (priorAccuracy == 0) {
|
||||
priorAccuracy = accuracy;
|
||||
} else {
|
||||
improvement = accuracy - priorAccuracy;
|
||||
}
|
||||
if (improvement < convergence_threshold) {
|
||||
// VLOG_SCOPE_F(3, " (improvement<threshold) tolerance: %d numItemsPack: %d improvement: %f prior: %f current: %f", tolerance, numItemsPack, improvement, priorAccuracy, accuracy);
|
||||
tolerance++;
|
||||
} else {
|
||||
// VLOG_SCOPE_F(3, "* (improvement>=threshold) Reset. tolerance: %d numItemsPack: %d improvement: %f prior: %f current: %f", tolerance, numItemsPack, improvement, priorAccuracy, accuracy);
|
||||
tolerance = 0; // Reset the counter if the model performs better
|
||||
numItemsPack = 0;
|
||||
}
|
||||
if (convergence_best) {
|
||||
// Keep the best accuracy until now as the prior accuracy
|
||||
priorAccuracy = std::max(accuracy, priorAccuracy);
|
||||
} else {
|
||||
// Keep the last accuracy obtained as the prior accuracy
|
||||
priorAccuracy = accuracy;
|
||||
}
|
||||
}
|
||||
// VLOG_SCOPE_F(1, "tolerance: %d featuresUsed.size: %zu features.size: %zu", tolerance, featuresUsed.size(), features.size());
|
||||
finished = finished || tolerance > maxTolerance || featuresUsed.size() == features.size();
|
||||
}
|
||||
if (tolerance > maxTolerance) {
|
||||
if (numItemsPack < n_models) {
|
||||
notes.push_back("Convergence threshold reached & " + std::to_string(numItemsPack) + " models eliminated");
|
||||
// VLOG_SCOPE_F(4, "Convergence threshold reached & %d models eliminated of %d", numItemsPack, n_models);
|
||||
for (int i = 0; i < numItemsPack; ++i) {
|
||||
significanceModels.pop_back();
|
||||
models.pop_back();
|
||||
n_models--;
|
||||
}
|
||||
} else {
|
||||
notes.push_back("Convergence threshold reached & 0 models eliminated");
|
||||
// VLOG_SCOPE_F(4, "Convergence threshold reached & 0 models eliminated n_models=%d numItemsPack=%d", n_models, numItemsPack);
|
||||
}
|
||||
}
|
||||
if (featuresUsed.size() != features.size()) {
|
||||
notes.push_back("Used features in train: " + std::to_string(featuresUsed.size()) + " of " + std::to_string(features.size()));
|
||||
status = WARNING;
|
||||
}
|
||||
notes.push_back("Number of models: " + std::to_string(n_models));
|
||||
}
|
||||
std::vector<std::string> BoostAODE::graph(const std::string& title) const
|
||||
{
|
||||
return Ensemble::graph(title);
|
||||
}
|
||||
}
|
25
bayesnet/ensembles/BoostAODE.h
Normal file
@@ -0,0 +1,25 @@
|
||||
// ***************************************************************
|
||||
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
|
||||
// SPDX-FileType: SOURCE
|
||||
// SPDX-License-Identifier: MIT
|
||||
// ***************************************************************
|
||||
|
||||
#ifndef BOOSTAODE_H
|
||||
#define BOOSTAODE_H
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include "Boost.h"
|
||||
|
||||
namespace bayesnet {
|
||||
class BoostAODE : public Boost {
|
||||
public:
|
||||
explicit BoostAODE(bool predict_voting = false);
|
||||
virtual ~BoostAODE() = default;
|
||||
std::vector<std::string> graph(const std::string& title = "BoostAODE") const override;
|
||||
protected:
|
||||
void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) override;
|
||||
private:
|
||||
std::vector<int> initializeModels(const Smoothing_t smoothing);
|
||||
};
|
||||
}
|
||||
#endif
|
197
bayesnet/ensembles/Ensemble.cc
Normal file
@@ -0,0 +1,197 @@
|
||||
// ***************************************************************
|
||||
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
|
||||
// SPDX-FileType: SOURCE
|
||||
// SPDX-License-Identifier: MIT
|
||||
// ***************************************************************
|
||||
#include "Ensemble.h"
|
||||
|
||||
namespace bayesnet {
|
||||
|
||||
Ensemble::Ensemble(bool predict_voting) : Classifier(Network()), n_models(0), predict_voting(predict_voting)
|
||||
{
|
||||
}
|
||||
const std::string ENSEMBLE_NOT_FITTED = "Ensemble has not been fitted";
|
||||
void Ensemble::trainModel(const torch::Tensor& weights, const Smoothing_t smoothing)
|
||||
{
|
||||
n_models = models.size();
|
||||
for (auto i = 0; i < n_models; ++i) {
|
||||
// fit each model with the complete dataset
|
||||
models[i]->fit(dataset, features, className, states, smoothing);
|
||||
}
|
||||
}
|
||||
std::vector<int> Ensemble::compute_arg_max(std::vector<std::vector<double>>& X)
|
||||
{
|
||||
std::vector<int> y_pred;
|
||||
for (auto i = 0; i < X.size(); ++i) {
|
||||
auto max = std::max_element(X[i].begin(), X[i].end());
|
||||
y_pred.push_back(std::distance(X[i].begin(), max));
|
||||
}
|
||||
return y_pred;
|
||||
}
|
||||
torch::Tensor Ensemble::compute_arg_max(torch::Tensor& X)
|
||||
{
|
||||
auto y_pred = torch::argmax(X, 1);
|
||||
return y_pred;
|
||||
}
|
||||
torch::Tensor Ensemble::voting(torch::Tensor& votes)
|
||||
{
|
||||
// Convert m x n_models tensor to a m x n_class_states with voting probabilities
|
||||
auto y_pred_ = votes.accessor<int, 2>();
|
||||
int numClasses = states.at(className).size();
|
||||
// votes is m x n_models with the prediction of every model for each sample
|
||||
auto result = torch::zeros({ votes.size(0), numClasses }, torch::kFloat32);
|
||||
auto sum = std::reduce(significanceModels.begin(), significanceModels.end());
|
||||
for (int i = 0; i < votes.size(0); ++i) {
|
||||
// n_votes[c] accumulates, for this sample, the total significance of the
// models that predicted class value c
|
||||
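// Worked example (hypothetical numbers): three models with significances
// {0.5, 1.0, 1.5} predicting classes {0, 2, 2} for one sample yield
// n_votes = {0.5, 0.0, 2.5}; the final division by sum = 3.0 turns the
// row into {0.167, 0.0, 0.833}.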
std::vector<double> n_votes(numClasses, 0.0);
|
||||
for (int j = 0; j < n_models; ++j) {
|
||||
n_votes[y_pred_[i][j]] += significanceModels.at(j);
|
||||
}
|
||||
result[i] = torch::tensor(n_votes);
|
||||
}
|
||||
// Divide only once at the end, saving work and preserving precision
|
||||
result /= sum;
|
||||
return result;
|
||||
}
|
||||
std::vector<std::vector<double>> Ensemble::predict_proba(std::vector<std::vector<int>>& X)
|
||||
{
|
||||
if (!fitted) {
|
||||
throw std::logic_error(ENSEMBLE_NOT_FITTED);
|
||||
}
|
||||
return predict_voting ? predict_average_voting(X) : predict_average_proba(X);
|
||||
}
|
||||
torch::Tensor Ensemble::predict_proba(torch::Tensor& X)
|
||||
{
|
||||
if (!fitted) {
|
||||
throw std::logic_error(ENSEMBLE_NOT_FITTED);
|
||||
}
|
||||
return predict_voting ? predict_average_voting(X) : predict_average_proba(X);
|
||||
}
|
||||
std::vector<int> Ensemble::predict(std::vector<std::vector<int>>& X)
|
||||
{
|
||||
auto res = predict_proba(X);
|
||||
return compute_arg_max(res);
|
||||
}
|
||||
torch::Tensor Ensemble::predict(torch::Tensor& X)
|
||||
{
|
||||
auto res = predict_proba(X);
|
||||
return compute_arg_max(res);
|
||||
}
|
||||
torch::Tensor Ensemble::predict_average_proba(torch::Tensor& X)
|
||||
{
|
||||
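// Weighted average of the per-model class posteriors:
//   P(y | x) = sum_i alpha_i * P_i(y | x) / sum_i alpha_i
// where alpha_i = significanceModels[i].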
auto n_states = models[0]->getClassNumStates();
|
||||
torch::Tensor y_pred = torch::zeros({ X.size(1), n_states }, torch::kFloat32);
|
||||
for (auto i = 0; i < n_models; ++i) {
|
||||
auto ypredict = models[i]->predict_proba(X);
|
||||
/*std::cout << "model " << i << " prediction: " << ypredict << " significance " << significanceModels[i] << std::endl;*/
|
||||
y_pred += ypredict * significanceModels[i];
|
||||
}
|
||||
auto sum = std::reduce(significanceModels.begin(), significanceModels.end());
|
||||
y_pred /= sum;
|
||||
return y_pred;
|
||||
}
|
||||
std::vector<std::vector<double>> Ensemble::predict_average_proba(std::vector<std::vector<int>>& X)
|
||||
{
|
||||
auto n_states = models[0]->getClassNumStates();
|
||||
std::vector<std::vector<double>> y_pred(X[0].size(), std::vector<double>(n_states, 0.0));
|
||||
for (auto i = 0; i < n_models; ++i) {
|
||||
auto ypredict = models[i]->predict_proba(X);
|
||||
assert(ypredict.size() == y_pred.size());
|
||||
assert(ypredict[0].size() == y_pred[0].size());
|
||||
// Multiply each prediction by the significance of the model and then add it to the final prediction
|
||||
for (auto j = 0; j < ypredict.size(); ++j) {
|
||||
std::transform(y_pred[j].begin(), y_pred[j].end(), ypredict[j].begin(), y_pred[j].begin(),
|
||||
[significanceModels = significanceModels[i]](double x, double y) { return x + y * significanceModels; });
|
||||
}
|
||||
}
|
||||
auto sum = std::reduce(significanceModels.begin(), significanceModels.end());
|
||||
//Divide each element of the prediction by the sum of the significances
|
||||
for (auto j = 0; j < y_pred.size(); ++j) {
|
||||
std::transform(y_pred[j].begin(), y_pred[j].end(), y_pred[j].begin(), [sum](double x) { return x / sum; });
|
||||
}
|
||||
return y_pred;
|
||||
}
|
||||
std::vector<std::vector<double>> Ensemble::predict_average_voting(std::vector<std::vector<int>>& X)
|
||||
{
|
||||
torch::Tensor Xt = bayesnet::vectorToTensor(X, false);
|
||||
auto y_pred = predict_average_voting(Xt);
|
||||
std::vector<std::vector<double>> result = tensorToVectorDouble(y_pred);
|
||||
return result;
|
||||
}
|
||||
torch::Tensor Ensemble::predict_average_voting(torch::Tensor& X)
|
||||
{
|
||||
// Build a m x n_models tensor with the predictions of each model
|
||||
torch::Tensor y_pred = torch::zeros({ X.size(1), n_models }, torch::kInt32);
|
||||
for (auto i = 0; i < n_models; ++i) {
|
||||
auto ypredict = models[i]->predict(X);
|
||||
y_pred.index_put_({ "...", i }, ypredict);
|
||||
}
|
||||
return voting(y_pred);
|
||||
}
|
||||
float Ensemble::score(torch::Tensor& X, torch::Tensor& y)
|
||||
{
|
||||
auto y_pred = predict(X);
|
||||
int correct = 0;
|
||||
for (int i = 0; i < y_pred.size(0); ++i) {
|
||||
if (y_pred[i].item<int>() == y[i].item<int>()) {
|
||||
correct++;
|
||||
}
|
||||
}
|
||||
return (double)correct / y_pred.size(0);
|
||||
}
|
||||
float Ensemble::score(std::vector<std::vector<int>>& X, std::vector<int>& y)
|
||||
{
|
||||
auto y_pred = predict(X);
|
||||
int correct = 0;
|
||||
for (int i = 0; i < y_pred.size(); ++i) {
|
||||
if (y_pred[i] == y[i]) {
|
||||
correct++;
|
||||
}
|
||||
}
|
||||
return (double)correct / y_pred.size();
|
||||
}
|
||||
std::vector<std::string> Ensemble::show() const
|
||||
{
|
||||
auto result = std::vector<std::string>();
|
||||
for (auto i = 0; i < n_models; ++i) {
|
||||
auto res = models[i]->show();
|
||||
result.insert(result.end(), res.begin(), res.end());
|
||||
}
|
||||
return result;
|
||||
}
|
||||
std::vector<std::string> Ensemble::graph(const std::string& title) const
|
||||
{
|
||||
auto result = std::vector<std::string>();
|
||||
for (auto i = 0; i < n_models; ++i) {
|
||||
auto res = models[i]->graph(title + "_" + std::to_string(i));
|
||||
result.insert(result.end(), res.begin(), res.end());
|
||||
}
|
||||
return result;
|
||||
}
|
||||
int Ensemble::getNumberOfNodes() const
|
||||
{
|
||||
int nodes = 0;
|
||||
for (auto i = 0; i < n_models; ++i) {
|
||||
nodes += models[i]->getNumberOfNodes();
|
||||
}
|
||||
return nodes;
|
||||
}
|
||||
int Ensemble::getNumberOfEdges() const
|
||||
{
|
||||
int edges = 0;
|
||||
for (auto i = 0; i < n_models; ++i) {
|
||||
edges += models[i]->getNumberOfEdges();
|
||||
}
|
||||
return edges;
|
||||
}
|
||||
int Ensemble::getNumberOfStates() const
|
||||
{
|
||||
int nstates = 0;
|
||||
for (auto i = 0; i < n_models; ++i) {
|
||||
nstates += models[i]->getNumberOfStates();
|
||||
}
|
||||
return nstates;
|
||||
}
|
||||
}
|
59
bayesnet/ensembles/Ensemble.h
Normal file
@@ -0,0 +1,59 @@
|
||||
// ***************************************************************
|
||||
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
|
||||
// SPDX-FileType: SOURCE
|
||||
// SPDX-License-Identifier: MIT
|
||||
// ***************************************************************
|
||||
|
||||
#ifndef ENSEMBLE_H
|
||||
#define ENSEMBLE_H
|
||||
#include <torch/torch.h>
|
||||
#include "bayesnet/utils/BayesMetrics.h"
|
||||
#include "bayesnet/utils/bayesnetUtils.h"
|
||||
#include "bayesnet/classifiers/Classifier.h"
|
||||
|
||||
namespace bayesnet {
|
||||
class Ensemble : public Classifier {
|
||||
public:
|
||||
Ensemble(bool predict_voting = true);
|
||||
virtual ~Ensemble() = default;
|
||||
torch::Tensor predict(torch::Tensor& X) override;
|
||||
std::vector<int> predict(std::vector<std::vector<int>>& X) override;
|
||||
torch::Tensor predict_proba(torch::Tensor& X) override;
|
||||
std::vector<std::vector<double>> predict_proba(std::vector<std::vector<int>>& X) override;
|
||||
float score(torch::Tensor& X, torch::Tensor& y) override;
|
||||
float score(std::vector<std::vector<int>>& X, std::vector<int>& y) override;
|
||||
int getNumberOfNodes() const override;
|
||||
int getNumberOfEdges() const override;
|
||||
int getNumberOfStates() const override;
|
||||
std::vector<std::string> show() const override;
|
||||
std::vector<std::string> graph(const std::string& title) const override;
|
||||
std::vector<std::string> topological_order() override
|
||||
{
|
||||
return std::vector<std::string>();
|
||||
}
|
||||
std::string dump_cpt() const override
|
||||
{
|
||||
std::string output;
|
||||
for (auto& model : models) {
|
||||
output += model->dump_cpt();
|
||||
output += std::string(80, '-') + "\n";
|
||||
}
|
||||
return output;
|
||||
}
|
||||
protected:
|
||||
void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) override;
|
||||
torch::Tensor predict_average_voting(torch::Tensor& X);
|
||||
std::vector<std::vector<double>> predict_average_voting(std::vector<std::vector<int>>& X);
|
||||
torch::Tensor predict_average_proba(torch::Tensor& X);
|
||||
std::vector<std::vector<double>> predict_average_proba(std::vector<std::vector<int>>& X);
|
||||
torch::Tensor compute_arg_max(torch::Tensor& X);
|
||||
std::vector<int> compute_arg_max(std::vector<std::vector<double>>& X);
|
||||
torch::Tensor voting(torch::Tensor& votes);
|
||||
// Attributes
|
||||
unsigned n_models;
|
||||
std::vector<std::unique_ptr<Classifier>> models;
|
||||
std::vector<double> significanceModels;
|
||||
bool predict_voting;
|
||||
};
|
||||
}
|
||||
#endif
|
168
bayesnet/ensembles/XBA2DE.cc
Normal file
@@ -0,0 +1,168 @@
|
||||
// ***************************************************************
|
||||
// SPDX-FileCopyrightText: Copyright 2025 Ricardo Montañana Gómez
|
||||
// SPDX-FileType: SOURCE
|
||||
// SPDX-License-Identifier: MIT
|
||||
// ***************************************************************
|
||||
|
||||
#include <folding.hpp>
|
||||
#include <limits.h>
|
||||
#include "XBA2DE.h"
|
||||
#include "bayesnet/classifiers/XSP2DE.h"
|
||||
#include "bayesnet/utils/TensorUtils.h"
|
||||
|
||||
namespace bayesnet {
|
||||
|
||||
XBA2DE::XBA2DE(bool predict_voting) : Boost(predict_voting) {}
|
||||
std::vector<int> XBA2DE::initializeModels(const Smoothing_t smoothing) {
|
||||
torch::Tensor weights_ = torch::full({m}, 1.0 / m, torch::kFloat64);
|
||||
std::vector<int> featuresSelected = featureSelection(weights_);
|
||||
if (featuresSelected.size() < 2) {
|
||||
notes.push_back("No features selected in initialization");
|
||||
status = ERROR;
|
||||
return std::vector<int>();
|
||||
}
|
||||
for (int i = 0; i < featuresSelected.size() - 1; i++) {
|
||||
for (int j = i + 1; j < featuresSelected.size(); j++) {
|
||||
std::unique_ptr<Classifier> model = std::make_unique<XSp2de>(featuresSelected[i], featuresSelected[j]);
|
||||
model->fit(dataset, features, className, states, weights_, smoothing);
|
||||
add_model(std::move(model), 1.0);
|
||||
}
|
||||
}
|
||||
notes.push_back("Used features in initialization: " + std::to_string(featuresSelected.size()) + " of " +
|
||||
std::to_string(features.size()) + " with " + select_features_algorithm);
|
||||
return featuresSelected;
|
||||
}
|
||||
void XBA2DE::trainModel(const torch::Tensor &weights, const Smoothing_t smoothing) {
|
||||
//
|
||||
// Logging setup
|
||||
//
|
||||
// loguru::set_thread_name("XBA2DE");
|
||||
// loguru::g_stderr_verbosity = loguru::Verbosity_OFF;
|
||||
// loguru::add_file("boostA2DE.log", loguru::Truncate, loguru::Verbosity_MAX);
|
||||
|
||||
// Algorithm based on the adaboost algorithm for classification
|
||||
// as explained in Ensemble methods (Zhi-Hua Zhou, 2012)
|
||||
X_train_ = TensorUtils::to_matrix(X_train);
|
||||
y_train_ = TensorUtils::to_vector<int>(y_train);
|
||||
if (convergence) {
|
||||
X_test_ = TensorUtils::to_matrix(X_test);
|
||||
y_test_ = TensorUtils::to_vector<int>(y_test);
|
||||
}
|
||||
fitted = true;
|
||||
double alpha_t = 0;
|
||||
torch::Tensor weights_ = torch::full({m}, 1.0 / m, torch::kFloat64);
|
||||
bool finished = false;
|
||||
std::vector<int> featuresUsed;
|
||||
if (selectFeatures) {
|
||||
featuresUsed = initializeModels(smoothing);
|
||||
if (featuresUsed.size() == 0) {
|
||||
return;
|
||||
}
|
||||
auto ypred = predict(X_train);
|
||||
std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
|
||||
// Update significance of the models
|
||||
for (int i = 0; i < n_models; ++i) {
|
||||
significanceModels[i] = alpha_t;
|
||||
}
|
||||
if (finished) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
int numItemsPack = 0; // The counter of the models inserted in the current pack
|
||||
// Variables to control the accuracy finish condition
|
||||
double priorAccuracy = 0.0;
|
||||
double improvement = 1.0;
|
||||
double convergence_threshold = 1e-4;
|
||||
int tolerance = 0; // number of consecutive rounds in which the improvement falls below convergence_threshold
|
||||
// Step 0: Set the finish condition
|
||||
// epsilon_t > 0.5 => the learner is worse than chance, so the weight-update policy is inverted
|
||||
// validation error is not decreasing
|
||||
// run out of features
|
||||
bool ascending = order_algorithm == Orders.ASC;
|
||||
std::mt19937 g{173};
|
||||
std::vector<std::pair<int, int>> pairSelection;
|
||||
while (!finished) {
|
||||
// Step 1: Build ranking with mutual information
|
||||
pairSelection = metrics.SelectKPairs(weights_, featuresUsed, ascending, 0); // Get all the pairs sorted
|
||||
if (order_algorithm == Orders.RAND) {
|
||||
std::shuffle(pairSelection.begin(), pairSelection.end(), g);
|
||||
}
|
||||
int k = bisection ? pow(2, tolerance) : 1;
|
||||
int counter = 0; // The model counter of the current pack
|
||||
// VLOG_SCOPE_F(1, "counter=%d k=%d featureSelection.size: %zu", counter, k, featureSelection.size());
|
||||
while (counter++ < k && pairSelection.size() > 0) {
|
||||
auto feature_pair = pairSelection[0];
|
||||
pairSelection.erase(pairSelection.begin());
|
||||
std::unique_ptr<Classifier> model;
|
||||
model = std::make_unique<XSp2de>(feature_pair.first, feature_pair.second);
|
||||
model->fit(dataset, features, className, states, weights_, smoothing);
|
||||
alpha_t = 0.0;
|
||||
if (!block_update) {
|
||||
auto ypred = model->predict(X_train);
|
||||
// Step 3.1: Compute the classifier amount of say
|
||||
std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
|
||||
}
|
||||
// Step 3.4: Store classifier and its accuracy to weigh its future vote
|
||||
numItemsPack++;
|
||||
models.push_back(std::move(model));
|
||||
significanceModels.push_back(alpha_t);
|
||||
n_models++;
|
||||
// VLOG_SCOPE_F(2, "numItemsPack: %d n_models: %d featuresUsed: %zu", numItemsPack, n_models,
|
||||
// featuresUsed.size());
|
||||
}
|
||||
if (block_update) {
|
||||
std::tie(weights_, alpha_t, finished) = update_weights_block(k, y_train, weights_);
|
||||
}
|
||||
if (convergence && !finished) {
|
||||
auto y_val_predict = predict(X_test);
|
||||
double accuracy = (y_val_predict == y_test).sum().item<double>() / (double)y_test.size(0);
|
||||
if (priorAccuracy == 0) {
|
||||
priorAccuracy = accuracy;
|
||||
} else {
|
||||
improvement = accuracy - priorAccuracy;
|
||||
}
|
||||
if (improvement < convergence_threshold) {
|
||||
// VLOG_SCOPE_F(3, " (improvement<threshold) tolerance: %d numItemsPack: %d improvement: %f prior: %f
|
||||
// current: %f", tolerance, numItemsPack, improvement, priorAccuracy, accuracy);
|
||||
tolerance++;
|
||||
} else {
|
||||
// VLOG_SCOPE_F(3, "* (improvement>=threshold) Reset. tolerance: %d numItemsPack: %d improvement: %f
|
||||
// prior: %f current: %f", tolerance, numItemsPack, improvement, priorAccuracy, accuracy);
|
||||
tolerance = 0; // Reset the counter if the model performs better
|
||||
numItemsPack = 0;
|
||||
}
|
||||
if (convergence_best) {
|
||||
// Keep the best accuracy until now as the prior accuracy
|
||||
priorAccuracy = std::max(accuracy, priorAccuracy);
|
||||
} else {
|
||||
// Keep the last accuracy obtained as the prior accuracy
|
||||
priorAccuracy = accuracy;
|
||||
}
|
||||
}
|
||||
// VLOG_SCOPE_F(1, "tolerance: %d featuresUsed.size: %zu features.size: %zu", tolerance, featuresUsed.size(),
|
||||
// features.size());
|
||||
finished = finished || tolerance > maxTolerance || pairSelection.size() == 0;
|
||||
}
|
||||
if (tolerance > maxTolerance) {
|
||||
if (numItemsPack < n_models) {
|
||||
notes.push_back("Convergence threshold reached & " + std::to_string(numItemsPack) + " models eliminated");
|
||||
// VLOG_SCOPE_F(4, "Convergence threshold reached & %d models eliminated of %d", numItemsPack, n_models);
|
||||
for (int i = 0; i < numItemsPack; ++i) {
|
||||
significanceModels.pop_back();
|
||||
models.pop_back();
|
||||
n_models--;
|
||||
}
|
||||
} else {
|
||||
notes.push_back("Convergence threshold reached & 0 models eliminated");
|
||||
// VLOG_SCOPE_F(4, "Convergence threshold reached & 0 models eliminated n_models=%d numItemsPack=%d",
|
||||
// n_models, numItemsPack);
|
||||
}
|
||||
}
|
||||
if (pairSelection.size() > 0) {
|
||||
notes.push_back("Pairs not used in train: " + std::to_string(pairSelection.size()));
|
||||
status = WARNING;
|
||||
}
|
||||
notes.push_back("Number of models: " + std::to_string(n_models));
|
||||
}
|
||||
std::vector<std::string> XBA2DE::graph(const std::string &title) const { return Ensemble::graph(title); }
|
||||
} // namespace bayesnet
|
28
bayesnet/ensembles/XBA2DE.h
Normal file
@@ -0,0 +1,28 @@
|
||||
// ***************************************************************
|
||||
// SPDX-FileCopyrightText: Copyright 2025 Ricardo Montañana Gómez
|
||||
// SPDX-FileType: SOURCE
|
||||
// SPDX-License-Identifier: MIT
|
||||
// ***************************************************************
|
||||
|
||||
#ifndef XBA2DE_H
|
||||
#define XBA2DE_H
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include "Boost.h"
|
||||
namespace bayesnet {
|
||||
class XBA2DE : public Boost {
|
||||
public:
|
||||
explicit XBA2DE(bool predict_voting = false);
|
||||
virtual ~XBA2DE() = default;
|
||||
std::vector<std::string> graph(const std::string& title = "XBA2DE") const override;
|
||||
std::string getVersion() override { return version; };
|
||||
protected:
|
||||
void trainModel(const torch::Tensor& weights, const Smoothing_t smoothing) override;
|
||||
private:
|
||||
std::vector<int> initializeModels(const Smoothing_t smoothing);
|
||||
std::vector<std::vector<int>> X_train_, X_test_;
|
||||
std::vector<int> y_train_, y_test_;
|
||||
std::string version = "0.9.7";
|
||||
};
|
||||
}
|
||||
#endif
|
184
bayesnet/ensembles/XBAODE.cc
Normal file
@@ -0,0 +1,184 @@
|
||||
// ***************************************************************
|
||||
// SPDX-FileCopyrightText: Copyright 2025 Ricardo Montañana Gómez
|
||||
// SPDX-FileType: SOURCE
|
||||
// SPDX-License-Identifier: MIT
|
||||
// ***************************************************************
|
||||
#include "XBAODE.h"
|
||||
#include "bayesnet/classifiers/XSPODE.h"
|
||||
#include "bayesnet/utils/TensorUtils.h"
|
||||
#include <limits.h>
|
||||
#include <random>
|
||||
#include <tuple>
|
||||
|
||||
namespace bayesnet
|
||||
{
|
||||
XBAODE::XBAODE() : Boost(false) {}
|
||||
std::vector<int> XBAODE::initializeModels(const Smoothing_t smoothing)
|
||||
{
|
||||
torch::Tensor weights_ = torch::full({m}, 1.0 / m, torch::kFloat64);
|
||||
std::vector<int> featuresSelected = featureSelection(weights_);
|
||||
for (const int &feature : featuresSelected) {
|
||||
std::unique_ptr<Classifier> model = std::make_unique<XSpode>(feature);
|
||||
model->fit(dataset, features, className, states, weights_, smoothing);
|
||||
add_model(std::move(model), 1.0);
|
||||
}
|
||||
notes.push_back("Used features in initialization: " + std::to_string(featuresSelected.size()) + " of " +
|
||||
std::to_string(features.size()) + " with " + select_features_algorithm);
|
||||
return featuresSelected;
|
||||
}
|
||||
void XBAODE::trainModel(const torch::Tensor &weights, const bayesnet::Smoothing_t smoothing)
|
||||
{
|
||||
X_train_ = TensorUtils::to_matrix(X_train);
|
||||
y_train_ = TensorUtils::to_vector<int>(y_train);
|
||||
if (convergence) {
|
||||
X_test_ = TensorUtils::to_matrix(X_test);
|
||||
y_test_ = TensorUtils::to_vector<int>(y_test);
|
||||
}
|
||||
fitted = true;
|
||||
double alpha_t;
|
||||
torch::Tensor weights_ = torch::full({m}, 1.0 / m, torch::kFloat64);
|
||||
bool finished = false;
|
||||
std::vector<int> featuresUsed;
|
||||
n_models = 0;
|
||||
if (selectFeatures) {
|
||||
featuresUsed = initializeModels(smoothing);
|
||||
auto ypred = predict(X_train_);
|
||||
auto ypred_t = torch::tensor(ypred);
|
||||
std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred_t, weights_);
|
||||
// Update significance of the models
|
||||
// Overwrite the placeholder significance (1.0) of each model added in
// initializeModels with the freshly computed alpha_t
for (size_t i = significanceModels.size() - featuresUsed.size(); i < significanceModels.size(); ++i) {
    significanceModels[i] = alpha_t;
}
|
||||
// VLOG_SCOPE_F(1, "SelectFeatures. alpha_t: %f n_models: %d", alpha_t,
|
||||
// n_models);
|
||||
if (finished) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
int numItemsPack = 0; // The counter of the models inserted in the current pack
|
||||
// Variables to control the accuracy finish condition
|
||||
double priorAccuracy = 0.0;
|
||||
double improvement = 1.0;
|
||||
double convergence_threshold = 1e-4;
|
||||
int tolerance = 0; // number of consecutive rounds in which the improvement falls below convergence_threshold
|
||||
// Step 0: Set the finish condition
|
||||
// epsilon_t > 0.5 => the learner is worse than chance, so the weights_ update policy is inverted
|
||||
// validation error is not decreasing
|
||||
// run out of features
|
||||
bool ascending = order_algorithm == bayesnet::Orders.ASC;
|
||||
std::mt19937 g{173};
|
||||
while (!finished) {
|
||||
// Step 1: Build ranking with mutual information
|
||||
auto featureSelection = metrics.SelectKBestWeighted(weights_, ascending, n); // Get all the features sorted
|
||||
if (order_algorithm == bayesnet::Orders.RAND) {
|
||||
std::shuffle(featureSelection.begin(), featureSelection.end(), g);
|
||||
}
|
||||
// Remove used features
|
||||
featureSelection.erase(remove_if(featureSelection.begin(), featureSelection.end(),
|
||||
[&](auto x) {
|
||||
return std::find(featuresUsed.begin(), featuresUsed.end(), x) !=
|
||||
featuresUsed.end();
|
||||
}),
|
||||
featureSelection.end());
|
||||
int k = bisection ? pow(2, tolerance) : 1;
|
||||
int counter = 0; // The model counter of the current pack
|
||||
// VLOG_SCOPE_F(1, "counter=%d k=%d featureSelection.size: %zu", counter, k,
|
||||
// featureSelection.size());
|
||||
while (counter++ < k && featureSelection.size() > 0) {
|
||||
auto feature = featureSelection[0];
|
||||
featureSelection.erase(featureSelection.begin());
|
||||
std::unique_ptr<Classifier> model;
|
||||
model = std::make_unique<XSpode>(feature);
|
||||
model->fit(dataset, features, className, states, weights_, smoothing);
|
||||
// Alternative: call XSpode's specialized fit method directly:
//   dynamic_cast<XSpode*>(model.get())->fitx(X_train, y_train, weights_, smoothing);
// Debug aid:
//   std::cout << dynamic_cast<XSpode*>(model.get())->to_string() << std::endl;
|
||||
std::vector<int> ypred;
|
||||
if (alpha_block) {
|
||||
//
|
||||
// Compute the prediction with the current ensemble + model
|
||||
//
|
||||
// Add the model to the ensemble
|
||||
add_model(std::move(model), 1.0);
|
||||
// Compute the prediction
|
||||
ypred = predict(X_train_);
|
||||
model = std::move(models.back());
|
||||
// Remove the model from the ensemble
|
||||
remove_last_model();
|
||||
} else {
|
||||
ypred = model->predict(X_train_);
|
||||
}
|
||||
// Step 3.1: Compute the classifier amount of say
|
||||
auto ypred_t = torch::tensor(ypred);
|
||||
std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred_t, weights_);
|
||||
// Step 3.4: Store classifier and its accuracy to weigh its future vote
|
||||
numItemsPack++;
|
||||
featuresUsed.push_back(feature);
|
||||
add_model(std::move(model), alpha_t);
|
||||
// VLOG_SCOPE_F(2, "finished: %d numItemsPack: %d n_models: %d
|
||||
// featuresUsed: %zu", finished, numItemsPack, n_models,
|
||||
// featuresUsed.size());
|
||||
} // End of the pack
|
||||
if (convergence && !finished) {
|
||||
auto y_val_predict = predict(X_test);
|
||||
double accuracy = (y_val_predict == y_test).sum().item<double>() / (double)y_test.size(0);
|
||||
if (priorAccuracy == 0) {
|
||||
priorAccuracy = accuracy;
|
||||
} else {
|
||||
improvement = accuracy - priorAccuracy;
|
||||
}
|
||||
if (improvement < convergence_threshold) {
|
||||
// VLOG_SCOPE_F(3, " (improvement<threshold) tolerance: %d
|
||||
// numItemsPack: %d improvement: %f prior: %f current: %f", tolerance,
|
||||
// numItemsPack, improvement, priorAccuracy, accuracy);
|
||||
tolerance++;
|
||||
} else {
|
||||
// VLOG_SCOPE_F(3, "* (improvement>=threshold) Reset. tolerance: %d
|
||||
// numItemsPack: %d improvement: %f prior: %f current: %f", tolerance,
|
||||
// numItemsPack, improvement, priorAccuracy, accuracy);
|
||||
tolerance = 0; // Reset the counter if the model performs better
|
||||
numItemsPack = 0;
|
||||
}
|
||||
if (convergence_best) {
|
||||
// Keep the best accuracy until now as the prior accuracy
|
||||
priorAccuracy = std::max(accuracy, priorAccuracy);
|
||||
} else {
|
||||
// Keep the last accuracy obtained as the prior accuracy
|
||||
priorAccuracy = accuracy;
|
||||
}
|
||||
}
|
||||
// VLOG_SCOPE_F(1, "tolerance: %d featuresUsed.size: %zu features.size:
|
||||
// %zu", tolerance, featuresUsed.size(), features.size());
|
||||
finished = finished || tolerance > maxTolerance || featuresUsed.size() == features.size();
|
||||
}
|
||||
if (tolerance > maxTolerance) {
|
||||
if (numItemsPack < n_models) {
|
||||
notes.push_back("Convergence threshold reached & " + std::to_string(numItemsPack) + " models eliminated");
|
||||
// VLOG_SCOPE_F(4, "Convergence threshold reached & %d models eliminated
|
||||
// of %d", numItemsPack, n_models);
|
||||
// Drop the numItemsPack models added since the last improvement
for (int i = 0; i < numItemsPack; ++i) {
|
||||
remove_last_model();
|
||||
}
|
||||
// VLOG_SCOPE_F(4, "*Convergence threshold %d models left & %d features
|
||||
// used.", n_models, featuresUsed.size());
|
||||
} else {
|
||||
notes.push_back("Convergence threshold reached & 0 models eliminated");
|
||||
// VLOG_SCOPE_F(4, "Convergence threshold reached & 0 models eliminated
|
||||
// n_models=%d numItemsPack=%d", n_models, numItemsPack);
|
||||
}
|
||||
}
|
||||
if (featuresUsed.size() != features.size()) {
|
||||
notes.push_back("Used features in train: " + std::to_string(featuresUsed.size()) + " of " +
|
||||
std::to_string(features.size()));
|
||||
status = bayesnet::WARNING;
|
||||
}
|
||||
notes.push_back("Number of models: " + std::to_string(n_models));
|
||||
}
|
||||
} // namespace bayesnet
|
27
bayesnet/ensembles/XBAODE.h
Normal file
@@ -0,0 +1,27 @@
|
||||
// ***************************************************************
|
||||
// SPDX-FileCopyrightText: Copyright 2025 Ricardo Montañana Gómez
|
||||
// SPDX-FileType: SOURCE
|
||||
// SPDX-License-Identifier: MIT
|
||||
// ***************************************************************
|
||||
|
||||
#ifndef XBAODE_H
|
||||
#define XBAODE_H
|
||||
#include <vector>
|
||||
#include <cmath>
|
||||
#include "Boost.h"
|
||||
|
||||
namespace bayesnet {
|
||||
class XBAODE : public Boost {
|
||||
public:
|
||||
XBAODE();
|
||||
std::string getVersion() override { return version; };
|
||||
protected:
|
||||
void trainModel(const torch::Tensor& weights, const bayesnet::Smoothing_t smoothing) override;
|
||||
private:
|
||||
std::vector<int> initializeModels(const Smoothing_t smoothing);
|
||||
std::vector<std::vector<int>> X_train_, X_test_;
|
||||
std::vector<int> y_train_, y_test_;
|
||||
std::string version = "0.9.7";
|
||||
};
|
||||
}
|
||||
#endif // XBAODE_H
|
78
bayesnet/feature_selection/CFS.cc
Normal file
@@ -0,0 +1,78 @@
|
||||
// ***************************************************************
|
||||
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
|
||||
// SPDX-FileType: SOURCE
|
||||
// SPDX-License-Identifier: MIT
|
||||
// ***************************************************************
|
||||
|
||||
#include <limits>
|
||||
#include "bayesnet/utils/bayesnetUtils.h"
|
||||
#include "CFS.h"
|
||||
namespace bayesnet {
|
||||
void CFS::fit()
|
||||
{
|
||||
initialize();
|
||||
computeSuLabels();
|
||||
auto featureOrder = argsort(suLabels); // sort descending order
|
||||
auto continueCondition = true;
|
||||
auto feature = featureOrder[0];
|
||||
selectedFeatures.push_back(feature);
|
||||
selectedScores.push_back(suLabels[feature]);
|
||||
featureOrder.erase(featureOrder.begin());
|
||||
while (continueCondition) {
|
||||
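// Greedy forward selection: tentatively append each remaining feature,
// score the candidate subset with computeMeritCFS(), and keep the single
// feature that yields the highest merit.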
double merit = std::numeric_limits<double>::lowest();
|
||||
int bestFeature = -1;
|
||||
for (auto feature : featureOrder) {
|
||||
selectedFeatures.push_back(feature);
|
||||
// Compute merit with selectedFeatures
|
||||
auto meritNew = computeMeritCFS();
|
||||
if (meritNew > merit) {
|
||||
merit = meritNew;
|
||||
bestFeature = feature;
|
||||
}
|
||||
selectedFeatures.pop_back();
|
||||
}
|
||||
if (bestFeature == -1) {
|
||||
// meritNew must have been NaN (constant features), so no candidate improves the merit
|
||||
break;
|
||||
}
|
||||
selectedFeatures.push_back(bestFeature);
|
||||
selectedScores.push_back(merit);
|
||||
featureOrder.erase(remove(featureOrder.begin(), featureOrder.end(), bestFeature), featureOrder.end());
|
||||
continueCondition = computeContinueCondition(featureOrder);
|
||||
}
|
||||
fitted = true;
|
||||
}
|
||||
bool CFS::computeContinueCondition(const std::vector<int>& featureOrder)
|
||||
{
|
||||
if (selectedFeatures.size() == maxFeatures || featureOrder.size() == 0) {
|
||||
return false;
|
||||
}
|
||||
if (selectedScores.size() >= 5) {
|
||||
/*
|
||||
"To prevent the best first search from exploring the entire
|
||||
feature subset search space, a stopping criterion is imposed.
|
||||
The search will terminate if five consecutive fully expanded
|
||||
subsets show no improvement over the current best subset."
|
||||
as stated in Mark A. Hall's thesis
|
||||
*/
|
||||
double item_ant = std::numeric_limits<double>::lowest();
|
||||
int num = 0;
|
||||
std::vector<double> lastFive(selectedScores.end() - 5, selectedScores.end());
|
||||
for (auto item : lastFive) {
|
||||
if (item_ant == std::numeric_limits<double>::lowest()) {
|
||||
item_ant = item;
|
||||
}
|
||||
if (item > item_ant) {
|
||||
break;
|
||||
} else {
|
||||
num++;
|
||||
item_ant = item;
|
||||
}
|
||||
}
|
||||
if (num == 5) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
26
bayesnet/feature_selection/CFS.h
Normal file
@@ -0,0 +1,26 @@
|
||||
// ***************************************************************
|
||||
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
|
||||
// SPDX-FileType: SOURCE
|
||||
// SPDX-License-Identifier: MIT
|
||||
// ***************************************************************
|
||||
|
||||
#ifndef CFS_H
|
||||
#define CFS_H
|
||||
#include <torch/torch.h>
|
||||
#include <vector>
|
||||
#include "bayesnet/feature_selection/FeatureSelect.h"
|
||||
namespace bayesnet {
|
||||
class CFS : public FeatureSelect {
|
||||
public:
|
||||
// dataset is a n+1xm tensor of integers where dataset[-1] is the y std::vector
|
||||
CFS(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights) :
|
||||
FeatureSelect(samples, features, className, maxFeatures, classNumStates, weights)
|
||||
{
|
||||
}
|
||||
virtual ~CFS() {};
|
||||
void fit() override;
|
||||
private:
|
||||
bool computeContinueCondition(const std::vector<int>& featureOrder);
|
||||
};
|
||||
}
|
||||
#endif
|
50
bayesnet/feature_selection/FCBF.cc
Normal file
@@ -0,0 +1,50 @@
|
||||
// ***************************************************************
|
||||
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
|
||||
// SPDX-FileType: SOURCE
|
||||
// SPDX-License-Identifier: MIT
|
||||
// ***************************************************************
|
||||
|
||||
#include "bayesnet/utils/bayesnetUtils.h"
|
||||
#include "FCBF.h"
|
||||
namespace bayesnet {
|
||||
|
||||
FCBF::FCBF(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights, const double threshold) :
|
||||
FeatureSelect(samples, features, className, maxFeatures, classNumStates, weights), threshold(threshold)
|
||||
{
|
||||
if (threshold < 1e-7) {
|
||||
throw std::invalid_argument("Threshold cannot be less than 1e-7");
|
||||
}
|
||||
}
|
||||
void FCBF::fit()
|
||||
{
|
||||
initialize();
|
||||
computeSuLabels();
|
||||
auto featureOrder = argsort(suLabels); // sort descending order
|
||||
auto featureOrderCopy = featureOrder;
|
||||
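// FCBF predominance criterion: features are visited in decreasing SU with
// the class; a later feature j is marked redundant (suLabels[j] = 0) when
// some already-kept feature i satisfies SU(i, j) >= SU(j, class).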
for (const auto& feature : featureOrder) {
|
||||
// Don't self compare
|
||||
featureOrderCopy.erase(featureOrderCopy.begin());
|
||||
if (suLabels.at(feature) == 0.0) {
|
||||
// The feature has been removed from the list
|
||||
continue;
|
||||
}
|
||||
if (suLabels.at(feature) < threshold) {
|
||||
break;
|
||||
}
|
||||
// Remove redundant features
|
||||
for (const auto& featureCopy : featureOrderCopy) {
|
||||
double value = computeSuFeatures(feature, featureCopy);
|
||||
if (value >= suLabels.at(featureCopy)) {
|
||||
// Remove feature from list
|
||||
suLabels[featureCopy] = 0.0;
|
||||
}
|
||||
}
|
||||
selectedFeatures.push_back(feature);
|
||||
selectedScores.push_back(suLabels[feature]);
|
||||
if (selectedFeatures.size() == maxFeatures) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
fitted = true;
|
||||
}
|
||||
}
|
23
bayesnet/feature_selection/FCBF.h
Normal file
@@ -0,0 +1,23 @@
|
||||
// ***************************************************************
|
||||
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
|
||||
// SPDX-FileType: SOURCE
|
||||
// SPDX-License-Identifier: MIT
|
||||
// ***************************************************************
|
||||
|
||||
#ifndef FCBF_H
|
||||
#define FCBF_H
|
||||
#include <torch/torch.h>
|
||||
#include <vector>
|
||||
#include "bayesnet/feature_selection/FeatureSelect.h"
|
||||
namespace bayesnet {
|
||||
class FCBF : public FeatureSelect {
|
||||
public:
|
||||
// dataset is a n+1xm tensor of integers where dataset[-1] is the y std::vector
|
||||
FCBF(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights, const double threshold);
|
||||
virtual ~FCBF() {};
|
||||
void fit() override;
|
||||
private:
|
||||
double threshold = -1;
|
||||
};
|
||||
}
|
||||
#endif
|
84
bayesnet/feature_selection/FeatureSelect.cc
Normal file
@@ -0,0 +1,84 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#include <limits>
#include "bayesnet/utils/bayesnetUtils.h"
#include "FeatureSelect.h"
namespace bayesnet {
    FeatureSelect::FeatureSelect(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights) :
        Metrics(samples, features, className, classNumStates), maxFeatures(maxFeatures == 0 ? samples.size(0) - 1 : maxFeatures), weights(weights)
    {
    }
    void FeatureSelect::initialize()
    {
        selectedFeatures.clear();
        selectedScores.clear();
    }
    double FeatureSelect::symmetricalUncertainty(int a, int b)
    {
        /*
        Compute symmetrical uncertainty. Normalizes the information gain (mutual
        information) with the entropies of the features in order to compensate
        for the bias due to high-cardinality features. Range: [0, 1].
        (https://www.sciencedirect.com/science/article/pii/S0020025519303603)
        */
        auto x = samples.index({ a, "..." });
        auto y = samples.index({ b, "..." });
        auto mu = mutualInformation(x, y, weights);
        auto hx = entropy(x, weights);
        auto hy = entropy(y, weights);
        return 2.0 * mu / (hx + hy);
    }
    void FeatureSelect::computeSuLabels()
    {
        // Compute Symmetrical Uncertainty between features and labels
        // https://en.wikipedia.org/wiki/Symmetric_uncertainty
        for (int i = 0; i < features.size(); ++i) {
            suLabels.push_back(symmetricalUncertainty(i, -1));
        }
    }
    double FeatureSelect::computeSuFeatures(const int firstFeature, const int secondFeature)
    {
        // Compute Symmetrical Uncertainty between a pair of features, caching the result
        // https://en.wikipedia.org/wiki/Symmetric_uncertainty
        try {
            return suFeatures.at({ firstFeature, secondFeature });
        }
        catch (const std::out_of_range& e) {
            double result = symmetricalUncertainty(firstFeature, secondFeature);
            suFeatures[{firstFeature, secondFeature}] = result;
            return result;
        }
    }
    double FeatureSelect::computeMeritCFS()
    {
        double rcf = 0;
        for (auto feature : selectedFeatures) {
            rcf += suLabels[feature];
        }
        double rff = 0;
        int n = selectedFeatures.size();
        for (const auto& item : doCombinations(selectedFeatures)) {
            rff += computeSuFeatures(item.first, item.second);
        }
        return rcf / sqrt(n + (n * n - n) * rff);
    }
    std::vector<int> FeatureSelect::getFeatures() const
    {
        if (!fitted) {
            throw std::runtime_error("FeatureSelect not fitted");
        }
        return selectedFeatures;
    }
    std::vector<double> FeatureSelect::getScores() const
    {
        if (!fitted) {
            throw std::runtime_error("FeatureSelect not fitted");
        }
        return selectedScores;
    }
}
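For reference, the two quantities computed above have the following textbook form (Hall's correlation-based feature selection); note that computeMeritCFS plugs the summed SU values rcf and rff directly into the expression rather than the averaged ones, so it is a variant of, not a transcription of, the second formula:

SU(X, Y) = \frac{2\, I(X; Y)}{H(X) + H(Y)}, \qquad \mathrm{Merit}_S = \frac{k\, \overline{r_{cf}}}{\sqrt{k + k(k - 1)\, \overline{r_{ff}}}}

where k is the number of selected features, \overline{r_{cf}} is the average feature-class SU and \overline{r_{ff}} is the average feature-feature SU.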
36
bayesnet/feature_selection/FeatureSelect.h
Normal file
@@ -0,0 +1,36 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef FEATURE_SELECT_H
#define FEATURE_SELECT_H
#include <map>
#include <torch/torch.h>
#include <vector>
#include "bayesnet/utils/BayesMetrics.h"
namespace bayesnet {
    class FeatureSelect : public Metrics {
    public:
        // dataset is a n+1xm tensor of integers where dataset[-1] is the y std::vector
        FeatureSelect(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights);
        virtual ~FeatureSelect() {};
        virtual void fit() = 0;
        std::vector<int> getFeatures() const;
        std::vector<double> getScores() const;
    protected:
        void initialize();
        void computeSuLabels();
        double computeSuFeatures(const int a, const int b);
        double symmetricalUncertainty(int a, int b);
        double computeMeritCFS();
        const torch::Tensor& weights;
        int maxFeatures;
        std::vector<int> selectedFeatures;
        std::vector<double> selectedScores;
        std::vector<double> suLabels;
        std::map<std::pair<int, int>, double> suFeatures;
        bool fitted = false;
    };
}
#endif
53
bayesnet/feature_selection/IWSS.cc
Normal file
@@ -0,0 +1,53 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#include <limits>
#include "bayesnet/utils/bayesnetUtils.h"
#include "IWSS.h"
namespace bayesnet {
    IWSS::IWSS(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights, const double threshold) :
        FeatureSelect(samples, features, className, maxFeatures, classNumStates, weights), threshold(threshold)
    {
        if (threshold < 0 || threshold > .5) {
            throw std::invalid_argument("Threshold has to be in [0, 0.5]");
        }
    }
    void IWSS::fit()
    {
        initialize();
        computeSuLabels();
        auto featureOrder = argsort(suLabels); // sort descending order
        auto featureOrderCopy = featureOrder;
        // Add first and second features to result
        // First with its own score
        auto first_feature = pop_first(featureOrderCopy);
        selectedFeatures.push_back(first_feature);
        selectedScores.push_back(suLabels.at(first_feature));
        // Second with the score of the candidates
        selectedFeatures.push_back(pop_first(featureOrderCopy));
        auto merit = computeMeritCFS();
        selectedScores.push_back(merit);
        for (const auto feature : featureOrderCopy) {
            selectedFeatures.push_back(feature);
            // Compute merit with selectedFeatures
            auto meritNew = computeMeritCFS();
            double delta = merit != 0.0 ? std::abs(merit - meritNew) / merit : 0.0;
            if (meritNew > merit || delta < threshold) {
                if (meritNew > merit) {
                    merit = meritNew;
                }
                selectedScores.push_back(meritNew);
            } else {
                selectedFeatures.pop_back();
                break;
            }
            if (selectedFeatures.size() == maxFeatures) {
                break;
            }
        }
        fitted = true;
    }
}
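A worked instance of the acceptance rule above, with threshold = 0.05: if the current merit is 0.40 and adding the next feature yields meritNew = 0.39, then delta = |0.40 - 0.39| / 0.40 = 0.025 < 0.05, so the feature is kept even though the merit dropped slightly; if instead meritNew = 0.36, delta = 0.10 fails the test, the candidate is popped back off and the search stops.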
23
bayesnet/feature_selection/IWSS.h
Normal file
@@ -0,0 +1,23 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef IWSS_H
#define IWSS_H
#include <vector>
#include <torch/torch.h>
#include "FeatureSelect.h"
namespace bayesnet {
    class IWSS : public FeatureSelect {
    public:
        // dataset is a n+1xm tensor of integers where dataset[-1] is the y std::vector
        IWSS(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights, const double threshold);
        virtual ~IWSS() {};
        void fit() override;
    private:
        double threshold = -1;
    };
}
#endif
506
bayesnet/network/Network.cc
Normal file
@@ -0,0 +1,506 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#include <thread>
#include <sstream>
#include <numeric>
#include <algorithm>
#include "Network.h"
#include "bayesnet/utils/bayesnetUtils.h"
#include "bayesnet/utils/CountingSemaphore.h"
#include <pthread.h>
#include <fstream>
namespace bayesnet {
    Network::Network() : fitted{ false }, classNumStates{ 0 }
    {
    }
    Network::Network(const Network& other) : features(other.features), className(other.className), classNumStates(other.getClassNumStates()),
        fitted(other.fitted), samples(other.samples)
    {
        if (samples.defined())
            samples = samples.clone();
        for (const auto& node : other.nodes) {
            nodes[node.first] = std::make_unique<Node>(*node.second);
        }
    }
    void Network::initialize()
    {
        features.clear();
        className = "";
        classNumStates = 0;
        fitted = false;
        nodes.clear();
        samples = torch::Tensor();
    }
    torch::Tensor& Network::getSamples()
    {
        return samples;
    }
    void Network::addNode(const std::string& name)
    {
        if (fitted) {
            throw std::invalid_argument("Cannot add node to a fitted network. Initialize first.");
        }
        if (name == "") {
            throw std::invalid_argument("Node name cannot be empty");
        }
        if (nodes.find(name) != nodes.end()) {
            return;
        }
        if (find(features.begin(), features.end(), name) == features.end()) {
            features.push_back(name);
        }
        nodes[name] = std::make_unique<Node>(name);
    }
    std::vector<std::string> Network::getFeatures() const
    {
        return features;
    }
    int Network::getClassNumStates() const
    {
        return classNumStates;
    }
    int Network::getStates() const
    {
        int result = 0;
        for (auto& node : nodes) {
            result += node.second->getNumStates();
        }
        return result;
    }
    std::string Network::getClassName() const
    {
        return className;
    }
    bool Network::isCyclic(const std::string& nodeId, std::unordered_set<std::string>& visited, std::unordered_set<std::string>& recStack)
    {
        if (visited.find(nodeId) == visited.end()) // if node hasn't been visited yet
        {
            visited.insert(nodeId);
            recStack.insert(nodeId);
            for (Node* child : nodes[nodeId]->getChildren()) {
                if (visited.find(child->getName()) == visited.end() && isCyclic(child->getName(), visited, recStack))
                    return true;
                if (recStack.find(child->getName()) != recStack.end())
                    return true;
            }
        }
        recStack.erase(nodeId); // remove node from recursion stack before function ends
        return false;
    }
    void Network::addEdge(const std::string& parent, const std::string& child)
    {
        if (fitted) {
            throw std::invalid_argument("Cannot add edge to a fitted network. Initialize first.");
        }
        if (nodes.find(parent) == nodes.end()) {
            throw std::invalid_argument("Parent node " + parent + " does not exist");
        }
        if (nodes.find(child) == nodes.end()) {
            throw std::invalid_argument("Child node " + child + " does not exist");
        }
        // Check if the edge is already in the graph
        for (auto& node : nodes[parent]->getChildren()) {
            if (node->getName() == child) {
                throw std::invalid_argument("Edge " + parent + " -> " + child + " already exists");
            }
        }
        // Temporarily add edge to check for cycles
        nodes[parent]->addChild(nodes[child].get());
        nodes[child]->addParent(nodes[parent].get());
        std::unordered_set<std::string> visited;
        std::unordered_set<std::string> recStack;
        if (isCyclic(nodes[child]->getName(), visited, recStack)) // if adding this edge forms a cycle
        {
            // remove problematic edge
            nodes[parent]->removeChild(nodes[child].get());
            nodes[child]->removeParent(nodes[parent].get());
            throw std::invalid_argument("Adding this edge forms a cycle in the graph.");
        }
    }
    std::map<std::string, std::unique_ptr<Node>>& Network::getNodes()
    {
        return nodes;
    }
    void Network::checkFitData(int n_samples, int n_features, int n_samples_y, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights)
    {
        if (weights.size(0) != n_samples) {
            throw std::invalid_argument("Weights (" + std::to_string(weights.size(0)) + ") must have the same number of elements as samples (" + std::to_string(n_samples) + ") in Network::fit");
        }
        if (n_samples != n_samples_y) {
            throw std::invalid_argument("X and y must have the same number of samples in Network::fit (" + std::to_string(n_samples) + " != " + std::to_string(n_samples_y) + ")");
        }
        if (n_features != featureNames.size()) {
            throw std::invalid_argument("X and features must have the same number of features in Network::fit (" + std::to_string(n_features) + " != " + std::to_string(featureNames.size()) + ")");
        }
        if (features.size() == 0) {
            throw std::invalid_argument("The network has not been initialized. You must call addNode() before calling fit()");
        }
        if (n_features != features.size() - 1) {
            throw std::invalid_argument("X and local features must have the same number of features in Network::fit (" + std::to_string(n_features) + " != " + std::to_string(features.size() - 1) + ")");
        }
        if (find(features.begin(), features.end(), className) == features.end()) {
            throw std::invalid_argument("Class Name not found in Network::features");
        }
        for (auto& feature : featureNames) {
            if (find(features.begin(), features.end(), feature) == features.end()) {
                throw std::invalid_argument("Feature " + feature + " not found in Network::features");
            }
            if (states.find(feature) == states.end()) {
                throw std::invalid_argument("Feature " + feature + " not found in states");
            }
        }
    }
    void Network::setStates(const std::map<std::string, std::vector<int>>& states)
    {
        // Set states to every Node in the network
        for_each(features.begin(), features.end(), [this, &states](const std::string& feature) {
            nodes.at(feature)->setNumStates(states.at(feature).size());
            });
        classNumStates = nodes.at(className)->getNumStates();
    }
    // X comes in nxm, where n is the number of features and m the number of samples
    void Network::fit(const torch::Tensor& X, const torch::Tensor& y, const torch::Tensor& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing)
    {
        checkFitData(X.size(1), X.size(0), y.size(0), featureNames, className, states, weights);
        this->className = className;
        torch::Tensor ytmp = torch::transpose(y.view({ y.size(0), 1 }), 0, 1);
        samples = torch::cat({ X, ytmp }, 0);
        completeFit(states, weights, smoothing);
    }
    void Network::fit(const torch::Tensor& samples, const torch::Tensor& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing)
    {
        checkFitData(samples.size(1), samples.size(0) - 1, samples.size(1), featureNames, className, states, weights);
        this->className = className;
        this->samples = samples;
        completeFit(states, weights, smoothing);
    }
    // input_data comes in nxm, where n is the number of features and m the number of samples
    void Network::fit(const std::vector<std::vector<int>>& input_data, const std::vector<int>& labels, const std::vector<double>& weights_, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing)
    {
        const torch::Tensor weights = torch::tensor(weights_, torch::kFloat64);
        checkFitData(input_data[0].size(), input_data.size(), labels.size(), featureNames, className, states, weights);
        this->className = className;
        // Build the tensor of samples ((n+1)xm, the extra row holds the class)
        samples = torch::zeros({ static_cast<int>(input_data.size() + 1), static_cast<int>(input_data[0].size()) }, torch::kInt32);
        for (int i = 0; i < featureNames.size(); ++i) {
            samples.index_put_({ i, "..." }, torch::tensor(input_data[i], torch::kInt32));
        }
        samples.index_put_({ -1, "..." }, torch::tensor(labels, torch::kInt32));
        completeFit(states, weights, smoothing);
    }
    void Network::completeFit(const std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const Smoothing_t smoothing)
    {
        setStates(states);
        std::vector<std::thread> threads;
        auto& semaphore = CountingSemaphore::getInstance();
        const double n_samples = static_cast<double>(samples.size(1));
        auto worker = [&](std::pair<const std::string, std::unique_ptr<Node>>& node, int i) {
            std::string threadName = "FitWorker-" + std::to_string(i);
#if defined(__linux__)
            pthread_setname_np(pthread_self(), threadName.c_str());
#else
            pthread_setname_np(threadName.c_str());
#endif
            double numStates = static_cast<double>(node.second->getNumStates());
            double smoothing_factor;
            switch (smoothing) {
                case Smoothing_t::ORIGINAL:
                    smoothing_factor = 1.0 / n_samples;
                    break;
                case Smoothing_t::LAPLACE:
                    smoothing_factor = 1.0;
                    break;
                case Smoothing_t::CESTNIK:
                    smoothing_factor = 1 / numStates;
                    break;
                default:
                    smoothing_factor = 0.0; // No smoothing
            }
            node.second->computeCPT(samples, features, smoothing_factor, weights);
            semaphore.release();
        };
        int i = 0;
        for (auto& node : nodes) {
            semaphore.acquire();
            threads.emplace_back(worker, std::ref(node), i++);
        }
        for (auto& thread : threads) {
            thread.join();
        }
        fitted = true;
    }
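    // A note on the smoothing factors above (derived from computeCPT in Node.cc):
    // every CPT cell starts at alpha and is then incremented with the weighted
    // counts, and the table is normalized over the node's own dimension, so each
    // conditional probability is effectively
    //   P(x | pa) = (N(x, pa) + alpha) / (N(pa) + alpha * |X|)
    // with alpha = 1/m for ORIGINAL (m = number of samples), alpha = 1 for
    // LAPLACE, alpha = 1/|X| for CESTNIK, and alpha = 0 for no smoothing.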
    torch::Tensor Network::predict_tensor(const torch::Tensor& samples, const bool proba)
    {
        if (!fitted) {
            throw std::logic_error("You must call fit() before calling predict()");
        }
        // Ensure the sample size is equal to the number of features
        if (samples.size(0) != features.size() - 1) {
            throw std::invalid_argument("(T) Sample size (" + std::to_string(samples.size(0)) +
                ") does not match the number of features (" + std::to_string(features.size() - 1) + ")");
        }
        torch::Tensor result;
        std::vector<std::thread> threads;
        std::mutex mtx;
        auto& semaphore = CountingSemaphore::getInstance();
        result = torch::zeros({ samples.size(1), classNumStates }, torch::kFloat64);
        auto worker = [&](const torch::Tensor& sample, int i) {
            std::string threadName = "PredictWorker-" + std::to_string(i);
#if defined(__linux__)
            pthread_setname_np(pthread_self(), threadName.c_str());
#else
            pthread_setname_np(threadName.c_str());
#endif
            auto psample = predict_sample(sample);
            auto temp = torch::tensor(psample, torch::kFloat64);
            {
                std::lock_guard<std::mutex> lock(mtx);
                result.index_put_({ i, "..." }, temp);
            }
            semaphore.release();
        };
        for (int i = 0; i < samples.size(1); ++i) {
            semaphore.acquire();
            const torch::Tensor sample = samples.index({ "...", i });
            threads.emplace_back(worker, sample, i);
        }
        for (auto& thread : threads) {
            thread.join();
        }
        if (proba)
            return result;
        return result.argmax(1);
    }
    // Return mxn tensor of probabilities
    torch::Tensor Network::predict_proba(const torch::Tensor& samples)
    {
        return predict_tensor(samples, true);
    }

    // Return mx1 tensor of predictions
    torch::Tensor Network::predict(const torch::Tensor& samples)
    {
        return predict_tensor(samples, false);
    }

    // Return mx1 std::vector of predictions
    // tsamples is nxm std::vector of samples
    std::vector<int> Network::predict(const std::vector<std::vector<int>>& tsamples)
    {
        if (!fitted) {
            throw std::logic_error("You must call fit() before calling predict()");
        }
        // Ensure the sample size is equal to the number of features
        if (tsamples.size() != features.size() - 1) {
            throw std::invalid_argument("(V) Sample size (" + std::to_string(tsamples.size()) +
                ") does not match the number of features (" + std::to_string(features.size() - 1) + ")");
        }
        std::vector<int> predictions(tsamples[0].size(), 0);
        std::vector<int> sample;
        std::vector<std::thread> threads;
        auto& semaphore = CountingSemaphore::getInstance();
        auto worker = [&](const std::vector<int>& sample, const int row, int& prediction) {
            std::string threadName = "(V)PWorker-" + std::to_string(row);
#if defined(__linux__)
            pthread_setname_np(pthread_self(), threadName.c_str());
#else
            pthread_setname_np(threadName.c_str());
#endif
            auto classProbabilities = predict_sample(sample);
            auto maxElem = max_element(classProbabilities.begin(), classProbabilities.end());
            int predictedClass = distance(classProbabilities.begin(), maxElem);
            prediction = predictedClass;
            semaphore.release();
        };
        for (int row = 0; row < tsamples[0].size(); ++row) {
            sample.clear();
            for (int col = 0; col < tsamples.size(); ++col) {
                sample.push_back(tsamples[col][row]);
            }
            semaphore.acquire();
            threads.emplace_back(worker, sample, row, std::ref(predictions[row]));
        }
        for (auto& thread : threads) {
            thread.join();
        }
        return predictions;
    }
    // Return mxn std::vector of probabilities
    // tsamples is nxm std::vector of samples
    std::vector<std::vector<double>> Network::predict_proba(const std::vector<std::vector<int>>& tsamples)
    {
        if (!fitted) {
            throw std::logic_error("You must call fit() before calling predict_proba()");
        }
        // Ensure the sample size is equal to the number of features
        if (tsamples.size() != features.size() - 1) {
            throw std::invalid_argument("(V) Sample size (" + std::to_string(tsamples.size()) +
                ") does not match the number of features (" + std::to_string(features.size() - 1) + ")");
        }
        std::vector<std::vector<double>> predictions(tsamples[0].size(), std::vector<double>(classNumStates, 0.0));
        std::vector<int> sample;
        std::vector<std::thread> threads;
        auto& semaphore = CountingSemaphore::getInstance();
        auto worker = [&](const std::vector<int>& sample, int row, std::vector<double>& predictions) {
            std::string threadName = "(V)PWorker-" + std::to_string(row);
#if defined(__linux__)
            pthread_setname_np(pthread_self(), threadName.c_str());
#else
            pthread_setname_np(threadName.c_str());
#endif
            std::vector<double> classProbabilities = predict_sample(sample);
            predictions = classProbabilities;
            semaphore.release();
        };
        for (int row = 0; row < tsamples[0].size(); ++row) {
            sample.clear();
            for (int col = 0; col < tsamples.size(); ++col) {
                sample.push_back(tsamples[col][row]);
            }
            semaphore.acquire();
            threads.emplace_back(worker, sample, row, std::ref(predictions[row]));
        }
        for (auto& thread : threads) {
            thread.join();
        }
        return predictions;
    }
    double Network::score(const std::vector<std::vector<int>>& tsamples, const std::vector<int>& labels)
    {
        std::vector<int> y_pred = predict(tsamples);
        int correct = 0;
        for (int i = 0; i < y_pred.size(); ++i) {
            if (y_pred[i] == labels[i]) {
                correct++;
            }
        }
        return (double)correct / y_pred.size();
    }
    // Return 1xn std::vector of probabilities
    std::vector<double> Network::predict_sample(const std::vector<int>& sample)
    {
        std::map<std::string, int> evidence;
        for (int i = 0; i < sample.size(); ++i) {
            evidence[features[i]] = sample[i];
        }
        return exactInference(evidence);
    }
    // Return 1xn std::vector of probabilities
    std::vector<double> Network::predict_sample(const torch::Tensor& sample)
    {
        std::map<std::string, int> evidence;
        for (int i = 0; i < sample.size(0); ++i) {
            evidence[features[i]] = sample[i].item<int>();
        }
        return exactInference(evidence);
    }
    std::vector<double> Network::exactInference(std::map<std::string, int>& evidence)
    {
        std::vector<double> result(classNumStates, 0.0);
        auto completeEvidence = std::map<std::string, int>(evidence);
        for (int i = 0; i < classNumStates; ++i) {
            completeEvidence[getClassName()] = i;
            double partial = 1.0;
            for (auto& node : getNodes()) {
                partial *= node.second->getFactorValue(completeEvidence);
            }
            result[i] = partial;
        }
        // Normalize result
        double sum = std::accumulate(result.begin(), result.end(), 0.0);
        transform(result.begin(), result.end(), result.begin(), [sum](const double& value) { return value / sum; });
        return result;
    }
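    // exactInference evaluates, for each class value c, the product of all the
    // network factors with the class fixed to c,
    //   P(c, e) = prod_v P(v = e_v | parents(v)),
    // and normalizes over c, so result[c] = P(c | e). This is exact here because
    // every feature is observed in the evidence, so no marginalization over
    // hidden variables is required.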
    std::vector<std::string> Network::show() const
    {
        std::vector<std::string> result;
        // Draw the network
        for (auto& node : nodes) {
            std::string line = node.first + " -> ";
            for (auto child : node.second->getChildren()) {
                line += child->getName() + ", ";
            }
            result.push_back(line);
        }
        return result;
    }
    std::vector<std::string> Network::graph(const std::string& title) const
    {
        auto output = std::vector<std::string>();
        auto prefix = "digraph BayesNet {\nlabel=<BayesNet ";
        auto suffix = ">\nfontsize=30\nfontcolor=blue\nlabelloc=t\nlayout=circo\n";
        std::string header = prefix + title + suffix;
        output.push_back(header);
        for (auto& node : nodes) {
            auto result = node.second->graph(className);
            output.insert(output.end(), result.begin(), result.end());
        }
        output.push_back("}\n");
        return output;
    }
    std::vector<std::pair<std::string, std::string>> Network::getEdges() const
    {
        auto edges = std::vector<std::pair<std::string, std::string>>();
        for (const auto& node : nodes) {
            auto head = node.first;
            for (const auto& child : node.second->getChildren()) {
                auto tail = child->getName();
                edges.push_back({ head, tail });
            }
        }
        return edges;
    }
    int Network::getNumEdges() const
    {
        return getEdges().size();
    }
    std::vector<std::string> Network::topological_sort()
    {
        /* Check that all the parents of every node are placed before the node itself */
        auto result = features;
        result.erase(remove(result.begin(), result.end(), className), result.end());
        bool ending{ false };
        while (!ending) {
            ending = true;
            for (auto feature : features) {
                auto fathers = nodes[feature]->getParents();
                for (const auto& father : fathers) {
                    auto fatherName = father->getName();
                    if (fatherName == className) {
                        continue;
                    }
                    // Check if the parent is placed before the actual feature
                    auto it = find(result.begin(), result.end(), fatherName);
                    if (it != result.end()) {
                        auto it2 = find(result.begin(), result.end(), feature);
                        if (it2 != result.end()) {
                            if (distance(it, it2) < 0) {
                                // if it is not, move it before the feature
                                result.erase(remove(result.begin(), result.end(), fatherName), result.end());
                                result.insert(it2, fatherName);
                                ending = false;
                            }
                        }
                    }
                }
            }
        }
        return result;
    }
    std::string Network::dump_cpt() const
    {
        std::stringstream oss;
        for (auto& node : nodes) {
            oss << "* " << node.first << ": (" << node.second->getNumStates() << ") : " << node.second->getCPT().sizes() << std::endl;
            oss << node.second->getCPT() << std::endl;
        }
        return oss.str();
    }
}
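A minimal end-to-end sketch of the API above, fitting a two-feature naive Bayes structure (the node names, data values and states map are made up for illustration; the calls are the ones declared in Network.h below, and nodes are added in dataset column order with the class last, as the header note requires):

#include "bayesnet/network/Network.h"

void naiveBayesSketch()
{
    bayesnet::Network net;
    net.addNode("A");
    net.addNode("B");
    net.addNode("class");
    net.addEdge("class", "A"); // naive Bayes structure: class -> every feature
    net.addEdge("class", "B");
    std::vector<std::vector<int>> X = { { 0, 1, 0, 1 }, { 1, 1, 0, 0 } }; // 2 features x 4 samples
    std::vector<int> y = { 0, 1, 0, 1 };
    std::vector<double> w(4, 1.0); // uniform sample weights
    std::map<std::string, std::vector<int>> states = { { "A", { 0, 1 } }, { "B", { 0, 1 } }, { "class", { 0, 1 } } };
    net.fit(X, y, w, { "A", "B" }, "class", states, bayesnet::Smoothing_t::LAPLACE);
    auto y_pred = net.predict(X);      // most probable class per sample
    auto proba = net.predict_proba(X); // class posterior per sample
}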
66
bayesnet/network/Network.h
Normal file
@@ -0,0 +1,66 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef NETWORK_H
#define NETWORK_H
#include <map>
#include <vector>
#include "bayesnet/config.h"
#include "Node.h"
#include "Smoothing.h"

namespace bayesnet {
    class Network {
    public:
        Network();
        explicit Network(const Network&);
        ~Network() = default;
        torch::Tensor& getSamples();
        void addNode(const std::string&);
        void addEdge(const std::string&, const std::string&);
        std::map<std::string, std::unique_ptr<Node>>& getNodes();
        std::vector<std::string> getFeatures() const;
        int getStates() const;
        std::vector<std::pair<std::string, std::string>> getEdges() const;
        int getNumEdges() const;
        int getClassNumStates() const;
        std::string getClassName() const;
        /*
        Notice: Nodes have to be inserted in the same order as they are in the dataset, i.e., first node is first column and so on.
        */
        void fit(const std::vector<std::vector<int>>& input_data, const std::vector<int>& labels, const std::vector<double>& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing);
        void fit(const torch::Tensor& X, const torch::Tensor& y, const torch::Tensor& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing);
        void fit(const torch::Tensor& samples, const torch::Tensor& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const Smoothing_t smoothing);
        std::vector<int> predict(const std::vector<std::vector<int>>&); // Return mx1 std::vector of predictions
        torch::Tensor predict(const torch::Tensor&); // Return mx1 tensor of predictions
        torch::Tensor predict_tensor(const torch::Tensor& samples, const bool proba);
        std::vector<std::vector<double>> predict_proba(const std::vector<std::vector<int>>&); // Return mxn std::vector of probabilities
        torch::Tensor predict_proba(const torch::Tensor&); // Return mxn tensor of probabilities
        double score(const std::vector<std::vector<int>>&, const std::vector<int>&);
        std::vector<std::string> topological_sort();
        std::vector<std::string> show() const;
        std::vector<std::string> graph(const std::string& title) const; // Returns a std::vector of std::strings representing the graph in graphviz format
        void initialize();
        std::string dump_cpt() const;
        inline std::string version() { return { project_version.begin(), project_version.end() }; }
    private:
        std::map<std::string, std::unique_ptr<Node>> nodes;
        bool fitted;
        int classNumStates;
        std::vector<std::string> features; // Including classname
        std::string className;
        torch::Tensor samples; // n+1xm tensor used to fit the model
        bool isCyclic(const std::string&, std::unordered_set<std::string>&, std::unordered_set<std::string>&);
        std::vector<double> predict_sample(const std::vector<int>&);
        std::vector<double> predict_sample(const torch::Tensor&);
        std::vector<double> exactInference(std::map<std::string, int>&);
        void completeFit(const std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const Smoothing_t smoothing);
        void checkFitData(int n_samples, int n_features, int n_samples_y, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights);
        void setStates(const std::map<std::string, std::vector<int>>&);
    };
}
#endif
149
bayesnet/network/Node.cc
Normal file
@@ -0,0 +1,149 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#include "Node.h"

namespace bayesnet {
    Node::Node(const std::string& name)
        : name(name)
    {
    }
    void Node::clear()
    {
        parents.clear();
        children.clear();
        cpTable = torch::Tensor();
        dimensions.clear();
        numStates = 0;
    }
    std::string Node::getName() const
    {
        return name;
    }
    void Node::addParent(Node* parent)
    {
        parents.push_back(parent);
    }
    void Node::removeParent(Node* parent)
    {
        parents.erase(std::remove(parents.begin(), parents.end(), parent), parents.end());
    }
    void Node::removeChild(Node* child)
    {
        children.erase(std::remove(children.begin(), children.end(), child), children.end());
    }
    void Node::addChild(Node* child)
    {
        children.push_back(child);
    }
    std::vector<Node*>& Node::getParents()
    {
        return parents;
    }
    std::vector<Node*>& Node::getChildren()
    {
        return children;
    }
    int Node::getNumStates() const
    {
        return numStates;
    }
    void Node::setNumStates(int numStates)
    {
        this->numStates = numStates;
    }
    torch::Tensor& Node::getCPT()
    {
        return cpTable;
    }
    /*
    The MinFill criterion is a heuristic for variable elimination: it selects
    the variable that minimizes the number of edges that need to be added to
    the graph to make it triangulated. This is done by counting the edges that
    would have to be added if the variable were eliminated; the variable with
    the minimum number of such edges is chosen. Here it is computed as the
    number of combinations of the node's neighbors taken two at a time.
    */
    unsigned Node::minFill()
    {
        std::unordered_set<std::string> neighbors;
        for (auto child : children) {
            neighbors.emplace(child->getName());
        }
        for (auto parent : parents) {
            neighbors.emplace(parent->getName());
        }
        auto source = std::vector<std::string>(neighbors.begin(), neighbors.end());
        return combinations(source).size();
    }
    std::vector<std::pair<std::string, std::string>> Node::combinations(const std::vector<std::string>& source)
    {
        std::vector<std::pair<std::string, std::string>> result;
        for (int i = 0; i < source.size(); ++i) {
            std::string temp = source[i];
            for (int j = i + 1; j < source.size(); ++j) {
                result.push_back({ temp, source[j] });
            }
        }
        return result;
    }
    void Node::computeCPT(const torch::Tensor& dataset, const std::vector<std::string>& features, const double smoothing, const torch::Tensor& weights)
    {
        dimensions.clear();
        dimensions.reserve(parents.size() + 1);
        // Get dimensions of the CPT: the node's own states first, then each parent's
        dimensions.push_back(numStates);
        for (const auto& parent : parents) {
            dimensions.push_back(parent->getNumStates());
        }
        // Create a tensor initialized with the smoothing factor
        cpTable = torch::full(dimensions, smoothing, torch::kDouble);
        // Create a map for quick feature index lookup
        std::unordered_map<std::string, int> featureIndexMap;
        for (size_t i = 0; i < features.size(); ++i) {
            featureIndexMap[features[i]] = i;
        }
        // Fill the table with the weighted counts
        // Get the index of this node's feature
        int name_index = featureIndexMap[name];
        // Get parent indices in dataset
        std::vector<int> parent_indices;
        parent_indices.reserve(parents.size());
        for (const auto& parent : parents) {
            parent_indices.push_back(featureIndexMap[parent->getName()]);
        }
        c10::List<c10::optional<at::Tensor>> coordinates;
        for (int n_sample = 0; n_sample < dataset.size(1); ++n_sample) {
            coordinates.clear();
            auto sample = dataset.index({ "...", n_sample });
            coordinates.push_back(sample[name_index]);
            for (size_t i = 0; i < parent_indices.size(); ++i) {
                coordinates.push_back(sample[parent_indices[i]]);
            }
            // Increment the count of the corresponding coordinate by the sample weight
            cpTable.index_put_({ coordinates }, weights.index({ n_sample }), true);
        }
        // Normalize the counts: divide by the total over the node's own states
        // for each parent configuration
        cpTable /= cpTable.sum(0, true);
    }
    double Node::getFactorValue(std::map<std::string, int>& evidence)
    {
        c10::List<c10::optional<at::Tensor>> coordinates;
        // following the predetermined order of indices in the cpTable (see Node.h)
        coordinates.push_back(at::tensor(evidence[name]));
        transform(parents.begin(), parents.end(), std::back_inserter(coordinates), [&evidence](const auto& parent) { return at::tensor(evidence[parent->getName()]); });
        return cpTable.index({ coordinates }).item<double>();
    }
    std::vector<std::string> Node::graph(const std::string& className)
    {
        auto output = std::vector<std::string>();
        auto suffix = name == className ? ", fontcolor=red, fillcolor=lightblue, style=filled " : "";
        output.push_back("\"" + name + "\" [shape=circle" + suffix + "] \n");
        transform(children.begin(), children.end(), back_inserter(output), [this](const auto& child) { return "\"" + name + "\" -> \"" + child->getName() + "\""; });
        return output;
    }
}
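To make the tensor layout concrete: a node with 2 states and a single parent with 3 states gets a 2x3 cpTable in which cpTable[x][p] holds P(node = x | parent = p); the sum(0, true) normalization makes each column (one per parent configuration) sum to 1, and getFactorValue reads back a single cell using the same index order, node state first, then parent states.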
42
bayesnet/network/Node.h
Normal file
@@ -0,0 +1,42 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef NODE_H
#define NODE_H
#include <map>
#include <unordered_set>
#include <vector>
#include <string>
#include <torch/torch.h>
namespace bayesnet {
    class Node {
    public:
        explicit Node(const std::string&);
        void clear();
        void addParent(Node*);
        void addChild(Node*);
        void removeParent(Node*);
        void removeChild(Node*);
        std::string getName() const;
        std::vector<Node*>& getParents();
        std::vector<Node*>& getChildren();
        torch::Tensor& getCPT();
        void computeCPT(const torch::Tensor& dataset, const std::vector<std::string>& features, const double smoothing, const torch::Tensor& weights);
        int getNumStates() const;
        void setNumStates(int);
        unsigned minFill();
        std::vector<std::string> graph(const std::string& className); // Returns a std::vector of std::strings representing the graph in graphviz format
        double getFactorValue(std::map<std::string, int>&);
    private:
        std::string name;
        std::vector<Node*> parents;
        std::vector<Node*> children;
        int numStates = 0; // number of states of the variable
        torch::Tensor cpTable; // Order of indices is 0-> node variable, 1-> 1st parent, 2-> 2nd parent, ...
        std::vector<int64_t> dimensions; // dimensions of the cpTable
        std::vector<std::pair<std::string, std::string>> combinations(const std::vector<std::string>&);
    };
}
#endif
17
bayesnet/network/Smoothing.h
Normal file
@@ -0,0 +1,17 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef SMOOTHING_H
#define SMOOTHING_H
namespace bayesnet {
    enum class Smoothing_t {
        NONE = -1,
        ORIGINAL = 0,
        LAPLACE,
        CESTNIK
    };
}
#endif // SMOOTHING_H
260
bayesnet/utils/BayesMetrics.cc
Normal file
@@ -0,0 +1,260 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#include <map>
#include <unordered_map>
#include <tuple>
#include "Mst.h"
#include "BayesMetrics.h"
namespace bayesnet {
    // samples is an n+1xm tensor used to fit the model
    Metrics::Metrics(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int classNumStates)
        : samples(samples)
        , className(className)
        , features(features)
        , classNumStates(classNumStates)
    {
    }
    // samples is an n+1xm std::vector used to fit the model
    Metrics::Metrics(const std::vector<std::vector<int>>& vsamples, const std::vector<int>& labels, const std::vector<std::string>& features, const std::string& className, const int classNumStates)
        : samples(torch::zeros({ static_cast<int>(vsamples.size() + 1), static_cast<int>(vsamples[0].size()) }, torch::kInt32))
        , className(className)
        , features(features)
        , classNumStates(classNumStates)
    {
        for (int i = 0; i < vsamples.size(); ++i) {
            samples.index_put_({ i, "..." }, torch::tensor(vsamples[i], torch::kInt32));
        }
        samples.index_put_({ -1, "..." }, torch::tensor(labels, torch::kInt32));
    }
    std::vector<std::pair<int, int>> Metrics::SelectKPairs(const torch::Tensor& weights, std::vector<int>& featuresExcluded, bool ascending, unsigned k)
    {
        // Return the K best feature pairs
        auto n = features.size();
        // compute scores
        scoresKPairs.clear();
        pairsKBest.clear();
        auto labels = samples.index({ -1, "..." });
        for (int i = 0; i < n - 1; ++i) {
            if (std::find(featuresExcluded.begin(), featuresExcluded.end(), i) != featuresExcluded.end()) {
                continue;
            }
            for (int j = i + 1; j < n; ++j) {
                if (std::find(featuresExcluded.begin(), featuresExcluded.end(), j) != featuresExcluded.end()) {
                    continue;
                }
                auto key = std::make_pair(i, j);
                auto value = conditionalMutualInformation(samples.index({ i, "..." }), samples.index({ j, "..." }), labels, weights);
                scoresKPairs.push_back({ key, value });
            }
        }
        // sort scores
        if (ascending) {
            sort(scoresKPairs.begin(), scoresKPairs.end(), [](auto& a, auto& b)
                { return a.second < b.second; });
        } else {
            sort(scoresKPairs.begin(), scoresKPairs.end(), [](auto& a, auto& b)
                { return a.second > b.second; });
        }
        for (auto& [pairs, score] : scoresKPairs) {
            pairsKBest.push_back(pairs);
        }
        if (k != 0 && k < pairsKBest.size()) {
            if (ascending) {
                int limit = pairsKBest.size() - k;
                for (int i = 0; i < limit; i++) {
                    pairsKBest.erase(pairsKBest.begin());
                    scoresKPairs.erase(scoresKPairs.begin());
                }
            } else {
                pairsKBest.resize(k);
                scoresKPairs.resize(k);
            }
        }
        return pairsKBest;
    }
    std::vector<int> Metrics::SelectKBestWeighted(const torch::Tensor& weights, bool ascending, unsigned k)
    {
        // Return the K best features
        auto n = features.size();
        if (k == 0) {
            k = n;
        }
        // compute scores
        scoresKBest.clear();
        featuresKBest.clear();
        auto label = samples.index({ -1, "..." });
        for (int i = 0; i < n; ++i) {
            scoresKBest.push_back(mutualInformation(label, samples.index({ i, "..." }), weights));
            featuresKBest.push_back(i);
        }
        // sort & reduce scores and features
        if (ascending) {
            sort(featuresKBest.begin(), featuresKBest.end(), [&](int i, int j)
                { return scoresKBest[i] < scoresKBest[j]; });
            sort(scoresKBest.begin(), scoresKBest.end(), std::less<double>());
            if (k < n) {
                for (int i = 0; i < n - k; ++i) {
                    featuresKBest.erase(featuresKBest.begin());
                    scoresKBest.erase(scoresKBest.begin());
                }
            }
        } else {
            sort(featuresKBest.begin(), featuresKBest.end(), [&](int i, int j)
                { return scoresKBest[i] > scoresKBest[j]; });
            sort(scoresKBest.begin(), scoresKBest.end(), std::greater<double>());
            featuresKBest.resize(k);
            scoresKBest.resize(k);
        }
        return featuresKBest;
    }
    std::vector<double> Metrics::getScoresKBest() const
    {
        return scoresKBest;
    }
    std::vector<std::pair<std::pair<int, int>, double>> Metrics::getScoresKPairs() const
    {
        return scoresKPairs;
    }
    torch::Tensor Metrics::conditionalEdge(const torch::Tensor& weights)
    {
        auto result = std::vector<double>();
        auto source = std::vector<std::string>(features);
        source.push_back(className);
        auto combinations = doCombinations(source);
        // Compute class prior
        auto margin = torch::zeros({ classNumStates }, torch::kFloat);
        for (int value = 0; value < classNumStates; ++value) {
            auto mask = samples.index({ -1, "..." }) == value;
            margin[value] = mask.sum().item<double>() / samples.size(1);
        }
        for (auto [first, second] : combinations) {
            int index_first = find(features.begin(), features.end(), first) - features.begin();
            int index_second = find(features.begin(), features.end(), second) - features.begin();
            double accumulated = 0;
            for (int value = 0; value < classNumStates; ++value) {
                auto mask = samples.index({ -1, "..." }) == value;
                auto first_dataset = samples.index({ index_first, mask });
                auto second_dataset = samples.index({ index_second, mask });
                auto weights_dataset = weights.index({ mask });
                auto mi = mutualInformation(first_dataset, second_dataset, weights_dataset);
                auto pb = margin[value].item<double>();
                accumulated += pb * mi;
            }
            result.push_back(accumulated);
        }
        long n_vars = source.size();
        auto matrix = torch::zeros({ n_vars, n_vars });
        auto indices = torch::triu_indices(n_vars, n_vars, 1);
        for (auto i = 0; i < result.size(); ++i) {
            auto x = indices[0][i];
            auto y = indices[1][i];
            matrix[x][y] = result[i];
            matrix[y][x] = result[i];
        }
        return matrix;
    }
    // Measured in nats (natural logarithm, base e)
    // Elements of Information Theory, 2nd Edition, Thomas M. Cover, Joy A. Thomas p. 14
    double Metrics::entropy(const torch::Tensor& feature, const torch::Tensor& weights)
    {
        torch::Tensor counts = feature.bincount(weights);
        double totalWeight = counts.sum().item<double>();
        torch::Tensor probs = counts.to(torch::kFloat) / totalWeight;
        torch::Tensor logProbs = torch::log(probs);
        torch::Tensor entropy = -probs * logProbs;
        return entropy.nansum().item<double>();
    }
    // H(Y|X) = sum_{x in X} p(x) H(Y|X=x)
    double Metrics::conditionalEntropy(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& weights)
    {
        int numSamples = firstFeature.sizes()[0];
        torch::Tensor featureCounts = secondFeature.bincount(weights);
        std::unordered_map<int, std::unordered_map<int, double>> jointCounts;
        double totalWeight = 0;
        for (auto i = 0; i < numSamples; i++) {
            jointCounts[secondFeature[i].item<int>()][firstFeature[i].item<int>()] += weights[i].item<double>();
            totalWeight += weights[i].item<double>();
        }
        if (totalWeight == 0)
            return 0;
        double entropyValue = 0;
        for (int value = 0; value < featureCounts.sizes()[0]; ++value) {
            double p_f = featureCounts[value].item<double>() / totalWeight;
            double entropy_f = 0;
            for (auto& [label, jointCount] : jointCounts[value]) {
                double p_l_f = jointCount / featureCounts[value].item<double>();
                // zero-probability cells contribute nothing to the sum
                if (p_l_f > 0) {
                    entropy_f -= p_l_f * log(p_l_f);
                }
            }
            entropyValue += p_f * entropy_f;
        }
        return entropyValue;
    }
    // H(X|Y,C) = sum_{y in Y, c in C} p(y,c) H(X|Y=y,C=c)
    double Metrics::conditionalEntropy(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& labels, const torch::Tensor& weights)
    {
        // Ensure the tensors are of the same length
        assert(firstFeature.size(0) == secondFeature.size(0) && firstFeature.size(0) == labels.size(0) && firstFeature.size(0) == weights.size(0));
        // Use accessors for faster elementwise reads
        auto firstFeatureData = firstFeature.accessor<int, 1>();
        auto secondFeatureData = secondFeature.accessor<int, 1>();
        auto labelsData = labels.accessor<int, 1>();
        auto weightsData = weights.accessor<double, 1>();
        int numSamples = firstFeature.size(0);
        // Maps for joint and marginal weighted counts
        std::map<std::tuple<int, int, int>, double> jointCount;
        std::map<std::tuple<int, int>, double> marginalCount;
        // Compute joint and marginal counts
        for (int i = 0; i < numSamples; ++i) {
            auto keyJoint = std::make_tuple(firstFeatureData[i], labelsData[i], secondFeatureData[i]);
            auto keyMarginal = std::make_tuple(firstFeatureData[i], labelsData[i]);
            jointCount[keyJoint] += weightsData[i];
            marginalCount[keyMarginal] += weightsData[i];
        }
        // Total weight sum
        double totalWeight = torch::sum(weights).item<double>();
        if (totalWeight == 0)
            return 0;
        // Compute the conditional entropy
        double conditionalEntropy = 0.0;
        for (const auto& [keyJoint, jointFreq] : jointCount) {
            auto [x, c, y] = keyJoint;
            auto keyMarginal = std::make_tuple(x, c);
            double p_y_given_xc = jointFreq / marginalCount[keyMarginal];
            if (p_y_given_xc > 0) {
                conditionalEntropy -= (jointFreq / totalWeight) * std::log(p_y_given_xc);
            }
        }
        return conditionalEntropy;
    }
    // I(X;Y) = H(Y) - H(Y|X) ; I(X;Y) >= 0
    double Metrics::mutualInformation(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& weights)
    {
        return std::max(entropy(firstFeature, weights) - conditionalEntropy(firstFeature, secondFeature, weights), 0.0);
    }
    // I(X;Y|C) = H(X|C) - H(X|Y,C) >= 0
    double Metrics::conditionalMutualInformation(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& labels, const torch::Tensor& weights)
    {
        return std::max(conditionalEntropy(firstFeature, labels, weights) - conditionalEntropy(firstFeature, secondFeature, labels, weights), 0.0);
    }
    /*
    Compute the maximum spanning tree considering the weights as distances
    and the indices of the weights as nodes of this square matrix, using
    Kruskal's algorithm
    */
    std::vector<std::pair<int, int>> Metrics::maximumSpanningTree(const std::vector<std::string>& features, const torch::Tensor& weights, const int root)
    {
        auto mst = MST(features, weights, root);
        return mst.maximumSpanningTree();
    }
}
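As a quick sanity check of the units: a binary feature with p = 0.5 for each value has entropy -2 · 0.5 · ln(0.5) = ln 2 ≈ 0.693 nats, and since the mutual information of a feature with itself equals its entropy, the symmetrical uncertainty built on top of these metrics evaluates to 1 in that case.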
62
bayesnet/utils/BayesMetrics.h
Normal file
@@ -0,0 +1,62 @@
|
||||
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef BAYESNET_METRICS_H
#define BAYESNET_METRICS_H
#include <vector>
#include <string>
#include <torch/torch.h>
namespace bayesnet {
    class Metrics {
    public:
        Metrics() = default;
        Metrics(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int classNumStates);
        Metrics(const std::vector<std::vector<int>>& vsamples, const std::vector<int>& labels, const std::vector<std::string>& features, const std::string& className, const int classNumStates);
        std::vector<int> SelectKBestWeighted(const torch::Tensor& weights, bool ascending = false, unsigned k = 0);
        std::vector<std::pair<int, int>> SelectKPairs(const torch::Tensor& weights, std::vector<int>& featuresExcluded, bool ascending = false, unsigned k = 0);
        std::vector<double> getScoresKBest() const;
        std::vector<std::pair<std::pair<int, int>, double>> getScoresKPairs() const;
        double mutualInformation(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& weights);
        double conditionalMutualInformation(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& labels, const torch::Tensor& weights);
        torch::Tensor conditionalEdge(const torch::Tensor& weights);
        std::vector<std::pair<int, int>> maximumSpanningTree(const std::vector<std::string>& features, const torch::Tensor& weights, const int root);
        // Measured in nats (natural logarithm, base e)
        // Elements of Information Theory, 2nd Edition, Thomas M. Cover, Joy A. Thomas, p. 14
        double entropy(const torch::Tensor& feature, const torch::Tensor& weights);
        double conditionalEntropy(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& labels, const torch::Tensor& weights);
    protected:
        torch::Tensor samples; // (n+1) x m torch::Tensor used to fit the model, where samples[-1] is the y std::vector
        std::string className;
        std::vector<std::string> features;
        template <class T>
        std::vector<std::pair<T, T>> doCombinations(const std::vector<T>& source)
        {
            std::vector<std::pair<T, T>> result;
            // size_t indices avoid the signed/unsigned comparison and the
            // underflow of source.size() - 1 when source is empty
            for (size_t i = 0; i + 1 < source.size(); ++i) {
                T temp = source[i];
                for (size_t j = i + 1; j < source.size(); ++j) {
                    result.push_back({ temp, source[j] });
                }
            }
            return result;
        }
        template <class T>
        T pop_first(std::vector<T>& v)
        {
            T temp = v[0];
            v.erase(v.begin());
            return temp;
        }
    private:
        int classNumStates = 0;
        std::vector<double> scoresKBest;
        std::vector<int> featuresKBest; // sorted indices of the features
        std::vector<std::pair<int, int>> pairsKBest; // sorted indices of the pairs
        std::vector<std::pair<std::pair<int, int>, double>> scoresKPairs;
        double conditionalEntropy(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& weights);
    };
}
#endif
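A minimal usage sketch of the interface above; the dataset, feature names, and the choice of double-precision per-sample weights are hypothetical assumptions, not part of the commit:

// All values here are hypothetical illustrations.
#include <vector>
#include <string>
#include <torch/torch.h>
#include "bayesnet/utils/BayesMetrics.h"

int main()
{
    // samples is (n_features + 1) x n_samples; the last row holds the class labels
    auto samples = torch::tensor({ {0, 1, 1, 0},
                                   {1, 1, 0, 0},
                                   {0, 1, 1, 0} }, torch::kInt32);
    std::vector<std::string> features = { "f0", "f1" };
    auto metrics = bayesnet::Metrics(samples, features, "class", 2);
    // Uniform per-sample weights (dtype assumed to be double)
    auto weights = torch::full({ 4 }, 0.25, torch::kDouble);
    // Mutual information between f0 and the class row, in nats
    double mi = metrics.mutualInformation(samples.index({ 0 }), samples.index({ 2 }), weights);
    return 0;
}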
54
bayesnet/utils/CountingSemaphore.h
Normal file
@@ -0,0 +1,54 @@
#ifndef COUNTING_SEMAPHORE_H
#define COUNTING_SEMAPHORE_H
#include <mutex>
#include <condition_variable>
#include <algorithm>
#include <thread>

class CountingSemaphore {
public:
    static CountingSemaphore& getInstance()
    {
        static CountingSemaphore instance;
        return instance;
    }
    // Delete copy constructor and assignment operator
    CountingSemaphore(const CountingSemaphore&) = delete;
    CountingSemaphore& operator=(const CountingSemaphore&) = delete;
    void acquire()
    {
        std::unique_lock<std::mutex> lock(mtx_);
        cv_.wait(lock, [this]() { return count_ > 0; });
        --count_;
    }
    void release()
    {
        std::lock_guard<std::mutex> lock(mtx_);
        ++count_;
        if (count_ <= max_count_) {
            cv_.notify_one();
        }
    }
    // Unsynchronized read; the value is only a snapshot
    unsigned int getCount() const
    {
        return count_;
    }
    unsigned int getMaxCount() const
    {
        return max_count_;
    }
private:
    // Reserve ~95% of the hardware threads, but always at least one slot
    CountingSemaphore()
        : max_count_(std::max(1u, static_cast<unsigned int>(0.95 * std::thread::hardware_concurrency()))),
        count_(max_count_)
    {
    }
    std::mutex mtx_;
    std::condition_variable cv_;
    const unsigned int max_count_;
    unsigned int count_;
};
#endif
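A minimal usage sketch of the semaphore; the worker body and the thread count are hypothetical:

// Hypothetical illustration: bound the number of concurrently running workers.
#include <thread>
#include <vector>
#include "bayesnet/utils/CountingSemaphore.h"

void worker()
{
    auto& sem = CountingSemaphore::getInstance();
    sem.acquire();  // blocks while every slot is taken
    // ... CPU-bound work would go here ...
    sem.release();  // hand the slot to the next waiting thread
}

int main()
{
    std::vector<std::thread> pool;
    for (int i = 0; i < 16; ++i) pool.emplace_back(worker);
    for (auto& t : pool) t.join();
    return 0;
}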
120
bayesnet/utils/Mst.cc
Normal file
@@ -0,0 +1,120 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#include <algorithm>
#include <vector>
#include <list>
#include "Mst.h"
/*
    Based on the code from https://www.softwaretestinghelp.com/minimum-spanning-tree-tutorial/
*/

namespace bayesnet {
    Graph::Graph(int V) : V(V), parent(std::vector<int>(V))
    {
        for (int i = 0; i < V; i++)
            parent[i] = i;
        G.clear();
        T.clear();
    }
    void Graph::addEdge(int u, int v, float wt)
    {
        G.push_back({ wt, { u, v } });
    }
    int Graph::find_set(int i)
    {
        // If i is its own parent, i is the representative of its set
        if (i == parent[i])
            return i;
        // Otherwise, recursively find the representative of its parent
        return find_set(parent[i]);
    }
    void Graph::union_set(int u, int v)
    {
        parent[u] = parent[v];
    }
    void Graph::kruskal_algorithm()
    {
        // Sort the edges in decreasing order of weight (maximum spanning tree)
        std::stable_sort(G.begin(), G.end(), [](const auto& left, const auto& right) { return left.first > right.first; });
        for (size_t i = 0; i < G.size(); i++) {
            int uSt = find_set(G[i].second.first);
            int vEd = find_set(G[i].second.second);
            if (uSt != vEd) {
                T.push_back(G[i]); // add to the mst std::vector
                union_set(uSt, vEd);
            }
        }
    }

    void MST::insertElement(std::list<int>& variables, int variable)
    {
        if (std::find(variables.begin(), variables.end(), variable) == variables.end()) {
            variables.push_front(variable);
        }
    }

    std::vector<std::pair<int, int>> MST::reorder(std::vector<std::pair<float, std::pair<int, int>>> T, int root_original)
    {
        // Create the edges of a DAG from the MST,
        // replacing unordered_set with list because unordered_set cannot guarantee the order of the elements inserted
        auto result = std::vector<std::pair<int, int>>();
        auto visited = std::vector<size_t>();
        auto nextVariables = std::list<int>();
        nextVariables.push_front(root_original);
        while (nextVariables.size() > 0) {
            int root = nextVariables.front();
            nextVariables.pop_front();
            for (size_t i = 0; i < T.size(); ++i) {
                auto [weight, edge] = T[i];
                auto [from, to] = edge;
                if (from == root || to == root) {
                    visited.insert(visited.begin(), i);
                    if (from == root) {
                        result.push_back({ from, to });
                        insertElement(nextVariables, to);
                    } else {
                        result.push_back({ to, from });
                        insertElement(nextVariables, from);
                    }
                }
            }
            // Remove the visited edges; visited holds indices in descending order,
            // so erasing front to back keeps the remaining indices valid
            for (size_t i = 0; i < visited.size(); ++i) {
                T.erase(T.begin() + visited[i]);
            }
            visited.clear();
        }
        if (T.size() > 0) {
            for (size_t i = 0; i < T.size(); ++i) {
                auto [weight, edge] = T[i];
                auto [from, to] = edge;
                result.push_back({ from, to });
            }
        }
        return result;
    }

    MST::MST(const std::vector<std::string>& features, const torch::Tensor& weights, const int root) : features(features), weights(weights), root(root) {}
    std::vector<std::pair<int, int>> MST::maximumSpanningTree()
    {
        auto num_features = static_cast<int>(features.size());
        Graph g(num_features);
        // Make a complete graph
        for (int i = 0; i < num_features - 1; ++i) {
            for (int j = i + 1; j < num_features; ++j) {
                g.addEdge(i, j, weights[i][j].item<float>());
            }
        }
        g.kruskal_algorithm();
        auto mst = g.get_mst();
        return reorder(mst, root);
    }

}
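A minimal usage sketch of the MST classes; the feature names and the symmetric weight matrix are hypothetical:

// Hypothetical illustration: maximum spanning tree over three features.
#include <vector>
#include <string>
#include <torch/torch.h>
#include "bayesnet/utils/Mst.h"

int main()
{
    std::vector<std::string> features = { "a", "b", "c" };
    auto weights = torch::tensor({ {0.0f, 0.9f, 0.1f},
                                   {0.9f, 0.0f, 0.5f},
                                   {0.1f, 0.5f, 0.0f} });
    auto mst = bayesnet::MST(features, weights, 0);
    auto edges = mst.maximumSpanningTree();
    // With this matrix the tree keeps (0,1) and (1,2), oriented away from root 0
    return 0;
}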
40
bayesnet/utils/Mst.h
Normal file
@@ -0,0 +1,40 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef MST_H
#define MST_H
#include <vector>
#include <string>
#include <list>
#include <torch/torch.h>
namespace bayesnet {
    class MST {
    public:
        MST() = default;
        MST(const std::vector<std::string>& features, const torch::Tensor& weights, const int root);
        void insertElement(std::list<int>& variables, int variable);
        std::vector<std::pair<int, int>> reorder(std::vector<std::pair<float, std::pair<int, int>>> T, int root_original);
        std::vector<std::pair<int, int>> maximumSpanningTree();
    private:
        torch::Tensor weights;
        std::vector<std::string> features;
        int root = 0;
    };
    class Graph {
    public:
        explicit Graph(int V);
        void addEdge(int u, int v, float wt);
        int find_set(int i);
        void union_set(int u, int v);
        void kruskal_algorithm();
        std::vector<std::pair<float, std::pair<int, int>>> get_mst() { return T; }
    private:
        int V; // number of nodes in the graph
        std::vector<std::pair<float, std::pair<int, int>>> G; // edges of the graph: (weight, (u, v))
        std::vector<std::pair<float, std::pair<int, int>>> T; // edges of the mst
        std::vector<int> parent;
    };
}
#endif
51
bayesnet/utils/TensorUtils.h
Normal file
@@ -0,0 +1,51 @@
#ifndef TENSORUTILS_H
#define TENSORUTILS_H
#include <algorithm>
#include <torch/torch.h>
#include <vector>
namespace bayesnet {
    class TensorUtils {
    public:
        static std::vector<std::vector<int>> to_matrix(const torch::Tensor& X)
        {
            // Ensure the tensor is contiguous in memory
            auto X_contig = X.contiguous();

            // Access the tensor data pointer directly
            auto data_ptr = X_contig.data_ptr<int>();

            // If you are using int64_t as the data type, use the following lines instead
            //auto data_ptr = X_contig.data_ptr<int64_t>();
            //std::vector<std::vector<int64_t>> data(X.size(0), std::vector<int64_t>(X.size(1)));

            // Prepare the output container
            std::vector<std::vector<int>> data(X.size(0), std::vector<int>(X.size(1)));

            // Fill the 2D vector in a single loop using pointer arithmetic
            int rows = X.size(0);
            int cols = X.size(1);
            for (int i = 0; i < rows; ++i) {
                std::copy(data_ptr + i * cols, data_ptr + (i + 1) * cols, data[i].begin());
            }
            return data;
        }
        template <typename T>
        static std::vector<T> to_vector(const torch::Tensor& y)
        {
            // Ensure the tensor is contiguous in memory
            auto y_contig = y.contiguous();

            // Access the data pointer
            auto data_ptr = y_contig.data_ptr<T>();

            // Prepare the output container
            std::vector<T> data(y.size(0));

            // Copy the data efficiently
            std::copy(data_ptr, data_ptr + y.size(0), data.begin());

            return data;
        }
    };
}

#endif // TENSORUTILS_H
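A minimal usage sketch of these conversion helpers; the tensor contents are hypothetical (to_matrix expects a kInt32 tensor):

// Hypothetical illustration: round-trip a small int32 tensor.
#include <torch/torch.h>
#include "bayesnet/utils/TensorUtils.h"

int main()
{
    auto X = torch::tensor({ {1, 2, 3}, {4, 5, 6} }, torch::kInt32);
    auto rows = bayesnet::TensorUtils::to_matrix(X);                 // {{1, 2, 3}, {4, 5, 6}}
    auto flat = bayesnet::TensorUtils::to_vector<int>(X.flatten());  // {1, 2, 3, 4, 5, 6}
    return 0;
}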
44
bayesnet/utils/bayesnetUtils.cc
Normal file
@@ -0,0 +1,44 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#include <algorithm>
#include <numeric>
#include "bayesnetUtils.h"
namespace bayesnet {
    // Return the indices that sort nums in descending order
    std::vector<int> argsort(std::vector<double>& nums)
    {
        int n = nums.size();
        std::vector<int> indices(n);
        std::iota(indices.begin(), indices.end(), 0);
        std::sort(indices.begin(), indices.end(), [&nums](int i, int j) { return nums[i] > nums[j]; });
        return indices;
    }
    std::vector<std::vector<double>> tensorToVectorDouble(torch::Tensor& dtensor)
    {
        // Convert an mxn float tensor to an mxn std::vector of doubles
        std::vector<std::vector<double>> result;
        // Iterate over the rows
        for (int i = 0; i < dtensor.size(0); ++i) {
            auto row_tensor = dtensor.index({ i, "..." });
            auto row = std::vector<double>(row_tensor.data_ptr<float>(), row_tensor.data_ptr<float>() + dtensor.size(1));
            result.push_back(row);
        }
        return result;
    }
    torch::Tensor vectorToTensor(std::vector<std::vector<int>>& vector, bool transpose)
    {
        // Convert an nxm std::vector to an mxn tensor when transpose is true
        long int m = transpose ? vector[0].size() : vector.size();
        long int n = transpose ? vector.size() : vector[0].size();
        auto tensor = torch::zeros({ m, n }, torch::kInt32);
        for (int i = 0; i < m; ++i) {
            for (int j = 0; j < n; ++j) {
                tensor[i][j] = transpose ? vector[j][i] : vector[i][j];
            }
        }
        return tensor;
    }
}
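A minimal usage sketch of these utilities; the values are hypothetical:

// Hypothetical illustration of argsort and vectorToTensor.
#include <vector>
#include <torch/torch.h>
#include "bayesnet/utils/bayesnetUtils.h"

int main()
{
    std::vector<double> scores = { 0.2, 0.9, 0.5 };
    auto order = bayesnet::argsort(scores);  // {1, 2, 0}: indices by descending score

    std::vector<std::vector<int>> X = { {1, 2, 3}, {4, 5, 6} };
    auto t = bayesnet::vectorToTensor(X);    // transpose defaults to true, so t has shape {3, 2}
    return 0;
}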
16
bayesnet/utils/bayesnetUtils.h
Normal file
@@ -0,0 +1,16 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef BAYESNET_UTILS_H
#define BAYESNET_UTILS_H
#include <vector>
#include <torch/torch.h>
namespace bayesnet {
    std::vector<int> argsort(std::vector<double>& nums);
    std::vector<std::vector<double>> tensorToVectorDouble(torch::Tensor& dtensor);
    torch::Tensor vectorToTensor(std::vector<std::vector<int>>& vector, bool transpose = true);
}
#endif //BAYESNET_UTILS_H