Implement the proba branch and begin with the voting one
@@ -1,194 +0,0 @@
#include "Ensemble.h"

namespace bayesnet {

    Ensemble::Ensemble(bool predict_voting) : Classifier(Network()), n_models(0), predict_voting(predict_voting)
    {

    };
    const std::string ENSEMBLE_NOT_FITTED = "Ensemble has not been fitted";
    void Ensemble::trainModel(const torch::Tensor& weights)
    {
        n_models = models.size();
        for (auto i = 0; i < n_models; ++i) {
            // fit with std::vectors
            models[i]->fit(dataset, features, className, states);
        }
    }

    std::vector<int> Ensemble::voting(torch::Tensor& y_pred)
    {
        auto y_pred_ = y_pred.accessor<int, 2>();
        std::vector<int> y_pred_final;
        int numClasses = states.at(className).size();
        // y_pred is m x n_models with the prediction of every model for each sample
        for (int i = 0; i < y_pred.size(0); ++i) {
            // votes store in each index (value of class) the significance added by each model
            // i.e. votes[0] contains how much value has the value 0 of class. That value is generated by the models predictions
            std::vector<double> votes(numClasses, 0.0);
            for (int j = 0; j < n_models; ++j) {
                votes[y_pred_[i][j]] += significanceModels.at(j);
            }
            // argsort in descending order
            auto indices = argsort(votes);
            y_pred_final.push_back(indices[0]);
        }
        return y_pred_final;
    }
    std::vector<int> Ensemble::predict(std::vector<std::vector<int>>& X)
    {
        if (!fitted) {
            throw std::logic_error(ENSEMBLE_NOT_FITTED);
        }
        return do_predict_voting(X);

    }
    torch::Tensor Ensemble::predict(torch::Tensor& X)
    {
        if (!fitted) {
            throw std::logic_error(ENSEMBLE_NOT_FITTED);
        }
        return do_predict_voting(X);
    }
    torch::Tensor Ensemble::predict_proba(torch::Tensor& X)
    {
        auto n_states = getClassNumStates();
        torch::Tensor y_pred = torch::zeros({ X.size(1), n_states }, torch::kFloat32);
        auto threads{ std::vector<std::thread>() };
        std::mutex mtx;
        for (auto i = 0; i < n_models; ++i) {
            threads.push_back(std::thread([&, i]() {
                auto ypredict = models[i]->predict_proba(X);
                ypredict *= significanceModels[i];
                std::lock_guard<std::mutex> lock(mtx);
                y_pred.index_put_({ "...", i }, ypredict);
                }));
        }
        for (auto& thread : threads) {
            thread.join();
        }
        auto sum = std::reduce(significanceModels.begin(), significanceModels.end());
        y_pred /= sum;
        return y_pred;
    }
    std::vector<std::vector<double>> Ensemble::predict_proba(std::vector<std::vector<int>>& X)
    {

        // long m_ = X[0].size();
        // long n_ = X.size();
        // vector<vector<int>> Xd(n_, vector<int>(m_, 0));
        // for (auto i = 0; i < n_; i++) {
        //     Xd[i] = vector<int>(X[i].begin(), X[i].end());
        // }
        // torch::Tensor y_pred = torch::zeros({ m_, n_models }, torch::kInt32);
        // for (auto i = 0; i < n_models; ++i) {
        //     y_pred.index_put_({ "...", i }, torch::tensor(models[i]->predict(Xd), torch::kInt32));
        // }
        // return voting(y_pred);
        return std::vector<std::vector<double>>();
    }
    torch::Tensor Ensemble::do_predict_voting(torch::Tensor& X)
    {
        torch::Tensor y_pred = torch::zeros({ X.size(1), n_models }, torch::kInt32);
        auto threads{ std::vector<std::thread>() };
        std::mutex mtx;
        for (auto i = 0; i < n_models; ++i) {
            threads.push_back(std::thread([&, i]() {
                auto ypredict = models[i]->predict(X);
                std::lock_guard<std::mutex> lock(mtx);
                y_pred.index_put_({ "...", i }, ypredict);
                }));
        }
        for (auto& thread : threads) {
            thread.join();
        }
        return torch::tensor(voting(y_pred));
    }
    std::vector<int> Ensemble::do_predict_voting(std::vector<std::vector<int>>& X)
    {
        long m_ = X[0].size();
        long n_ = X.size();
        std::vector<std::vector<int>> Xd(n_, std::vector<int>(m_, 0));
        for (auto i = 0; i < n_; i++) {
            Xd[i] = std::vector<int>(X[i].begin(), X[i].end());
        }
        torch::Tensor y_pred = torch::zeros({ m_, n_models }, torch::kInt32);
        auto threads{ std::vector<std::thread>() };
        std::mutex mtx;
        for (auto i = 0; i < n_models; ++i) {
            threads.push_back(std::thread([&, i]() {
                auto ypredict = models[i]->predict(Xd);
                std::lock_guard<std::mutex> lock(mtx);
                y_pred.index_put_({ "...", i }, torch::tensor(ypredict, torch::kInt32));
                }));
        }
        for (auto& thread : threads) {
            thread.join();
        }
        return voting(y_pred);
    }
    float Ensemble::score(torch::Tensor& X, torch::Tensor& y)
    {
        auto y_pred = do_predict_voting(X);
        int correct = 0;
        for (int i = 0; i < y_pred.size(0); ++i) {
            if (y_pred[i].item<int>() == y[i].item<int>()) {
                correct++;
            }
        }
        return (double)correct / y_pred.size(0);
    }
    float Ensemble::score(std::vector<std::vector<int>>& X, std::vector<int>& y)
    {
        auto y_pred = do_predict_voting(X);
        int correct = 0;
        for (int i = 0; i < y_pred.size(); ++i) {
            if (y_pred[i] == y[i]) {
                correct++;
            }
        }
        return (double)correct / y_pred.size();
    }
    std::vector<std::string> Ensemble::show() const
    {
        auto result = std::vector<std::string>();
        for (auto i = 0; i < n_models; ++i) {
            auto res = models[i]->show();
            result.insert(result.end(), res.begin(), res.end());
        }
        return result;
    }
    std::vector<std::string> Ensemble::graph(const std::string& title) const
    {
        auto result = std::vector<std::string>();
        for (auto i = 0; i < n_models; ++i) {
            auto res = models[i]->graph(title + "_" + std::to_string(i));
            result.insert(result.end(), res.begin(), res.end());
        }
        return result;
    }
    int Ensemble::getNumberOfNodes() const
    {
        int nodes = 0;
        for (auto i = 0; i < n_models; ++i) {
            nodes += models[i]->getNumberOfNodes();
        }
        return nodes;
    }
    int Ensemble::getNumberOfEdges() const
    {
        int edges = 0;
        for (auto i = 0; i < n_models; ++i) {
            edges += models[i]->getNumberOfEdges();
        }
        return edges;
    }
    int Ensemble::getNumberOfStates() const
    {
        int nstates = 0;
        for (auto i = 0; i < n_models; ++i) {
            nstates += models[i]->getNumberOfStates();
        }
        return nstates;
    }
}
@@ -1,25 +0,0 @@

#include "bayesnetUtils.h"
namespace bayesnet {
    // Return the indices in descending order
    std::vector<int> argsort(std::vector<double>& nums)
    {
        int n = nums.size();
        std::vector<int> indices(n);
        iota(indices.begin(), indices.end(), 0);
        sort(indices.begin(), indices.end(), [&nums](int i, int j) {return nums[i] > nums[j];});
        return indices;
    }
    std::vector<std::vector<int>> tensorToVector(torch::Tensor& tensor)
    {
        // convert mxn tensor to nxm std::vector
        std::vector<std::vector<int>> result;
        // Iterate over cols
        for (int i = 0; i < tensor.size(1); ++i) {
            auto col_tensor = tensor.index({ "...", i });
            auto col = std::vector<int>(col_tensor.data_ptr<int>(), col_tensor.data_ptr<int>() + tensor.size(0));
            result.push_back(col);
        }
        return result;
    }
}
@@ -3,7 +3,7 @@ include_directories(
    ${BayesNet_SOURCE_DIR}/lib/Files
    ${BayesNet_SOURCE_DIR}/lib/folding
    ${BayesNet_SOURCE_DIR}/lib/json/include
    ${BayesNet_SOURCE_DIR}/src/BayesNet
    ${BayesNet_SOURCE_DIR}/src
    ${CMAKE_BINARY_DIR}/configured_files/include
)
@@ -121,6 +121,7 @@ namespace bayesnet {
        auto m_ = X[0].size();
        auto n_ = X.size();
        std::vector<std::vector<int>> Xd(n_, std::vector<int>(m_, 0));
        // Convert to nxm vector
        for (auto i = 0; i < n_; i++) {
            Xd[i] = std::vector<int>(X[i].begin(), X[i].end());
        }
@@ -129,9 +130,6 @@ namespace bayesnet {
    }
    float Classifier::score(torch::Tensor& X, torch::Tensor& y)
    {
        if (!fitted) {
            throw std::logic_error(CLASSIFIER_NOT_FITTED);
        }
        torch::Tensor y_pred = predict(X);
        return (y_pred == y).sum().item<float>() / y.size(0);
    }
@@ -34,7 +34,7 @@ namespace bayesnet {
        void setHyperparameters(const nlohmann::json& hyperparameters) override; //For classifiers that don't have hyperparameters
    protected:
        bool fitted;
        int m, n; // m: number of samples, n: number of features
        unsigned int m, n; // m: number of samples, n: number of features
        Network model;
        Metrics metrics;
        std::vector<std::string> features;
251  src/Ensemble.cc  Normal file
@@ -0,0 +1,251 @@
#include "Ensemble.h"

namespace bayesnet {

    Ensemble::Ensemble(bool predict_voting) : Classifier(Network()), n_models(0), predict_voting(predict_voting)
    {

    };
    const std::string ENSEMBLE_NOT_FITTED = "Ensemble has not been fitted";
    void Ensemble::trainModel(const torch::Tensor& weights)
    {
        n_models = models.size();
        for (auto i = 0; i < n_models; ++i) {
            // fit with std::vectors
            models[i]->fit(dataset, features, className, states);
        }
    }
    std::vector<int> Ensemble::compute_arg_max(std::vector<std::vector<double>>& X)
    {
        std::vector<int> y_pred;
        for (auto i = 0; i < X.size(); ++i) {
            auto max = std::max_element(X[i].begin(), X[i].end());
            y_pred.push_back(std::distance(X[i].begin(), max));
        }
        return y_pred;
    }
    torch::Tensor Ensemble::compute_arg_max(torch::Tensor& X)
    {
        auto y_pred = torch::argmax(X, 1);
        return y_pred;
    }
    torch::Tensor Ensemble::voting(torch::Tensor& votes)
    {
        // Convert m x n_models tensor to a m x n_class_states with voting probabilities
        auto y_pred_ = votes.accessor<int, 2>();
        std::vector<int> y_pred_final;
        int numClasses = states.at(className).size();
        // votes is m x n_models with the prediction of every model for each sample
        auto result = torch::zeros({ votes.size(0), numClasses }, torch::kFloat32);
        auto sum = std::reduce(significanceModels.begin(), significanceModels.end());
        for (int i = 0; i < votes.size(0); ++i) {
            // n_votes store in each index (value of class) the significance added by each model
            // i.e. n_votes[0] contains how much value has the value 0 of class. That value is generated by the models predictions
            std::vector<double> n_votes(numClasses, 0.0);
            for (int j = 0; j < n_models; ++j) {
                n_votes[y_pred_[i][j]] += significanceModels.at(j);
            }
            result[i] = torch::tensor(n_votes);
        }
        // To only do one division and gain precision
        result /= sum;
        return result;
    }
    std::vector<std::vector<double>> Ensemble::voting(std::vector<std::vector<int>>& votes)
    {
        // Convert n_models x m matrix to a m x n_class_states matrix
        std::vector<std::vector<double>> y_pred_final;
        int numClasses = states.at(className).size();
        auto sum = std::reduce(significanceModels.begin(), significanceModels.end());
        // y_pred is m x n_models with the prediction of every model for each sample
        std::cout << std::string(80, '*') << std::endl;
        for (int i = 0; i < votes.size(); ++i) {
            // n_votes store in each index (value of class) the significance added by each model
            // i.e. n_votes[0] contains how much value has the value 0 of class. That value is generated by the models predictions
            std::vector<double> n_votes(numClasses, 0.0);
            for (int j = 0; j < n_models; ++j) {
                n_votes[votes[i][j]] += significanceModels.at(j);
            }
            for (auto& x : n_votes) {
                std::cout << x << " ";
            }
            std::cout << std::endl;
            // To only do one division per result and gain precision
            std::transform(n_votes.begin(), n_votes.end(), n_votes.begin(), [sum](double x) { return x / sum; });
            y_pred_final.push_back(n_votes);
        }
        std::cout << std::string(80, '*') << std::endl;
        return y_pred_final;
    }
    std::vector<std::vector<double>> Ensemble::predict_proba(std::vector<std::vector<int>>& X)
    {
        if (!fitted) {
            throw std::logic_error(ENSEMBLE_NOT_FITTED);
        }
        return predict_voting ? predict_average_voting(X) : predict_average_proba(X);
    }
    torch::Tensor Ensemble::predict_proba(torch::Tensor& X)
    {
        if (!fitted) {
            throw std::logic_error(ENSEMBLE_NOT_FITTED);
        }
        return predict_voting ? predict_average_voting(X) : predict_average_proba(X);
    }
    std::vector<int> Ensemble::predict(std::vector<std::vector<int>>& X)
    {
        auto res = predict_proba(X);
        std::cout << "res: " << res.size() << ", " << res[0].size() << std::endl;
        return compute_arg_max(res);
    }
    torch::Tensor Ensemble::predict(torch::Tensor& X)
    {
        auto res = predict_proba(X);
        return compute_arg_max(res);
    }
    torch::Tensor Ensemble::predict_average_proba(torch::Tensor& X)
    {
        auto n_states = models[0]->getClassNumStates();
        torch::Tensor y_pred = torch::zeros({ X.size(1), n_states }, torch::kFloat32);
        auto threads{ std::vector<std::thread>() };
        std::mutex mtx;
        for (auto i = 0; i < n_models; ++i) {
            threads.push_back(std::thread([&, i]() {
                auto ypredict = models[i]->predict_proba(X);
                std::lock_guard<std::mutex> lock(mtx);
                y_pred += ypredict * significanceModels[i];
                }));
        }
        for (auto& thread : threads) {
            thread.join();
        }
        auto sum = std::reduce(significanceModels.begin(), significanceModels.end());
        y_pred /= sum;
        return y_pred;
    }
    std::vector<std::vector<double>> Ensemble::predict_average_proba(std::vector<std::vector<int>>& X)
    {
        auto n_states = models[0]->getClassNumStates();
        std::vector<std::vector<double>> y_pred(X[0].size(), std::vector<double>(n_states, 0.0));
        auto threads{ std::vector<std::thread>() };
        std::mutex mtx;
        for (auto i = 0; i < n_models; ++i) {
            threads.push_back(std::thread([&, i]() {
                auto ypredict = models[i]->predict_proba(X);
                assert(ypredict.size() == y_pred.size());
                assert(ypredict[0].size() == y_pred[0].size());
                std::lock_guard<std::mutex> lock(mtx);
                // Multiply each prediction by the significance of the model and then add it to the final prediction
                for (auto j = 0; j < ypredict.size(); ++j) {
                    std::transform(y_pred[j].begin(), y_pred[j].end(), ypredict[j].begin(), y_pred[j].begin(),
                        [significanceModels = significanceModels[i]](double x, double y) { return x + y * significanceModels; });
                }
                }));
        }
        for (auto& thread : threads) {
            thread.join();
        }
        auto sum = std::reduce(significanceModels.begin(), significanceModels.end());
        //Divide each element of the prediction by the sum of the significances
        for (auto j = 0; j < y_pred.size(); ++j) {
            std::transform(y_pred[j].begin(), y_pred[j].end(), y_pred[j].begin(), [sum](double x) { return x / sum; });
        }
        return y_pred;
    }
    torch::Tensor Ensemble::predict_average_voting(torch::Tensor& X)
    {
        // Build a m x n_models tensor with the predictions of each model
        torch::Tensor y_pred = torch::zeros({ X.size(1), n_models }, torch::kInt32);
        auto threads{ std::vector<std::thread>() };
        std::mutex mtx;
        for (auto i = 0; i < n_models; ++i) {
            threads.push_back(std::thread([&, i]() {
                auto ypredict = models[i]->predict(X);
                std::lock_guard<std::mutex> lock(mtx);
                y_pred.index_put_({ "...", i }, ypredict);
                }));
        }
        for (auto& thread : threads) {
            thread.join();
        }
        return voting(y_pred);
    }
    std::vector<std::vector<double>> Ensemble::predict_average_voting(std::vector<std::vector<int>>& X)
    {
        auto Xt = vectorToTensor(X);
        auto y_pred = predict_average_voting(Xt);
        auto res = voting(y_pred);
        std::vector<std::vector<double>> result;
        // Iterate over cols
        for (int i = 0; i < res.size(1); ++i) {
            auto col_tensor = res.index({ "...", i });
            auto col = std::vector<double>(col_tensor.data_ptr<double>(), col_tensor.data_ptr<double>() + res.size(0));
            result.push_back(col);
        }
        return result;
        //return tensorToVector<double>(res);
    }
    float Ensemble::score(torch::Tensor& X, torch::Tensor& y)
    {
        auto y_pred = predict(X);
        int correct = 0;
        for (int i = 0; i < y_pred.size(0); ++i) {
            if (y_pred[i].item<int>() == y[i].item<int>()) {
                correct++;
            }
        }
        return (double)correct / y_pred.size(0);
    }
    float Ensemble::score(std::vector<std::vector<int>>& X, std::vector<int>& y)
    {
        auto y_pred = predict(X);
        int correct = 0;
        for (int i = 0; i < y_pred.size(); ++i) {
            if (y_pred[i] == y[i]) {
                correct++;
            }
        }
        return (double)correct / y_pred.size();
    }
    std::vector<std::string> Ensemble::show() const
    {
        auto result = std::vector<std::string>();
        for (auto i = 0; i < n_models; ++i) {
            auto res = models[i]->show();
            result.insert(result.end(), res.begin(), res.end());
        }
        return result;
    }
    std::vector<std::string> Ensemble::graph(const std::string& title) const
    {
        auto result = std::vector<std::string>();
        for (auto i = 0; i < n_models; ++i) {
            auto res = models[i]->graph(title + "_" + std::to_string(i));
            result.insert(result.end(), res.begin(), res.end());
        }
        return result;
    }
    int Ensemble::getNumberOfNodes() const
    {
        int nodes = 0;
        for (auto i = 0; i < n_models; ++i) {
            nodes += models[i]->getNumberOfNodes();
        }
        return nodes;
    }
    int Ensemble::getNumberOfEdges() const
    {
        int edges = 0;
        for (auto i = 0; i < n_models; ++i) {
            edges += models[i]->getNumberOfEdges();
        }
        return edges;
    }
    int Ensemble::getNumberOfStates() const
    {
        int nstates = 0;
        for (auto i = 0; i < n_models; ++i) {
            nstates += models[i]->getNumberOfStates();
        }
        return nstates;
    }
}
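The voting overloads above turn per-model class predictions into a probability-like distribution: each model adds its significance to the class it voted for, and the accumulated weights are divided once by the total significance. A minimal standalone sketch of that arithmetic, with assumed example values for the votes and significances (not part of the commit):

    #include <iostream>
    #include <numeric>
    #include <vector>

    int main()
    {
        // Assumed example: 3 models voting on one sample with 2 class states
        std::vector<int> votes = { 0, 1, 0 };                  // class predicted by each model
        std::vector<double> significance = { 0.5, 1.0, 1.5 };  // weight of each model
        int numClasses = 2;
        std::vector<double> n_votes(numClasses, 0.0);
        for (size_t j = 0; j < votes.size(); ++j) {
            n_votes[votes[j]] += significance[j];              // accumulate weight on the voted class
        }
        auto sum = std::reduce(significance.begin(), significance.end());
        for (auto& v : n_votes) {
            v /= sum;                                          // normalize into a distribution
        }
        for (auto v : n_votes) { std::cout << v << " "; }      // prints 0.666667 0.333333 -> class 0 wins
        std::cout << std::endl;
    }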
@@ -14,8 +14,6 @@ namespace bayesnet {
        std::vector<int> predict(std::vector<std::vector<int>>& X) override;
        torch::Tensor predict_proba(torch::Tensor& X) override;
        std::vector<std::vector<double>> predict_proba(std::vector<std::vector<int>>& X) override;
        torch::Tensor do_predict_voting(torch::Tensor& X);
        std::vector<int> do_predict_voting(std::vector<std::vector<int>>& X);
        float score(torch::Tensor& X, torch::Tensor& y) override;
        float score(std::vector<std::vector<int>>& X, std::vector<int>& y) override;
        int getNumberOfNodes() const override;
@@ -31,11 +29,18 @@ namespace bayesnet {
        {
        }
    protected:
        torch::Tensor predict_average_voting(torch::Tensor& X);
        std::vector<std::vector<double>> predict_average_voting(std::vector<std::vector<int>>& X);
        torch::Tensor predict_average_proba(torch::Tensor& X);
        std::vector<std::vector<double>> predict_average_proba(std::vector<std::vector<int>>& X);
        torch::Tensor compute_arg_max(torch::Tensor& X);
        std::vector<int> compute_arg_max(std::vector<std::vector<double>>& X);
        torch::Tensor voting(torch::Tensor& votes);
        std::vector<std::vector<double>> voting(std::vector<std::vector<int>>& votes);
        unsigned n_models;
        std::vector<std::unique_ptr<Classifier>> models;
        std::vector<double> significanceModels;
        void trainModel(const torch::Tensor& weights) override;
        std::vector<int> voting(torch::Tensor& y_pred);
        bool predict_voting;
    };
}
@@ -238,6 +238,7 @@ namespace bayesnet {
        return predictions;
    }
    // Return mxn std::vector of probabilities
    // tsamples is nxm std::vector of samples
    std::vector<std::vector<double>> Network::predict_proba(const std::vector<std::vector<int>>& tsamples)
    {
        if (!fitted) {
39  src/bayesnetUtils.cc  Normal file
@@ -0,0 +1,39 @@

#include "bayesnetUtils.h"
namespace bayesnet {
    // Return the indices in descending order
    std::vector<int> argsort(std::vector<double>& nums)
    {
        int n = nums.size();
        std::vector<int> indices(n);
        iota(indices.begin(), indices.end(), 0);
        sort(indices.begin(), indices.end(), [&nums](int i, int j) {return nums[i] > nums[j];});
        return indices;
    }
    template<typename T>
    std::vector<std::vector<T>> tensorToVector(torch::Tensor& dtensor)
    {
        // convert mxn tensor to nxm std::vector
        std::vector<std::vector<T>> result;
        // Iterate over cols
        for (int i = 0; i < dtensor.size(1); ++i) {
            auto col_tensor = dtensor.index({ "...", i });
            auto col = std::vector<T>(col_tensor.data_ptr<T>(), col_tensor.data_ptr<T>() + dtensor.size(0));
            result.push_back(col);
        }
        return result;
    }
    torch::Tensor vectorToTensor(std::vector<std::vector<int>>& vector)
    {
        // convert nxm std::vector to mxn tensor
        long int m = vector[0].size();
        long int n = vector.size();
        auto tensor = torch::zeros({ m, n }, torch::kInt32);
        for (int i = 0; i < m; ++i) {
            for (int j = 0; j < n; ++j) {
                tensor[i][j] = vector[j][i];
            }
        }
        return tensor;
    }
}
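A minimal usage sketch for these helpers, assuming libtorch is linked and bayesnetUtils.h is on the include path; note the convention the file documents: the std::vector layout is n features x m samples, while the tensor is m x n:

    #include <torch/torch.h>
    #include <iostream>
    #include <vector>
    #include "bayesnetUtils.h"

    int main()
    {
        // argsort: indices of the values in descending order
        std::vector<double> votes = { 0.2, 0.5, 0.1 };
        auto order = bayesnet::argsort(votes);            // { 1, 0, 2 }
        std::cout << "winner: " << order[0] << std::endl; // 1

        // vectorToTensor: nxm std::vector (2 features x 3 samples) -> mxn tensor
        std::vector<std::vector<int>> X = { { 1, 2, 3 }, { 4, 5, 6 } };
        auto t = bayesnet::vectorToTensor(X);
        std::cout << t.sizes() << std::endl;              // [3, 2]
        return 0;
    }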
@@ -4,6 +4,8 @@
#include <vector>
namespace bayesnet {
    std::vector<int> argsort(std::vector<double>& nums);
    std::vector<std::vector<int>> tensorToVector(torch::Tensor& tensor);
    template<typename T>
    std::vector<std::vector<T>> tensorToVector(torch::Tensor& dtensor);
    torch::Tensor vectorToTensor(std::vector<std::vector<int>>& vector);
}
#endif //BAYESNET_UTILS_H