LCOV - code coverage report
Current view: top level - bayesnet/ensembles - Ensemble.cc (source / functions)
Test: coverage.info
Test Date: 2024-04-30 13:59:18

              Coverage    Total    Hit
Lines:          98.1 %      154    151
Functions:     100.0 %       25     25

            Line data    Source code
       1              : // ***************************************************************
       2              : // SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
       3              : // SPDX-FileType: SOURCE
       4              : // SPDX-License-Identifier: MIT
       5              : // ***************************************************************
       6              : 
       7              : #include "Ensemble.h"
       8              : 
       9              : namespace bayesnet {
      10              : 
      11          468 :     Ensemble::Ensemble(bool predict_voting) : Classifier(Network()), n_models(0), predict_voting(predict_voting)
      12              :     {
      13              : 
      14          468 :     };
      15              :     const std::string ENSEMBLE_NOT_FITTED = "Ensemble has not been fitted";
      16           36 :     void Ensemble::trainModel(const torch::Tensor& weights)
      17              :     {
      18           36 :         n_models = models.size();
      19          282 :         for (auto i = 0; i < n_models; ++i) {
      20              :             // fit with std::vectors
      21          246 :             models[i]->fit(dataset, features, className, states);
      22              :         }
      23           36 :     }
      24           66 :     std::vector<int> Ensemble::compute_arg_max(std::vector<std::vector<double>>& X)
      25              :     {
      26           66 :         std::vector<int> y_pred;
      27        14730 :         for (auto i = 0; i < X.size(); ++i) {
      28        14664 :             auto max = std::max_element(X[i].begin(), X[i].end());
      29        29328 :             y_pred.push_back(std::distance(X[i].begin(), max));
      30              :         }
      31           66 :         return y_pred;
      32            0 :     }
      33          636 :     torch::Tensor Ensemble::compute_arg_max(torch::Tensor& X)
      34              :     {
      35          636 :         auto y_pred = torch::argmax(X, 1);
      36          636 :         return y_pred;
      37              :     }
      38          240 :     torch::Tensor Ensemble::voting(torch::Tensor& votes)
      39              :     {
      40              :         // Convert the m x n_models tensor into an m x n_class_states tensor of voting probabilities
      41          240 :         auto y_pred_ = votes.accessor<int, 2>();
      42          240 :         std::vector<int> y_pred_final;
      43          240 :         int numClasses = states.at(className).size();
      44              :         // votes is m x n_models with the prediction of every model for each sample
      45          240 :         auto result = torch::zeros({ votes.size(0), numClasses }, torch::kFloat32);
      46          240 :         auto sum = std::reduce(significanceModels.begin(), significanceModels.end());
      47        61836 :         for (int i = 0; i < votes.size(0); ++i) {
      48              :             // n_votes stores, at each index (class value), the total significance contributed by the models
      49              :             // i.e. n_votes[0] accumulates the weight that the models' predictions assign to class value 0
      50        61596 :             std::vector<double> n_votes(numClasses, 0.0);
      51       515400 :             for (int j = 0; j < n_models; ++j) {
      52       453804 :                 n_votes[y_pred_[i][j]] += significanceModels.at(j);
      53              :             }
      54        61596 :             result[i] = torch::tensor(n_votes);
      55        61596 :         }
      56              :         // To only do one division and gain precision
      57          240 :         result /= sum;
      58          480 :         return result;
      59          240 :     }
      60          132 :     std::vector<std::vector<double>> Ensemble::predict_proba(std::vector<std::vector<int>>& X)
      61              :     {
      62          132 :         if (!fitted) {
      63           36 :             throw std::logic_error(ENSEMBLE_NOT_FITTED);
      64              :         }
      65           96 :         return predict_voting ? predict_average_voting(X) : predict_average_proba(X);
      66              :     }
      67          678 :     torch::Tensor Ensemble::predict_proba(torch::Tensor& X)
      68              :     {
      69          678 :         if (!fitted) {
      70           36 :             throw std::logic_error(ENSEMBLE_NOT_FITTED);
      71              :         }
      72          642 :         return predict_voting ? predict_average_voting(X) : predict_average_proba(X);
      73              :     }
      74           84 :     std::vector<int> Ensemble::predict(std::vector<std::vector<int>>& X)
      75              :     {
      76           84 :         auto res = predict_proba(X);
      77          120 :         return compute_arg_max(res);
      78           60 :     }
      79          654 :     torch::Tensor Ensemble::predict(torch::Tensor& X)
      80              :     {
      81          654 :         auto res = predict_proba(X);
      82         1260 :         return compute_arg_max(res);
      83          630 :     }
      84          444 :     torch::Tensor Ensemble::predict_average_proba(torch::Tensor& X)
      85              :     {
      86          444 :         auto n_states = models[0]->getClassNumStates();
      87          444 :         torch::Tensor y_pred = torch::zeros({ X.size(1), n_states }, torch::kFloat32);
      88          444 :         auto threads{ std::vector<std::thread>() };
      89          444 :         std::mutex mtx;
      90         2646 :         for (auto i = 0; i < n_models; ++i) {
      91         2202 :             threads.push_back(std::thread([&, i]() {
      92         2202 :                 auto ypredict = models[i]->predict_proba(X);
      93         2202 :                 std::lock_guard<std::mutex> lock(mtx);
      94         2202 :                 y_pred += ypredict * significanceModels[i];
      95         2202 :                 }));
      96              :         }
      97         2646 :         for (auto& thread : threads) {
      98         2202 :             thread.join();
      99              :         }
     100          444 :         auto sum = std::reduce(significanceModels.begin(), significanceModels.end());
     101          444 :         y_pred /= sum;
     102          888 :         return y_pred;
     103          444 :     }
     104           54 :     std::vector<std::vector<double>> Ensemble::predict_average_proba(std::vector<std::vector<int>>& X)
     105              :     {
     106           54 :         auto n_states = models[0]->getClassNumStates();
     107           54 :         std::vector<std::vector<double>> y_pred(X[0].size(), std::vector<double>(n_states, 0.0));
     108           54 :         auto threads{ std::vector<std::thread>() };
     109           54 :         std::mutex mtx;
     110          420 :         for (auto i = 0; i < n_models; ++i) {
     111          366 :             threads.push_back(std::thread([&, i]() {
     112          366 :                 auto ypredict = models[i]->predict_proba(X);
     113          366 :                 assert(ypredict.size() == y_pred.size());
     114          366 :                 assert(ypredict[0].size() == y_pred[0].size());
     115          366 :                 std::lock_guard<std::mutex> lock(mtx);
     116              :                 // Multiply each prediction by the significance of the model and then add it to the final prediction
     117        72546 :                 for (auto j = 0; j < ypredict.size(); ++j) {
     118        72180 :                     std::transform(y_pred[j].begin(), y_pred[j].end(), ypredict[j].begin(), y_pred[j].begin(),
     119       462060 :                         [significanceModels = significanceModels[i]](double x, double y) { return x + y * significanceModels; });
     120              :                 }
     121          366 :                 }));
     122              :         }
     123          420 :         for (auto& thread : threads) {
     124          366 :             thread.join();
     125              :         }
     126           54 :         auto sum = std::reduce(significanceModels.begin(), significanceModels.end());
     127              :         // Divide each element of the prediction by the sum of the significances
     128        10074 :         for (auto j = 0; j < y_pred.size(); ++j) {
     129        59340 :             std::transform(y_pred[j].begin(), y_pred[j].end(), y_pred[j].begin(), [sum](double x) { return x / sum; });
     130              :         }
     131          108 :         return y_pred;
     132           54 :     }
     133           42 :     std::vector<std::vector<double>> Ensemble::predict_average_voting(std::vector<std::vector<int>>& X)
     134              :     {
     135           42 :         torch::Tensor Xt = bayesnet::vectorToTensor(X, false);
     136           42 :         auto y_pred = predict_average_voting(Xt);
     137           42 :         std::vector<std::vector<double>> result = tensorToVectorDouble(y_pred);
     138           84 :         return result;
     139           42 :     }
     140          240 :     torch::Tensor Ensemble::predict_average_voting(torch::Tensor& X)
     141              :     {
     142              :         // Build an m x n_models tensor with the predictions of each model
     143          240 :         torch::Tensor y_pred = torch::zeros({ X.size(1), n_models }, torch::kInt32);
     144          240 :         auto threads{ std::vector<std::thread>() };
     145          240 :         std::mutex mtx;
     146         1848 :         for (auto i = 0; i < n_models; ++i) {
     147         1608 :             threads.push_back(std::thread([&, i]() {
     148         1608 :                 auto ypredict = models[i]->predict(X);
     149         1608 :                 std::lock_guard<std::mutex> lock(mtx);
     150         4824 :                 y_pred.index_put_({ "...", i }, ypredict);
     151         3216 :                 }));
     152              :         }
     153         1848 :         for (auto& thread : threads) {
     154         1608 :             thread.join();
     155              :         }
     156          480 :         return voting(y_pred);
     157          240 :     }
     158          120 :     float Ensemble::score(torch::Tensor& X, torch::Tensor& y)
     159              :     {
     160          120 :         auto y_pred = predict(X);
     161          108 :         int correct = 0;
     162        33876 :         for (int i = 0; i < y_pred.size(0); ++i) {
     163        33768 :             if (y_pred[i].item<int>() == y[i].item<int>()) {
     164        29502 :                 correct++;
     165              :             }
     166              :         }
     167          216 :         return (double)correct / y_pred.size(0);
     168          108 :     }
     169           60 :     float Ensemble::score(std::vector<std::vector<int>>& X, std::vector<int>& y)
     170              :     {
     171           60 :         auto y_pred = predict(X);
     172           48 :         int correct = 0;
     173        12876 :         for (int i = 0; i < y_pred.size(); ++i) {
     174        12828 :             if (y_pred[i] == y[i]) {
     175        10722 :                 correct++;
     176              :             }
     177              :         }
     178           96 :         return (double)correct / y_pred.size();
     179           48 :     }
     180            6 :     std::vector<std::string> Ensemble::show() const
     181              :     {
     182            6 :         auto result = std::vector<std::string>();
     183           30 :         for (auto i = 0; i < n_models; ++i) {
     184           24 :             auto res = models[i]->show();
     185           24 :             result.insert(result.end(), res.begin(), res.end());
     186           24 :         }
     187            6 :         return result;
     188            0 :     }
     189           18 :     std::vector<std::string> Ensemble::graph(const std::string& title) const
     190              :     {
     191           18 :         auto result = std::vector<std::string>();
     192          120 :         for (auto i = 0; i < n_models; ++i) {
     193          102 :             auto res = models[i]->graph(title + "_" + std::to_string(i));
     194          102 :             result.insert(result.end(), res.begin(), res.end());
     195          102 :         }
     196           18 :         return result;
     197            0 :     }
     198           36 :     int Ensemble::getNumberOfNodes() const
     199              :     {
     200           36 :         int nodes = 0;
     201          300 :         for (auto i = 0; i < n_models; ++i) {
     202          264 :             nodes += models[i]->getNumberOfNodes();
     203              :         }
     204           36 :         return nodes;
     205              :     }
     206           36 :     int Ensemble::getNumberOfEdges() const
     207              :     {
     208           36 :         int edges = 0;
     209          300 :         for (auto i = 0; i < n_models; ++i) {
     210          264 :             edges += models[i]->getNumberOfEdges();
     211              :         }
     212           36 :         return edges;
     213              :     }
     214            6 :     int Ensemble::getNumberOfStates() const
     215              :     {
     216            6 :         int nstates = 0;
     217           30 :         for (auto i = 0; i < n_models; ++i) {
     218           24 :             nstates += models[i]->getNumberOfStates();
     219              :         }
     220            6 :         return nstates;
     221              :     }
     222              : }
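
The function voting() above implements weighted majority voting: each model adds its significance to the class it predicted for a sample, and the per-class totals are divided by the sum of all significances so that every row becomes a probability distribution. Below is a minimal sketch of that scheme, using plain std::vector instead of torch::Tensor so it compiles without libtorch; the name weighted_vote and its signature are illustrative only and not part of the bayesnet API.

    // Illustrative sketch of the aggregation done by Ensemble::voting(),
    // written with std::vector so it has no libtorch dependency.
    #include <numeric>
    #include <vector>

    std::vector<std::vector<double>> weighted_vote(
        const std::vector<std::vector<int>>& votes,   // m samples x n_models class predictions
        const std::vector<double>& significance,      // one weight per model
        int numClasses)
    {
        // Sum of the model weights, used once at the end to normalize (as in voting()).
        double sum = std::reduce(significance.begin(), significance.end());
        std::vector<std::vector<double>> result(votes.size(), std::vector<double>(numClasses, 0.0));
        for (size_t i = 0; i < votes.size(); ++i) {
            // Each model adds its significance to the class it predicted for sample i.
            for (size_t j = 0; j < votes[i].size(); ++j) {
                result[i][votes[i][j]] += significance[j];
            }
            // Normalize so the row sums to 1.
            for (auto& v : result[i]) {
                v /= sum;
            }
        }
        return result;
    }

    // Example: two models with weights 0.7 and 0.3, three samples, two classes.
    // weighted_vote({{0, 1}, {1, 1}, {0, 0}}, {0.7, 0.3}, 2)
    // yields {0.7, 0.3}, {0.0, 1.0} and {1.0, 0.0}.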
        

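Both predict_average_proba overloads and the tensor overload of predict_average_voting share the same concurrency pattern: one std::thread per model, a mutex-guarded accumulation into the shared result, and a join over all threads before normalizing. The following sketch reduces that pattern to a scalar weighted sum so it stays self-contained; weighted_accumulate is an illustrative name, and the precomputed per-model outputs stand in for the predict_proba calls of the real code.

    // Illustrative sketch of the per-model thread + mutex accumulation used above.
    #include <mutex>
    #include <thread>
    #include <vector>

    double weighted_accumulate(const std::vector<double>& outputs,  // one precomputed value per model
                               const std::vector<double>& weights)  // significance of each model
    {
        double total = 0.0;
        std::mutex mtx;
        std::vector<std::thread> threads;
        for (size_t i = 0; i < outputs.size(); ++i) {
            // Capture i by value so each thread works on its own model, as in Ensemble.
            threads.push_back(std::thread([&, i]() {
                double contribution = outputs[i] * weights[i];   // stands in for predict_proba(X) * significanceModels[i]
                std::lock_guard<std::mutex> lock(mtx);           // guard the shared accumulator
                total += contribution;
                }));
        }
        for (auto& thread : threads) {
            thread.join();                                       // wait for every model before using the result
        }
        return total;
    }
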
Generated by: LCOV version 2.0-1
