Line data Source code
1 : // ***************************************************************
2 : // SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
3 : // SPDX-FileType: SOURCE
4 : // SPDX-License-Identifier: MIT
5 : // ***************************************************************
6 :
7 : #include <ArffFiles.h>
8 : #include "Proposal.h"
9 :
10 : namespace bayesnet {
    // Binds (does not copy) the classifier's dataset, feature names and class name;
    // the referenced objects must outlive this Proposal instance.
    Proposal::Proposal(torch::Tensor& dataset_, std::vector<std::string>& features_, std::string& className_) : pDataset(dataset_), pFeatures(features_), pClassName(className_) {}
12 100 : Proposal::~Proposal()
13 : {
14 948 : for (auto& [key, value] : discretizers) {
15 848 : delete value;
16 : }
17 100 : }
18 114 : void Proposal::checkInput(const torch::Tensor& X, const torch::Tensor& y)
19 : {
20 114 : if (!torch::is_floating_point(X)) {
21 0 : throw std::invalid_argument("X must be a floating point tensor");
22 : }
23 114 : if (torch::is_floating_point(y)) {
24 0 : throw std::invalid_argument("y must be an integer tensor");
25 : }
26 114 : }
27 106 : map<std::string, std::vector<int>> Proposal::localDiscretizationProposal(const map<std::string, std::vector<int>>& oldStates, Network& model)
28 : {
29 : // order of local discretization is important. no good 0, 1, 2...
30 : // although we rediscretize features after the local discretization of every feature
31 106 : auto order = model.topological_sort();
32 106 : auto& nodes = model.getNodes();
33 106 : map<std::string, std::vector<int>> states = oldStates;
34 106 : std::vector<int> indicesToReDiscretize;
35 106 : bool upgrade = false; // Flag to check if we need to upgrade the model
36 888 : for (auto feature : order) {
37 782 : auto nodeParents = nodes[feature]->getParents();
38 782 : if (nodeParents.size() < 2) continue; // Only has class as parent
39 662 : upgrade = true;
40 662 : int index = find(pFeatures.begin(), pFeatures.end(), feature) - pFeatures.begin();
41 662 : indicesToReDiscretize.push_back(index); // We need to re-discretize this feature
42 662 : std::vector<std::string> parents;
43 2010 : transform(nodeParents.begin(), nodeParents.end(), back_inserter(parents), [](const auto& p) { return p->getName(); });
44 : // Remove class as parent as it will be added later
45 662 : parents.erase(remove(parents.begin(), parents.end(), pClassName), parents.end());
46 : // Get the indices of the parents
47 662 : std::vector<int> indices;
48 662 : indices.push_back(-1); // Add class index
49 1348 : transform(parents.begin(), parents.end(), back_inserter(indices), [&](const auto& p) {return find(pFeatures.begin(), pFeatures.end(), p) - pFeatures.begin(); });
50 : // Now we fit the discretizer of the feature, conditioned on its parents and the class i.e. discretizer.fit(X[index], X[indices] + y)
51 662 : std::vector<std::string> yJoinParents(Xf.size(1));
52 2010 : for (auto idx : indices) {
53 479320 : for (int i = 0; i < Xf.size(1); ++i) {
54 1433916 : yJoinParents[i] += to_string(pDataset.index({ idx, i }).item<int>());
55 : }
56 : }
57 662 : auto arff = ArffFiles();
58 662 : auto yxv = arff.factorize(yJoinParents);
59 1324 : auto xvf_ptr = Xf.index({ index }).data_ptr<float>();
60 662 : auto xvf = std::vector<mdlp::precision_t>(xvf_ptr, xvf_ptr + Xf.size(1));
61 662 : discretizers[feature]->fit(xvf, yxv);
62 902 : }
63 106 : if (upgrade) {
64 : // Discretize again X (only the affected indices) with the new fitted discretizers
65 768 : for (auto index : indicesToReDiscretize) {
66 1324 : auto Xt_ptr = Xf.index({ index }).data_ptr<float>();
67 662 : auto Xt = std::vector<float>(Xt_ptr, Xt_ptr + Xf.size(1));
68 2648 : pDataset.index_put_({ index, "..." }, torch::tensor(discretizers[pFeatures[index]]->transform(Xt)));
69 662 : auto xStates = std::vector<int>(discretizers[pFeatures[index]]->getCutPoints().size() + 1);
70 662 : iota(xStates.begin(), xStates.end(), 0);
71 : //Update new states of the feature/node
72 662 : states[pFeatures[index]] = xStates;
73 662 : }
74 106 : const torch::Tensor weights = torch::full({ pDataset.size(1) }, 1.0 / pDataset.size(1), torch::kDouble);
75 106 : model.fit(pDataset, weights, pFeatures, pClassName, states);
76 106 : }
77 212 : return states;
78 480064 : }
79 116 : map<std::string, std::vector<int>> Proposal::fit_local_discretization(const torch::Tensor& y)
80 : {
81 : // Discretize the continuous input data and build pDataset (Classifier::dataset)
82 116 : int m = Xf.size(1);
83 116 : int n = Xf.size(0);
84 116 : map<std::string, std::vector<int>> states;
85 116 : pDataset = torch::zeros({ n + 1, m }, torch::kInt32);
86 116 : auto yv = std::vector<int>(y.data_ptr<int>(), y.data_ptr<int>() + y.size(0));
87 : // discretize input data by feature(row)
88 972 : for (auto i = 0; i < pFeatures.size(); ++i) {
89 856 : auto* discretizer = new mdlp::CPPFImdlp();
90 1712 : auto Xt_ptr = Xf.index({ i }).data_ptr<float>();
91 856 : auto Xt = std::vector<float>(Xt_ptr, Xt_ptr + Xf.size(1));
92 856 : discretizer->fit(Xt, yv);
93 3424 : pDataset.index_put_({ i, "..." }, torch::tensor(discretizer->transform(Xt)));
94 856 : auto xStates = std::vector<int>(discretizer->getCutPoints().size() + 1);
95 856 : iota(xStates.begin(), xStates.end(), 0);
96 856 : states[pFeatures[i]] = xStates;
97 856 : discretizers[pFeatures[i]] = discretizer;
98 856 : }
99 116 : int n_classes = torch::max(y).item<int>() + 1;
100 116 : auto yStates = std::vector<int>(n_classes);
101 116 : iota(yStates.begin(), yStates.end(), 0);
102 116 : states[pClassName] = yStates;
103 348 : pDataset.index_put_({ n, "..." }, y);
104 232 : return states;
105 1944 : }
106 84 : torch::Tensor Proposal::prepareX(torch::Tensor& X)
107 : {
108 84 : auto Xtd = torch::zeros_like(X, torch::kInt32);
109 688 : for (int i = 0; i < X.size(0); ++i) {
110 604 : auto Xt = std::vector<float>(X[i].data_ptr<float>(), X[i].data_ptr<float>() + X.size(1));
111 604 : auto Xd = discretizers[pFeatures[i]]->transform(Xt);
112 1812 : Xtd.index_put_({ i }, torch::tensor(Xd, torch::kInt32));
113 604 : }
114 84 : return Xtd;
115 604 : }
116 : }
|