Complete proposal
@@ -1,151 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#include "IterativeProposal.h"
#include <iostream>
#include <cmath>
#include <limits> // needed for std::numeric_limits below

namespace bayesnet {

    IterativeProposal::IterativeProposal(torch::Tensor& pDataset, std::vector<std::string>& features_, std::string& className_)
        : Proposal(pDataset, features_, className_) {}

    void IterativeProposal::setHyperparameters(const nlohmann::json& hyperparameters_) {
        // First set base Proposal hyperparameters
        Proposal::setHyperparameters(hyperparameters_);

        // Then set IterativeProposal specific hyperparameters
        if (hyperparameters_.contains("max_iterations")) {
            convergence_params.maxIterations = hyperparameters_["max_iterations"];
        }
        if (hyperparameters_.contains("tolerance")) {
            convergence_params.tolerance = hyperparameters_["tolerance"];
        }
        if (hyperparameters_.contains("convergence_metric")) {
            convergence_params.convergenceMetric = hyperparameters_["convergence_metric"];
        }
        if (hyperparameters_.contains("verbose_convergence")) {
            convergence_params.verbose = hyperparameters_["verbose_convergence"];
        }
    }

    template<typename Classifier>
    map<std::string, std::vector<int>> IterativeProposal::iterativeLocalDiscretization(
        const torch::Tensor& y,
        Classifier* classifier,
        const torch::Tensor& dataset,
        const std::vector<std::string>& features,
        const std::string& className,
        const map<std::string, std::vector<int>>& initialStates,
        double smoothing
    ) {
        // Phase 1: Initial discretization (same as original)
        auto currentStates = fit_local_discretization(y);

        double previousValue = -std::numeric_limits<double>::infinity();
        double currentValue = 0.0;

        if (convergence_params.verbose) {
            std::cout << "Starting iterative local discretization with "
                      << convergence_params.maxIterations << " max iterations" << std::endl;
        }

        for (int iteration = 0; iteration < convergence_params.maxIterations; ++iteration) {
            if (convergence_params.verbose) {
                std::cout << "Iteration " << (iteration + 1) << "/" << convergence_params.maxIterations << std::endl;
            }

            // Phase 2: Build model with current discretization
            classifier->fit(dataset, features, className, currentStates, smoothing);

            // Phase 3: Network-aware discretization refinement
            auto newStates = localDiscretizationProposal(currentStates, classifier->getModel());

            // Phase 4: Compute convergence metric
            if (convergence_params.convergenceMetric == "likelihood") {
                currentValue = computeLogLikelihood(classifier->getModel(), dataset);
            } else if (convergence_params.convergenceMetric == "accuracy") {
                // For accuracy we would need validation data - for now use likelihood
                currentValue = computeLogLikelihood(classifier->getModel(), dataset);
            }

            if (convergence_params.verbose) {
                std::cout << "  " << convergence_params.convergenceMetric << ": " << currentValue << std::endl;
            }

            // Check convergence
            if (iteration > 0 && hasConverged(currentValue, previousValue, convergence_params.convergenceMetric)) {
                if (convergence_params.verbose) {
                    std::cout << "Converged after " << (iteration + 1) << " iterations" << std::endl;
                }
                currentStates = newStates;
                break;
            }

            // Update for next iteration
            currentStates = newStates;
            previousValue = currentValue;
        }

        return currentStates;
    }

    double IterativeProposal::computeLogLikelihood(const Network& model, const torch::Tensor& dataset) {
        double logLikelihood = 0.0;
        int n_samples = dataset.size(0);
        int n_features = dataset.size(1);

        for (int i = 0; i < n_samples; ++i) {
            double sampleLogLikelihood = 0.0;

            // Get class value for this sample
            int classValue = dataset[i][n_features - 1].item<int>();

            // Compute log-likelihood for each feature given its parents and class
            for (const auto& node : model.getNodes()) {
                if (node.getName() == model.getClassName()) {
                    // For class node, add log P(class)
                    auto classCounts = node.getCPT();
                    double classProb = classCounts[classValue].item<double>() / dataset.size(0);
                    sampleLogLikelihood += std::log(std::max(classProb, 1e-10));
                } else {
                    // For feature nodes, add log P(feature | parents, class)
                    int featureIdx = std::distance(model.getFeatures().begin(),
                                                   std::find(model.getFeatures().begin(),
                                                             model.getFeatures().end(),
                                                             node.getName()));
                    int featureValue = dataset[i][featureIdx].item<int>();

                    // Simplified probability computation - in practice would need full CPT lookup
                    double featureProb = 0.1; // Placeholder - would compute from CPT
                    sampleLogLikelihood += std::log(std::max(featureProb, 1e-10));
                }
            }

            logLikelihood += sampleLogLikelihood;
        }

        return logLikelihood;
    }

    bool IterativeProposal::hasConverged(double currentValue, double previousValue, const std::string& metric) {
        if (metric == "likelihood") {
            // For likelihood, check if improvement is less than tolerance
            double improvement = currentValue - previousValue;
            return improvement < convergence_params.tolerance;
        } else if (metric == "accuracy") {
            // For accuracy, check if change is less than tolerance
            double change = std::abs(currentValue - previousValue);
            return change < convergence_params.tolerance;
        }
        return false;
    }

    // Explicit template instantiation for common classifier types
    template map<std::string, std::vector<int>> IterativeProposal::iterativeLocalDiscretization<Classifier>(
        const torch::Tensor&, Classifier*, const torch::Tensor&, const std::vector<std::string>&,
        const std::string&, const map<std::string, std::vector<int>>&, double);
}
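The loop above stops once hasConverged reports a log-likelihood gain below tolerance. A minimal standalone sketch of that stopping rule, with a made-up likelihood trace (the numbers are illustrative, not library output):

#include <cmath>
#include <cstdio>
#include <limits>

// Standalone illustration of the likelihood stopping rule used above:
// stop when the improvement over the previous iteration falls below tolerance.
int main() {
    const double tolerance = 1e-6;
    // Made-up log-likelihood trace: large early gains that taper off.
    double trace[] = { -500.0, -450.0, -430.0, -429.9999995, -429.9999994 };
    double previous = -std::numeric_limits<double>::infinity();
    for (int i = 0; i < 5; ++i) {
        double improvement = trace[i] - previous;
        if (i > 0 && improvement < tolerance) {
            std::printf("converged at iteration %d\n", i + 1); // prints: iteration 4
            break;
        }
        previous = trace[i];
    }
    return 0;
}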
@@ -1,50 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef ITERATIVE_PROPOSAL_H
#define ITERATIVE_PROPOSAL_H

#include "Proposal.h"
#include "bayesnet/network/Network.h"
#include <nlohmann/json.hpp>

namespace bayesnet {
    class IterativeProposal : public Proposal {
    public:
        IterativeProposal(torch::Tensor& pDataset, std::vector<std::string>& features_, std::string& className_);
        void setHyperparameters(const nlohmann::json& hyperparameters_);

    protected:
        template<typename Classifier>
        map<std::string, std::vector<int>> iterativeLocalDiscretization(
            const torch::Tensor& y,
            Classifier* classifier,
            const torch::Tensor& dataset,
            const std::vector<std::string>& features,
            const std::string& className,
            const map<std::string, std::vector<int>>& initialStates,
            double smoothing = 1.0
        );

        // Convergence parameters
        struct {
            int maxIterations = 10;
            double tolerance = 1e-6;
            std::string convergenceMetric = "likelihood"; // "likelihood" or "accuracy"
            bool verbose = false;
        } convergence_params;

        nlohmann::json validHyperparameters_iter = {
            "max_iterations", "tolerance", "convergence_metric", "verbose_convergence"
        };

    private:
        double computeLogLikelihood(const Network& model, const torch::Tensor& dataset);
        bool hasConverged(double currentValue, double previousValue, const std::string& metric);
    };
}

#endif
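For reference, configuring these knobs goes through nlohmann::json. A minimal sketch, assuming an IterativeProposal-derived object is in scope; the values are illustrative:

#include <nlohmann/json.hpp>
#include "IterativeProposal.h"

// Sketch: set the convergence hyperparameters declared above.
void configure(bayesnet::IterativeProposal& proposal) {
    nlohmann::json hyperparameters = {
        {"max_iterations", 20},               // raise the iteration cap
        {"tolerance", 1e-4},                  // looser stopping threshold
        {"convergence_metric", "likelihood"}, // or "accuracy"
        {"verbose_convergence", true}         // trace each iteration on stdout
    };
    proposal.setHyperparameters(hyperparameters);
}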
@@ -33,12 +33,13 @@ namespace bayesnet {
        className = className_;
        Xf = X_;
        y = y_;
        // Fills std::vectors Xv & yv with the data from tensors X_ (discretized) & y
        states = fit_local_discretization(y);
        // We have discretized the input data
        // 1st we need to fit the model to build the normal KDB structure, KDB::fit initializes the base Bayesian network

        // Use iterative local discretization instead of the two-phase approach
        states = iterativeLocalDiscretization(y, this, dataset, features, className, states_, smoothing);

        // Final fit with converged discretization
        KDB::fit(dataset, features, className, states, smoothing);
        states = localDiscretizationProposal(states, model);

        return *this;
    }
    torch::Tensor KDBLd::predict(torch::Tensor& X)
@@ -5,6 +5,9 @@
// ***************************************************************

#include "Proposal.h"
#include <iostream>
#include <cmath>
#include <limits>

namespace bayesnet {
    Proposal::Proposal(torch::Tensor& dataset_, std::vector<std::string>& features_, std::string& className_) : pDataset(dataset_), pFeatures(features_), pClassName(className_)
@@ -38,6 +41,15 @@ namespace bayesnet {
                throw std::invalid_argument("Invalid discretization algorithm: " + algorithm.get<std::string>());
            }
        }
        // Convergence parameters
        if (hyperparameters.contains("max_iterations")) {
            convergence_params.maxIterations = hyperparameters["max_iterations"];
            hyperparameters.erase("max_iterations");
        }
        if (hyperparameters.contains("verbose_convergence")) {
            convergence_params.verbose = hyperparameters["verbose_convergence"];
            hyperparameters.erase("verbose_convergence");
        }
        if (!hyperparameters.empty()) {
            throw std::invalid_argument("Invalid hyperparameters for Proposal: " + hyperparameters.dump());
        }
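The pattern here consumes each recognized key and erases it, so whatever remains at the end is rejected wholesale. A hypothetical illustration; the misspelled key is made up, and it assumes the method is reachable from the call site with a signature that accepts a mutable copy:

#include <nlohmann/json.hpp>
#include "Proposal.h"

// Hypothetical: a misspelled key survives the consume-and-erase pass,
// so the final emptiness check throws std::invalid_argument naming it.
void example(bayesnet::Proposal& proposal) {
    nlohmann::json hp = {{"max_iterations", 5}, {"max_iteration", 5}}; // note the typo
    proposal.setHyperparameters(hp); // throws: Invalid hyperparameters for Proposal: {"max_iteration":5}
}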
@@ -163,4 +175,94 @@ namespace bayesnet {
        }
        return yy;
    }

    template<typename Classifier>
    map<std::string, std::vector<int>> Proposal::iterativeLocalDiscretization(
        const torch::Tensor& y,
        Classifier* classifier,
        const torch::Tensor& dataset,
        const std::vector<std::string>& features,
        const std::string& className,
        const map<std::string, std::vector<int>>& initialStates,
        Smoothing_t smoothing
    )
    {
        // Phase 1: Initial discretization (same as original)
        auto currentStates = fit_local_discretization(y);
        auto previousModel = Network();

        if (convergence_params.verbose) {
            std::cout << "Starting iterative local discretization with "
                      << convergence_params.maxIterations << " max iterations" << std::endl;
        }

        for (int iteration = 0; iteration < convergence_params.maxIterations; ++iteration) {
            if (convergence_params.verbose) {
                std::cout << "Iteration " << (iteration + 1) << "/" << convergence_params.maxIterations << std::endl;
            }

            // Phase 2: Build model with current discretization
            classifier->fit(dataset, features, className, currentStates, smoothing);

            // Phase 3: Network-aware discretization refinement
            currentStates = localDiscretizationProposal(currentStates, classifier->model);

            // Check convergence
            if (iteration > 0 && previousModel == classifier->model) {
                if (convergence_params.verbose) {
                    std::cout << "Converged after " << (iteration + 1) << " iterations" << std::endl;
                }
                break;
            }

            // Update for next iteration
            previousModel = classifier->model;
        }

        return currentStates;
    }

    double Proposal::computeLogLikelihood(Network& model, const torch::Tensor& dataset)
    {
        double logLikelihood = 0.0;
        int n_samples = dataset.size(0);
        int n_features = dataset.size(1);

        for (int i = 0; i < n_samples; ++i) {
            double sampleLogLikelihood = 0.0;

            // Get class value for this sample
            int classValue = dataset[i][n_features - 1].item<int>();

            // Compute log-likelihood for each feature given its parents and class
            for (const auto& node : model.getNodes()) {
                if (node.first == model.getClassName()) {
                    // For class node, add log P(class)
                    auto classCounts = node.second->getCPT();
                    double classProb = classCounts[classValue].item<double>() / dataset.size(0);
                    sampleLogLikelihood += std::log(std::max(classProb, 1e-10));
                } else {
                    // For feature nodes, add log P(feature | parents, class)
                    int featureIdx = std::distance(model.getFeatures().begin(),
                                                   std::find(model.getFeatures().begin(),
                                                             model.getFeatures().end(),
                                                             node.first));
                    int featureValue = dataset[i][featureIdx].item<int>();

                    // Simplified probability computation - in practice would need full CPT lookup
                    double featureProb = 0.1; // Placeholder - would compute from CPT
                    sampleLogLikelihood += std::log(std::max(featureProb, 1e-10));
                }
            }

            logLikelihood += sampleLogLikelihood;
        }

        return logLikelihood;
    }

    // Explicit template instantiation for common classifier types
    // template map<std::string, std::vector<int>> Proposal::iterativeLocalDiscretization<Classifier>(
    //     const torch::Tensor&, Classifier*, const torch::Tensor&, const std::vector<std::string>&,
    //     const std::string&, const map<std::string, std::vector<int>>&, Smoothing_t);
}
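The per-feature term above is a stub (featureProb = 0.1). A full implementation would walk the node's CPT conditioned on the sample's parent values. A minimal sketch of that lookup, assuming the CPT tensor is indexed as [ownValue][parent1Value][parent2Value]... and that Node exposes getParents(); both are assumptions, not confirmed against the library:

#include <cmath>
#include <map>
#include <memory>
#include <string>
#include "bayesnet/network/Network.h"

// Hypothetical full lookup of log P(feature | parents) from a node's CPT.
double featureLogProb(const std::unique_ptr<bayesnet::Node>& node,
                      const std::map<std::string, int>& sampleValues)
{
    // Select this node's own value on the first CPT dimension...
    torch::Tensor p = node->getCPT()[sampleValues.at(node->getName())];
    for (const auto* parent : node->getParents()) {
        p = p[sampleValues.at(parent->getName())]; // ...then condition on each parent's value
    }
    return std::log(std::max(p.item<double>(), 1e-10)); // same 1e-10 floor as above
}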
@@ -25,18 +25,43 @@ namespace bayesnet {
        torch::Tensor prepareX(torch::Tensor& X);
        map<std::string, std::vector<int>> localDiscretizationProposal(const map<std::string, std::vector<int>>& states, Network& model);
        map<std::string, std::vector<int>> fit_local_discretization(const torch::Tensor& y);

        // Iterative discretization method
        template<typename Classifier>
        map<std::string, std::vector<int>> iterativeLocalDiscretization(
            const torch::Tensor& y,
            Classifier* classifier,
            const torch::Tensor& dataset,
            const std::vector<std::string>& features,
            const std::string& className,
            const map<std::string, std::vector<int>>& initialStates,
            const Smoothing_t smoothing
        );

        torch::Tensor Xf; // X continuous nxm tensor
        torch::Tensor y; // y discrete nx1 tensor
        map<std::string, std::unique_ptr<mdlp::Discretizer>> discretizers;

        // MDLP parameters
        struct {
            size_t min_length = 3; // Minimum length of the interval to consider it in mdlp
            float proposed_cuts = 0.0; // Proposed cuts for the Discretization algorithm
            int max_depth = std::numeric_limits<int>::max(); // Maximum depth of the MDLP tree
        } ld_params;

        // Convergence parameters
        struct {
            int maxIterations = 10;
            bool verbose = false;
        } convergence_params;

        nlohmann::json validHyperparameters_ld = {
            "ld_algorithm", "ld_proposed_cuts", "mdlp_min_length", "mdlp_max_depth",
            "max_iterations", "verbose_convergence"
        };
    private:
        std::vector<int> factorize(const std::vector<std::string>& labels_t);
        double computeLogLikelihood(Network& model, const torch::Tensor& dataset);
        torch::Tensor& pDataset; // (n+1)xm tensor
        std::vector<std::string>& pFeatures;
        std::string& pClassName;
@@ -15,14 +15,14 @@ namespace bayesnet {
        className = className_;
        Xf = X_;
        y = y_;
        // Fills std::vectors Xv & yv with the data from tensors X_ (discretized) & y
        states = fit_local_discretization(y);
        // We have discretized the input data
        // 1st we need to fit the model to build the normal TAN structure, TAN::fit initializes the base Bayesian network

        // Use iterative local discretization instead of the two-phase approach
        states = iterativeLocalDiscretization(y, this, dataset, features, className, states_, smoothing);

        // Final fit with converged discretization
        TAN::fit(dataset, features, className, states, smoothing);
        states = localDiscretizationProposal(states, model);

        return *this;
    }
    torch::Tensor TANLd::predict(torch::Tensor& X)
    {
@@ -1,45 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#include "TANLdi.h"

namespace bayesnet {
    TANLdi::TANLdi() : TAN(), IterativeProposal(dataset, features, className) {}

    TANLdi& TANLdi::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_, const Smoothing_t smoothing)
    {
        checkInput(X_, y_);
        features = features_;
        className = className_;
        Xf = X_;
        y = y_;

        // Use iterative local discretization instead of the two-phase approach
        states = iterativeLocalDiscretization(y, this, dataset, features, className, states_, smoothing);

        // Final fit with converged discretization
        TAN::fit(dataset, features, className, states, smoothing);

        return *this;
    }

    torch::Tensor TANLdi::predict(torch::Tensor& X)
    {
        auto Xt = prepareX(X);
        return TAN::predict(Xt);
    }

    torch::Tensor TANLdi::predict_proba(torch::Tensor& X)
    {
        auto Xt = prepareX(X);
        return TAN::predict_proba(Xt);
    }

    std::vector<std::string> TANLdi::graph(const std::string& name) const
    {
        return TAN::graph(name);
    }
}
@@ -1,24 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************

#ifndef TANLDI_H
#define TANLDI_H
#include "TAN.h"
#include "IterativeProposal.h"

namespace bayesnet {
    class TANLdi : public TAN, public IterativeProposal {
    private:
    public:
        TANLdi();
        virtual ~TANLdi() = default;
        TANLdi& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states, const Smoothing_t smoothing) override;
        std::vector<std::string> graph(const std::string& name = "TANLdi") const override;
        torch::Tensor predict(torch::Tensor& X) override;
        torch::Tensor predict_proba(torch::Tensor& X) override;
    };
}
#endif // !TANLDI_H
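For orientation, a hypothetical end-to-end driver for this (since removed) class. The tensor shapes follow the features-by-samples convention used elsewhere in the library; the feature names, hyperparameter values, and the Smoothing_t::ORIGINAL enumerator are assumptions, and the qualified call sidesteps the ambiguity between the setHyperparameters inherited from TAN and from IterativeProposal:

#include <map>
#include <string>
#include <vector>
#include <nlohmann/json.hpp>
#include "TANLdi.h"

int main() {
    int n_features = 4, n_samples = 100;
    torch::Tensor X = torch::rand({n_features, n_samples});  // continuous inputs
    torch::Tensor y = torch::randint(0, 2, {n_samples});     // binary class labels
    std::vector<std::string> features = {"f0", "f1", "f2", "f3"};
    std::string className = "class";
    std::map<std::string, std::vector<int>> states;          // filled in by fit

    bayesnet::TANLdi clf;
    nlohmann::json hp = {{"max_iterations", 20}, {"verbose_convergence", true}};
    clf.IterativeProposal::setHyperparameters(hp);           // qualified to avoid ambiguity
    clf.fit(X, y, features, className, states, bayesnet::Smoothing_t::ORIGINAL);
    auto predictions = clf.predict(X);
    return 0;
}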