diff --git a/src/experimental_clfs/XA1DE.cpp b/src/experimental_clfs/XA1DE.cpp
index db1dc2d..24a48e8 100644
--- a/src/experimental_clfs/XA1DE.cpp
+++ b/src/experimental_clfs/XA1DE.cpp
@@ -13,8 +13,8 @@ namespace platform {
         auto X = TensorUtils::to_matrix(dataset.slice(0, 0, dataset.size(0) - 1));
         auto y = TensorUtils::to_vector(dataset.index({ -1, "..." }));
         int num_instances = X[0].size();
-        weights_ = weights;
+        weights_ = torch::full({ num_instances }, 1.0);
         normalize_weights(num_instances);
-        aode_.fit(X, y, features, className, states, weights_, true);
+        aode_.fit(X, y, features, className, states, weights_, true, smoothing);
     }
 }
\ No newline at end of file
diff --git a/src/experimental_clfs/XBAODE.cpp b/src/experimental_clfs/XBAODE.cpp
index 27660a4..184f730 100644
--- a/src/experimental_clfs/XBAODE.cpp
+++ b/src/experimental_clfs/XBAODE.cpp
@@ -26,7 +26,7 @@ namespace platform {
         y_train_ = TensorUtils::to_vector(y_train);
         X_test_ = TensorUtils::to_matrix(X_test);
         y_test_ = TensorUtils::to_vector(y_test);
-        maxTolerance = 5;
+        maxTolerance = 3;
         //
         // Logging setup
         //
diff --git a/src/experimental_clfs/XBAODE.h b/src/experimental_clfs/XBAODE.h
index 77bc427..ca757e5 100644
--- a/src/experimental_clfs/XBAODE.h
+++ b/src/experimental_clfs/XBAODE.h
@@ -15,7 +15,10 @@
 #include "ExpClf.h"
 namespace platform {
-    class XBAODE : public ExpClf {
+    class XBAODE {
+
+        // We need to keep a vector of trained models and run an ensemble predict with all of them
+        // Try XA1DE with original and Laplace smoothing and compare the differences when weights are set to 1 or to 1/m
     public:
         XBAODE();
         std::string getVersion() override { return version; };
diff --git a/src/experimental_clfs/Xaode.hpp b/src/experimental_clfs/Xaode.hpp
index 87d34da..76d2bf6 100644
--- a/src/experimental_clfs/Xaode.hpp
+++ b/src/experimental_clfs/Xaode.hpp
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include
 namespace platform {
@@ -49,7 +50,7 @@ namespace platform {
        //
        // Internally, in COUNTS mode, data_ accumulates raw counts, then
        // computeProbabilities(...) normalizes them into conditionals.
-        void fit(std::vector<std::vector<int>>& X, std::vector<int>& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const bool all_parents)
+        void fit(std::vector<std::vector<int>>& X, std::vector<int>& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights, const bool all_parents, const bayesnet::Smoothing_t smoothing)
        {
            int num_instances = X[0].size();
            nFeatures_ = X.size();
@@ -110,8 +111,16 @@ namespace platform {
                instance[nFeatures_] = y[n_instance];
                addSample(instance, weights[n_instance].item<double>());
            }
-            // alpha_ Laplace smoothing adapted to the number of instances
-            alpha_ = 1.0 / static_cast<double>(num_instances);
+            switch (smoothing) {
+                case bayesnet::Smoothing_t::ORIGINAL:
+                    alpha_ = 1.0 / num_instances;
+                    break;
+                case bayesnet::Smoothing_t::LAPLACE:
+                    alpha_ = 1.0;
+                    break;
+                default:
+                    alpha_ = 0.0; // No smoothing
+            }
            initializer_ = std::numeric_limits<double>::max() / (nFeatures_ * nFeatures_);
            computeProbabilities();
        }
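
The Xaode.hpp hunk replaces the fixed alpha_ = 1/N with a switch on the new smoothing argument. Below is a minimal, standalone sketch of what each choice does to a smoothed estimate; Smoothing_t is redeclared locally, and select_alpha, class_counts and the normalization are hypothetical illustrations, not part of the patch (the real code feeds alpha_ into computeProbabilities()).

// Illustrative sketch only: reproduces the alpha_ selection from the patch
// and applies it to a toy class-prior estimate.
#include <cstdio>
#include <vector>

enum class Smoothing_t { NONE, ORIGINAL, LAPLACE };  // local stand-in for bayesnet::Smoothing_t

// Mirrors the switch added to Xaode::fit in this patch.
double select_alpha(Smoothing_t smoothing, int num_instances)
{
    switch (smoothing) {
        case Smoothing_t::ORIGINAL: return 1.0 / num_instances; // pseudo-count shrinks with N
        case Smoothing_t::LAPLACE:  return 1.0;                 // classic add-one smoothing
        default:                    return 0.0;                 // no smoothing
    }
}

int main()
{
    std::vector<double> class_counts = { 3.0, 0.0, 7.0 };   // hypothetical raw counts
    const int num_instances = 10;
    const int num_classes = static_cast<int>(class_counts.size());

    for (auto smoothing : { Smoothing_t::ORIGINAL, Smoothing_t::LAPLACE, Smoothing_t::NONE }) {
        double alpha = select_alpha(smoothing, num_instances);
        double denom = num_instances + alpha * num_classes;   // normalizing constant
        std::printf("alpha = %.3f ->", alpha);
        for (double c : class_counts)
            std::printf(" %.4f", (c + alpha) / denom);        // smoothed estimate per class
        std::printf("\n");
    }
    return 0;
}

The practical difference the sketch shows: ORIGINAL scales the pseudo-count with 1/N, so its effect fades as the dataset grows, LAPLACE always adds a full count per state, and the default leaves zero counts as zero probabilities.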