Compare commits

12 Commits

32 changed files with 139 additions and 89 deletions

View File

@@ -1,7 +1,7 @@
cmake_minimum_required(VERSION 3.20)
project(Platform
VERSION 1.1.0
VERSION 1.1.1
DESCRIPTION "Platform to run Experiments with classifiers."
HOMEPAGE_URL "https://github.com/rmontanana/platform"
LANGUAGES CXX
@@ -13,6 +13,7 @@ set(CMAKE_CXX_STANDARD 20)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O3")
@@ -66,7 +67,7 @@ find_package(arff-files CONFIG REQUIRED)
find_package(fimdlp CONFIG REQUIRED)
find_package(folding CONFIG REQUIRED)
find_package(bayesnet CONFIG REQUIRED)
find_package(pyclassifiers CONFIG REQUIRED)
# find_package(pyclassifiers CONFIG REQUIRED)
find_package(libxlsxwriter CONFIG REQUIRED)
find_package(Boost REQUIRED COMPONENTS python)

View File

@@ -111,9 +111,8 @@ release: ## Build a Release version of the project with Conan
opt = ""
test: ## Run tests (opt="-s") to verbose output the tests, (opt="-c='Test Maximum Spanning Tree'") to run only that section
@echo ">>> Running Platform tests...";
@$(MAKE) clean
@$(MAKE) debug
@$(call "Compile_target", "Debug", "$(f_debug)", $(test_targets))
@$(call compile_target, "Debug", "$(f_debug)", $(test_targets))
@for t in $(test_targets); do \
if [ -f $(f_debug)/tests/$$t ]; then \
cd $(f_debug)/tests ; \
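
Note: in GNU Make the first argument of $(call ...) is the name of a variable/function, looked up verbatim and case-sensitively, so the old quoted, mis-cased "Compile_target" resolved to nothing and the recipe line expanded empty; the fixed line passes the real function name, compile_target.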

View File

@@ -15,13 +15,13 @@ class PlatformConan(ConanFile):
def requirements(self):
# Core dependencies from vcpkg.json
self.requires("argparse/3.2")
self.requires("libtorch/2.7.0")
self.requires("libtorch/2.7.1")
self.requires("nlohmann_json/3.11.3")
self.requires("folding/1.1.1")
self.requires("fimdlp/2.1.0")
self.requires("arff-files/1.2.0")
self.requires("bayesnet/1.2.0")
self.requires("pyclassifiers/1.0.3")
self.requires("folding/1.1.2")
self.requires("fimdlp/2.1.1")
self.requires("arff-files/1.2.1")
self.requires("bayesnet/1.2.1")
# self.requires("pyclassifiers/1.0.3")
self.requires("libxlsxwriter/1.2.2")
def build_requirements(self):

View File

@@ -18,7 +18,7 @@ add_executable(
experimental_clfs/DecisionTree.cpp
experimental_clfs/AdaBoost.cpp
)
target_link_libraries(b_best Boost::boost pyclassifiers::pyclassifiers bayesnet::bayesnet argparse::argparse fimdlp::fimdlp ${Python3_LIBRARIES} torch::torch Boost::python Boost::numpy libxlsxwriter::libxlsxwriter)
target_link_libraries(b_best Boost::boost bayesnet::bayesnet argparse::argparse fimdlp::fimdlp ${Python3_LIBRARIES} torch::torch Boost::python Boost::numpy libxlsxwriter::libxlsxwriter)
# b_grid
set(grid_sources GridSearch.cpp GridData.cpp GridExperiment.cpp GridBase.cpp )
@@ -33,7 +33,7 @@ add_executable(b_grid commands/b_grid.cpp ${grid_sources}
experimental_clfs/DecisionTree.cpp
experimental_clfs/AdaBoost.cpp
)
target_link_libraries(b_grid ${MPI_CXX_LIBRARIES} pyclassifiers::pyclassifiers bayesnet::bayesnet argparse::argparse fimdlp::fimdlp ${Python3_LIBRARIES} torch::torch Boost::python Boost::numpy)
target_link_libraries(b_grid ${MPI_CXX_LIBRARIES} bayesnet::bayesnet argparse::argparse fimdlp::fimdlp ${Python3_LIBRARIES} torch::torch Boost::python Boost::numpy)
# b_list
add_executable(b_list commands/b_list.cpp
@@ -46,7 +46,7 @@ add_executable(b_list commands/b_list.cpp
experimental_clfs/DecisionTree.cpp
experimental_clfs/AdaBoost.cpp
)
target_link_libraries(b_list pyclassifiers::pyclassifiers bayesnet::bayesnet argparse::argparse fimdlp::fimdlp ${Python3_LIBRARIES} torch::torch Boost::python Boost::numpy libxlsxwriter::libxlsxwriter)
target_link_libraries(b_list bayesnet::bayesnet argparse::argparse fimdlp::fimdlp ${Python3_LIBRARIES} torch::torch Boost::python Boost::numpy libxlsxwriter::libxlsxwriter)
# b_main
set(main_sources Experiment.cpp Models.cpp HyperParameters.cpp Scores.cpp ArgumentsExperiment.cpp)
@@ -61,7 +61,7 @@ add_executable(b_main commands/b_main.cpp ${main_sources}
experimental_clfs/DecisionTree.cpp
experimental_clfs/AdaBoost.cpp
)
target_link_libraries(b_main PRIVATE nlohmann_json::nlohmann_json pyclassifiers::pyclassifiers bayesnet::bayesnet argparse::argparse fimdlp::fimdlp ${Python3_LIBRARIES} torch::torch Boost::python Boost::numpy)
target_link_libraries(b_main PRIVATE nlohmann_json::nlohmann_json bayesnet::bayesnet argparse::argparse fimdlp::fimdlp ${Python3_LIBRARIES} torch::torch Boost::python Boost::numpy)
# b_manage
set(manage_sources ManageScreen.cpp OptionsMenu.cpp ResultsManager.cpp)

View File

@@ -41,8 +41,7 @@ namespace platform {
}
json bests;
for (const auto& file : files) {
auto result = Result();
result.load(path, file);
auto result = Result(path, file);
auto data = result.getJson();
for (auto const& item : data.at("results")) {
bool update = true;

View File

@@ -1,8 +1,15 @@
#ifndef DISCRETIZATIONREGISTER_H
#define DISCRETIZATIONREGISTER_H
#include <common/Discretization.h>
#include <limits>
static platform::RegistrarDiscretization registrarM("mdlp",
[](void) -> mdlp::Discretizer* { return new mdlp::CPPFImdlp();});
static platform::RegistrarDiscretization registrarM3("mdlp3",
[](void) -> mdlp::Discretizer* { return new mdlp::CPPFImdlp(3, numeric_limits<int>::max(), 3);});
static platform::RegistrarDiscretization registrarM4("mdlp4",
[](void) -> mdlp::Discretizer* { return new mdlp::CPPFImdlp(3, numeric_limits<int>::max(), 4);});
static platform::RegistrarDiscretization registrarM5("mdlp5",
[](void) -> mdlp::Discretizer* { return new mdlp::CPPFImdlp(3, numeric_limits<int>::max(), 5);});
static platform::RegistrarDiscretization registrarBU3("bin3u",
[](void) -> mdlp::Discretizer* { return new mdlp::BinDisc(3, mdlp::strategy_t::UNIFORM);});
static platform::RegistrarDiscretization registrarBQ3("bin3q",

View File

@@ -21,7 +21,7 @@ namespace platform {
{
{"depth", {"any"}},
{"discretize", {"0", "1"}},
{"discretize_algo", {"mdlp", "bin3u", "bin3q", "bin4u", "bin4q", "bin5q", "bin5u", "bin6q", "bin6u", "bin7q", "bin7u", "bin8q", "bin8u", "bin9q", "bin9u", "bin10q", "bin10u"}},
{"discretize_algo", {"mdlp", "mdlp3", "mdlp4", "mdlp5", "bin3u", "bin3q", "bin4u", "bin4q", "bin5q", "bin5u", "bin6q", "bin6u", "bin7q", "bin7u", "bin8q", "bin8u", "bin9q", "bin9u", "bin10q", "bin10u"}},
{"experiment", {"discretiz", "odte", "covid", "Test"}},
{"fit_features", {"0", "1"}},
{"framework", {"bulma", "bootstrap"}},

View File

@@ -1,18 +0,0 @@
#ifndef TENSOR_UTILS_H
#define TENSOR_UTILS_H
#include <torch/torch.h>
#include <vector>
namespace platform {
template <typename T>
std::vector<T> tensorToVector(const torch::Tensor& tensor)
{
torch::Tensor contig_tensor = tensor.contiguous();
auto num_elements = contig_tensor.numel();
const T* tensor_data = contig_tensor.data_ptr<T>();
std::vector<T> result(tensor_data, tensor_data + num_elements);
return result;
}
}
#endif

View File

@@ -5,6 +5,15 @@
namespace platform {
class TensorUtils {
public:
template <typename T>
static std::vector<T> tensorToVector(const torch::Tensor& tensor)
{
torch::Tensor contig_tensor = tensor.contiguous();
auto num_elements = contig_tensor.numel();
const T* tensor_data = contig_tensor.data_ptr<T>();
std::vector<T> result(tensor_data, tensor_data + num_elements);
return result;
}
static std::vector<std::vector<int>> to_matrix(const torch::Tensor& X)
{
// Ensure tensor is contiguous in memory
@@ -53,7 +62,7 @@ namespace platform {
torch::Tensor tensor = torch::empty({ static_cast<long>(rows), static_cast<long>(cols) }, torch::kInt64);
for (size_t i = 0; i < rows; ++i) {
for (size_t j = 0; j < cols; ++j) {
tensor.index_put_({ static_cast<long>(i), static_cast<long>(j) }, data[i][j]);
tensor.index_put_({static_cast<int64_t>(i), static_cast<int64_t>(j)}, torch::scalar_tensor(data[i][j]));
}
}
return tensor;
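
A minimal usage sketch of the relocated helpers, assuming an int32 input tensor (the class sits in namespace platform, as shown above):

#include <torch/torch.h>
#include "common/TensorUtils.hpp"

void example()
{
    // Convert a 1-D tensor to a std::vector<int>
    auto y = torch::tensor({ 0, 1, 1, 0 }, torch::kInt32);
    auto y_vec = platform::TensorUtils::tensorToVector<int>(y);   // {0, 1, 1, 0}
    // Convert a 2-D tensor to a row-major std::vector<std::vector<int>>
    auto X = torch::randint(0, 3, { 4, 2 }).to(torch::kInt32);
    auto X_rows = platform::TensorUtils::to_matrix(X);
}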

View File

@@ -11,7 +11,7 @@
#include <numeric>
#include <sstream>
#include <iomanip>
#include "TensorUtils.hpp"
#include "common/TensorUtils.hpp"
// Conditional debug macro for performance-critical sections
#define DEBUG_LOG(condition, ...) \

View File

@@ -38,7 +38,7 @@ namespace bayesnet {
torch::Tensor predict(torch::Tensor& X) override;
std::vector<int> predict(std::vector<std::vector<int>>& X) override;
torch::Tensor predict_proba(torch::Tensor& X) override;
std::vector<std::vector<double>> predict_proba(std::vector<std::vector<int>>& X);
std::vector<std::vector<double>> predict_proba(std::vector<std::vector<int>>& X) override;
void setDebug(bool debug) { this->debug = debug; }
protected:

View File

@@ -10,7 +10,7 @@
#include <sstream>
#include <iomanip>
#include <limits>
#include "TensorUtils.hpp"
#include "common/TensorUtils.hpp"
namespace bayesnet {

View File

@@ -40,7 +40,7 @@ namespace bayesnet {
torch::Tensor predict(torch::Tensor& X) override;
std::vector<int> predict(std::vector<std::vector<int>>& X) override;
torch::Tensor predict_proba(torch::Tensor& X) override;
std::vector<std::vector<double>> predict_proba(std::vector<std::vector<int>>& X);
std::vector<std::vector<double>> predict_proba(std::vector<std::vector<int>>& X) override;
// Make predictions for a single sample
int predictSample(const torch::Tensor& x) const;

View File

@@ -5,7 +5,7 @@
// ***************************************************************
#include "ExpClf.h"
#include "TensorUtils.hpp"
#include "common/TensorUtils.hpp"
namespace platform {
ExpClf::ExpClf() : semaphore_{ CountingSemaphore::getInstance() }, Boost(false)

View File

@@ -5,7 +5,7 @@
// ***************************************************************
#include "ExpEnsemble.h"
#include "TensorUtils.hpp"
#include "common/TensorUtils.hpp"
namespace platform {
ExpEnsemble::ExpEnsemble() : semaphore_{ CountingSemaphore::getInstance() }, Boost(false)

View File

@@ -5,7 +5,7 @@
// ***************************************************************
#include "XA1DE.h"
#include "TensorUtils.hpp"
#include "common/TensorUtils.hpp"
namespace platform {
void XA1DE::trainModel(const torch::Tensor& weights, const bayesnet::Smoothing_t smoothing)

View File

@@ -10,7 +10,7 @@
#include <tuple>
#include "XBAODE.h"
#include "XSpode.hpp"
#include "TensorUtils.hpp"
#include "common/TensorUtils.hpp"
#include <loguru.hpp>
namespace platform {

View File

@@ -19,12 +19,12 @@
#include <bayesnet/classifiers/KDBLd.h>
#include <bayesnet/classifiers/SPODELd.h>
#include <bayesnet/classifiers/SPODELd.h>
#include <pyclassifiers/STree.h>
#include <pyclassifiers/ODTE.h>
#include <pyclassifiers/SVC.h>
#include <pyclassifiers/XGBoost.h>
#include <pyclassifiers/AdaBoostPy.h>
#include <pyclassifiers/RandomForest.h>
// #include <pyclassifiers/STree.h>
// #include <pyclassifiers/ODTE.h>
// #include <pyclassifiers/SVC.h>
// #include <pyclassifiers/XGBoost.h>
// #include <pyclassifiers/AdaBoostPy.h>
// #include <pyclassifiers/RandomForest.h>
#include "../experimental_clfs/XA1DE.h"
#include "../experimental_clfs/AdaBoost.h"
#include "../experimental_clfs/DecisionTree.h"

View File

@@ -3,7 +3,7 @@
#include <numeric>
#include <utility>
#include "RocAuc.h"
#include "common/TensorUtils.h" // tensorToVector
#include "common/TensorUtils.hpp" // tensorToVector
namespace platform {
double RocAuc::compute(const torch::Tensor& y_proba, const torch::Tensor& labels)

View File

@@ -1,6 +1,6 @@
#include <sstream>
#include "Scores.h"
#include "common/TensorUtils.h" // tensorToVector
#include "common/TensorUtils.hpp" // tensorToVector
#include "common/Colors.h"
namespace platform {
Scores::Scores(torch::Tensor& y_test, torch::Tensor& y_proba, int num_classes, std::vector<std::string> labels) : num_classes(num_classes), labels(labels), y_test(y_test), y_proba(y_proba)
@@ -50,7 +50,7 @@ namespace platform {
auto nClasses = num_classes;
if (num_classes == 2)
nClasses = 1;
auto y_testv = tensorToVector<int>(y_test);
auto y_testv = TensorUtils::tensorToVector<int>(y_test);
std::vector<double> aucScores(nClasses, 0.0);
std::vector<std::pair<double, int>> scoresAndLabels;
for (size_t classIdx = 0; classIdx < nClasses; ++classIdx) {

View File

@@ -25,18 +25,18 @@ namespace platform {
[](void) -> bayesnet::BaseClassifier* { return new bayesnet::BoostAODE();});
static Registrar registrarBA2("BoostA2DE",
[](void) -> bayesnet::BaseClassifier* { return new bayesnet::BoostA2DE();});
static Registrar registrarSt("STree",
[](void) -> bayesnet::BaseClassifier* { return new pywrap::STree();});
static Registrar registrarOdte("Odte",
[](void) -> bayesnet::BaseClassifier* { return new pywrap::ODTE();});
static Registrar registrarSvc("SVC",
[](void) -> bayesnet::BaseClassifier* { return new pywrap::SVC();});
static Registrar registrarRaF("RandomForest",
[](void) -> bayesnet::BaseClassifier* { return new pywrap::RandomForest();});
static Registrar registrarXGB("XGBoost",
[](void) -> bayesnet::BaseClassifier* { return new pywrap::XGBoost();});
static Registrar registrarAdaPy("AdaBoostPy",
[](void) -> bayesnet::BaseClassifier* { return new pywrap::AdaBoostPy();});
// static Registrar registrarSt("STree",
// [](void) -> bayesnet::BaseClassifier* { return new pywrap::STree();});
// static Registrar registrarOdte("Odte",
// [](void) -> bayesnet::BaseClassifier* { return new pywrap::ODTE();});
// static Registrar registrarSvc("SVC",
// [](void) -> bayesnet::BaseClassifier* { return new pywrap::SVC();});
// static Registrar registrarRaF("RandomForest",
// [](void) -> bayesnet::BaseClassifier* { return new pywrap::RandomForest();});
// static Registrar registrarXGB("XGBoost",
// [](void) -> bayesnet::BaseClassifier* { return new pywrap::XGBoost();});
// static Registrar registrarAdaPy("AdaBoostPy",
// [](void) -> bayesnet::BaseClassifier* { return new pywrap::AdaBoostPy();});
static Registrar registrarAda("AdaBoost",
[](void) -> bayesnet::BaseClassifier* { return new bayesnet::AdaBoost();});
static Registrar registrarDT("DecisionTree",
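
For context, a hypothetical sketch of the self-registration idiom these static objects rely on; the real Registrar/Models types in this repository may differ, so the names and signatures below are assumptions:

#include <functional>
#include <map>
#include <string>

struct BaseClassifier { virtual ~BaseClassifier() = default; };   // stand-in for bayesnet::BaseClassifier

struct Models {
    // name -> factory map, filled before main() by the static Registrar objects above
    static std::map<std::string, std::function<BaseClassifier*()>>& registry()
    {
        static std::map<std::string, std::function<BaseClassifier*()>> instance;
        return instance;
    }
};

struct Registrar {
    Registrar(const std::string& name, std::function<BaseClassifier*()> factory)
    {
        Models::registry()[name] = std::move(factory);
    }
};

Commenting a registrar out therefore drops that model name from whatever lookup the commands perform, without touching the command code itself.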

View File

@@ -28,7 +28,7 @@ namespace platform {
maxTitle = results.maxTitleSize();
header_lengths = { 3, 10, maxModel, 11, 10, 12, 2, 3, 7, maxTitle };
header_labels = { " #", "Date", "Model", "Score Name", "Score", "Platform", "SD", "C/P", "Time", "Title" };
sort_fields = { "Date", "Model", "Score", "Time" };
sort_fields = { "Date", "Model", "Score", "Time", "Title" };
updateSize(rows, cols);
// Initializes the paginator for each output type (experiments, datasets, result)
for (int i = 0; i < static_cast<int>(OutputType::Count); i++) {
@@ -346,9 +346,10 @@ namespace platform {
{
std::vector<std::tuple<std::string, char, bool>> sortOptions = {
{"date", 'd', false},
{"model", 'm', false},
{"score", 's', false},
{"time", 't', false},
{"model", 'm', false},
{"title", 'i', false},
{"ascending+", '+', false},
{"descending-", '-', false}
};
@@ -379,6 +380,9 @@ namespace platform {
case 'm':
sort_field = SortField::MODEL;
break;
case 'i':
sort_field = SortField::TITLE;
break;
case '+':
sort_type = SortType::ASC;
break;

View File

@@ -13,8 +13,7 @@ namespace platform {
for (const auto& file : directory_iterator(path)) {
auto filename = file.path().filename().string();
if (filename.find(".json") != std::string::npos && filename.find("results_") == 0) {
auto result = Result();
result.load(path, filename);
auto result = Result(path, filename);
bool addResult = true;
if (platform != "any" && result.getPlatform() != platform
|| model != "any" && result.getModel() != model
@@ -89,6 +88,16 @@ namespace platform {
return a.getDuration() > b.getDuration();
});
}
void ResultsManager::sortTitle(SortType type)
{
if (empty())
return;
sort(files.begin(), files.end(), [type](const Result& a, const Result& b) {
if (type == SortType::ASC)
return a.getTitle() < b.getTitle();
return a.getTitle() > b.getTitle();
});
}
void ResultsManager::sortScore(SortType type)
{
if (empty())
@@ -120,6 +129,9 @@ namespace platform {
case SortField::DURATION:
sortDuration(type);
break;
case SortField::TITLE:
sortTitle(type);
break;
}
}
bool ResultsManager::empty() const

View File

@@ -15,6 +15,7 @@ namespace platform {
MODEL = 1,
SCORE = 2,
DURATION = 3,
TITLE = 4,
};
class ResultsManager {
public:
@@ -24,6 +25,7 @@ namespace platform {
void sortDate(SortType type);
void sortScore(SortType type);
void sortModel(SortType type);
void sortTitle(SortType type);
void sortDuration(SortType type);
int maxModelSize() const { return maxModel; };
int maxTitleSize() const { return maxTitle; };

View File

@@ -54,10 +54,8 @@ namespace platform {
}
void ExcelFile::setProperties(std::string title)
{
char line[title.size() + 1];
strcpy(line, title.c_str());
lxw_doc_properties properties = {
.title = line,
.title = title.c_str(),
.subject = (char*)"Machine learning results",
.author = (char*)"Ricardo Montañana Gómez",
.manager = (char*)"Dr. J. A. Gámez, Dr. J. M. Puerta",

View File

@@ -1,6 +1,8 @@
#include <filesystem>
#include <fstream>
#include <sstream>
#include <random>
#include <cstdlib>
#include "best/BestScore.h"
#include "common/Colors.h"
#include "common/DotEnv.h"
@@ -34,14 +36,25 @@ namespace platform {
}
Result::Result()
{
path = Paths::results();
fileName = "none";
data["date"] = get_actual_date();
data["time"] = get_actual_time();
data["results"] = json::array();
data["seeds"] = json::array();
complete = false;
}
Result& Result::load(const std::string& path, const std::string& fileName)
std::string Result::getFilename() const
{
if (fileName == "none") {
throw std::runtime_error("Filename is not set. Use save() method to generate a filename.");
}
return fileName;
}
Result::Result(const std::string& path, const std::string& fileName)
{
this->path = path;
this->fileName = fileName;
std::ifstream resultData(path + "/" + fileName);
if (resultData.is_open()) {
data = json::parse(resultData);
@@ -58,7 +71,6 @@ namespace platform {
score /= best.second;
}
complete = data["results"].size() > 1;
return *this;
}
json Result::getJson()
{
@@ -71,11 +83,15 @@ namespace platform {
}
void Result::save(const std::string& path)
{
std::ofstream file(path + getFilename());
do {
fileName = generateFileName();
}
while (std::filesystem::exists(path + fileName));
std::ofstream file(path + fileName);
file << data;
file.close();
}
std::string Result::getFilename() const
std::string Result::generateFileName()
{
std::ostringstream oss;
std::string stratified;
@@ -85,13 +101,31 @@ namespace platform {
catch (nlohmann::json_abi_v3_11_3::detail::type_error) {
stratified = data["stratified"].get<int>() == 1 ? "1" : "0";
}
auto generateRandomString = [](int length) -> std::string {
const char alphanum[] =
"0123456789"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz";
// Use thread-local static generator to avoid interfering with global random state
thread_local static std::random_device rd;
thread_local static std::mt19937 gen(rd());
std::uniform_int_distribution<> dis(0, sizeof(alphanum) - 2);
std::string result;
for (int i = 0; i < length; ++i) {
result += alphanum[dis(gen)];
}
return result;
};
oss << "results_"
<< data.at("score_name").get<std::string>() << "_"
<< data.at("model").get<std::string>() << "_"
<< data.at("platform").get<std::string>() << "_"
<< data["date"].get<std::string>() << "_"
<< data["time"].get<std::string>() << "_"
<< stratified << ".json";
<< stratified << "_"
<< generateRandomString(5) << ".json";
return oss.str();
}
std::string Result::to_string(int maxModel, int maxTitle) const
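
Together with the header changes in the next file, a minimal sketch of the new Result lifecycle; setter names other than setPlatform, and the trailing separator in Paths::results(), are assumptions:

platform::Result result;                             // default ctor: fileName stays "none" until save()
result.setPlatform("linux");                         // model, score_name, etc. must also be set before save()
result.save(platform::Paths::results());             // generates a unique name such as
                                                     // results_accuracy_TAN_linux_<date>_<time>_1_aB3xZ.json (illustrative)
auto name = result.getFilename();                    // valid now; throws before save()

platform::Result loaded(platform::Paths::results(), name);  // new constructor parses the JSON on load
auto data = loaded.getJson();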

View File

@@ -5,6 +5,7 @@
#include <string>
#include <nlohmann/json.hpp>
#include "common/Timer.hpp"
#include "common/Paths.h"
#include "main/HyperParameters.h"
#include "main/PartialResult.h"
@@ -14,7 +15,7 @@ namespace platform {
class Result {
public:
Result();
Result& load(const std::string& path, const std::string& filename);
Result(const std::string& path, const std::string& filename);
void save(const std::string& path);
std::vector<std::string> check();
// Getters
@@ -49,6 +50,9 @@ namespace platform {
void setNFolds(int nfolds) { data["folds"] = nfolds; };
void setPlatform(const std::string& platform_name) { data["platform"] = platform_name; };
private:
std::string generateFileName();
std::string path;
std::string fileName;
json data;
bool complete;
double score = 0.0;

View File

@@ -13,8 +13,7 @@ namespace platform {
for (const auto& file : directory_iterator(path)) {
auto filename = file.path().filename().string();
if (filename.find(".json") != std::string::npos && filename.find("results_") == 0) {
auto result = Result();
result.load(path, filename);
auto result = Result(path, filename);
if (model != "any" && result.getModel() != model)
continue;
auto data = result.getData()["results"];

View File

@@ -13,6 +13,6 @@ if(ENABLE_TESTING)
)
add_executable(${TEST_PLATFORM} ${TEST_SOURCES_PLATFORM})
target_link_libraries(${TEST_PLATFORM} PUBLIC
torch::torch fimdlp:fimdlp Catch2::Catch2WithMain bayesnet::bayesnet pyclassifiers::pyclassifiers)
torch::torch fimdlp::fimdlp Catch2::Catch2WithMain bayesnet::bayesnet)
add_test(NAME ${TEST_PLATFORM} COMMAND ${TEST_PLATFORM})
endif(ENABLE_TESTING)

View File

@@ -13,7 +13,7 @@
#include <stdexcept>
#include "experimental_clfs/AdaBoost.h"
#include "experimental_clfs/DecisionTree.h"
#include "experimental_clfs/TensorUtils.hpp"
#include "common/TensorUtils.hpp"
#include "TestUtils.h"
using namespace bayesnet;

View File

@@ -15,25 +15,25 @@
TEST_CASE("Test Platform version", "[Platform]")
{
std::string version = { platform_project_version.begin(), platform_project_version.end() };
REQUIRE(version == "1.1.0");
REQUIRE(version == "1.1.1");
}
TEST_CASE("Test Folding library version", "[Folding]")
{
std::string version = folding::KFold(5, 100).version();
REQUIRE(version == "1.1.1");
REQUIRE(version == "1.1.2");
}
TEST_CASE("Test BayesNet version", "[BayesNet]")
{
std::string version = bayesnet::TAN().getVersion();
REQUIRE(version == "1.1.2");
REQUIRE(version == "1.2.1");
}
TEST_CASE("Test mdlp version", "[mdlp]")
{
std::string version = mdlp::CPPFImdlp::version();
REQUIRE(version == "2.0.1");
REQUIRE(version == "2.1.1");
}
TEST_CASE("Test Arff version", "[Arff]")
{
std::string version = ArffFiles().version();
REQUIRE(version == "1.1.0");
REQUIRE(version == "1.2.1");
}

View File

@@ -5,7 +5,7 @@
#include <vector>
#include <map>
#include <tuple>
#include <ArffFiles/ArffFiles.hpp>
#include <ArffFiles.hpp>
#include <fimdlp/CPPFImdlp.h>
bool file_exists(const std::string& name);