diff --git a/.gitignore b/.gitignore
index c062b91..42fbee0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -41,3 +41,4 @@ puml/**
 *.dot
 diagrams/html/**
 diagrams/latex/**
+.cache
diff --git a/Makefile b/Makefile
index dadc0ee..59c603b 100644
--- a/Makefile
+++ b/Makefile
@@ -37,6 +37,11 @@ setup: ## Install dependencies for tests and coverage
 		pip install gcovr; \
 	fi
 
+dest ?= ${HOME}/bin
+main: ## Build only the b_main target
+	@cmake --build $(f_release) -t b_main --parallel
+	@cp $(f_release)/src/b_main $(dest)
+
 dest ?= ${HOME}/bin
 install: ## Copy binary files to bin folder
 	@echo "Destination folder: $(dest)"
@@ -98,8 +103,8 @@ test: ## Run tests (opt="-s") to verbose output the tests, (opt="-c='Test Maximu
 
 fname = iris
 example: ## Build sample
 	@echo ">>> Building Sample...";
-	@cmake --build build_debug -t sample
-	build_debug/sample/PlatformSample --model BoostAODE --dataset $(fname) --discretize --stratified
+	@cmake --build $(f_release) -t sample
+	$(f_release)/sample/PlatformSample --model BoostAODE --dataset $(fname) --discretize --stratified
 	@echo ">>> Done";
 
diff --git a/lib/log/loguru.cpp b/lib/log/loguru.cpp
new file mode 100644
index 0000000..a95cfbf
--- /dev/null
+++ b/lib/log/loguru.cpp
@@ -0,0 +1,2009 @@
+#if defined(__GNUC__) || defined(__clang__)
+// Disable all warnings from gcc/clang:
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpragmas"
+
+#pragma GCC diagnostic ignored "-Wc++98-compat"
+#pragma GCC diagnostic ignored "-Wc++98-compat-pedantic"
+#pragma GCC diagnostic ignored "-Wexit-time-destructors"
+#pragma GCC diagnostic ignored "-Wformat-nonliteral"
+#pragma GCC diagnostic ignored "-Wglobal-constructors"
+#pragma GCC diagnostic ignored "-Wgnu-zero-variadic-macro-arguments"
+#pragma GCC diagnostic ignored "-Wmissing-prototypes"
+#pragma GCC diagnostic ignored "-Wpadded"
+#pragma GCC diagnostic ignored "-Wsign-conversion"
+#pragma GCC diagnostic ignored "-Wunknown-pragmas"
+#pragma GCC diagnostic ignored "-Wunused-macros"
+#pragma GCC diagnostic ignored "-Wzero-as-null-pointer-constant"
+#elif defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable:4365) // conversion from 'X' to 'Y', signed/unsigned mismatch
+#endif
+
+#include "loguru.hpp"
+
+#ifndef LOGURU_HAS_BEEN_IMPLEMENTED
+#define LOGURU_HAS_BEEN_IMPLEMENTED
+
+#define LOGURU_PREAMBLE_WIDTH (53 + LOGURU_THREADNAME_WIDTH + LOGURU_FILENAME_WIDTH)
+
+#undef min
+#undef max
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#if LOGURU_SYSLOG
+#include
+#else
+#define LOG_USER 0
+#endif
+
+#ifdef _WIN32
+#include
+
+#define localtime_r(a, b) localtime_s(b, a) // No localtime_r with MSVC, but arguments are swapped for localtime_s
+#else
+#include
+#include // mkdir
+#include // STDERR_FILENO
+#endif
+
+#ifdef __linux__
+#include // PATH_MAX
+#elif !defined(_WIN32)
+#include // PATH_MAX
+#endif
+
+#ifndef PATH_MAX
+#define PATH_MAX 1024
+#endif
+
+#ifdef __APPLE__
+#include "TargetConditionals.h"
+#endif
+
+// TODO: use defined(_POSIX_VERSION) for some of these things?
+ +#if defined(_WIN32) || defined(__CYGWIN__) +#define LOGURU_PTHREADS 0 +#define LOGURU_WINTHREADS 1 +#ifndef LOGURU_STACKTRACES +#define LOGURU_STACKTRACES 0 +#endif +#else +#define LOGURU_PTHREADS 1 +#define LOGURU_WINTHREADS 0 +#ifdef __GLIBC__ +#ifndef LOGURU_STACKTRACES +#define LOGURU_STACKTRACES 1 +#endif +#else +#ifndef LOGURU_STACKTRACES +#define LOGURU_STACKTRACES 0 +#endif +#endif +#endif + +#if LOGURU_STACKTRACES +#include // for __cxa_demangle +#include // for dladdr +#include // for backtrace +#endif // LOGURU_STACKTRACES + +#if LOGURU_PTHREADS +#include +#if defined(__FreeBSD__) +#include +#include +#elif defined(__OpenBSD__) +#include +#endif + +#ifdef __linux__ + /* On Linux, the default thread name is the same as the name of the binary. + Additionally, all new threads inherit the name of the thread it got forked from. + For this reason, Loguru use the pthread Thread Local Storage + for storing thread names on Linux. */ +#ifndef LOGURU_PTLS_NAMES +#define LOGURU_PTLS_NAMES 1 +#endif +#endif +#endif + +#if LOGURU_WINTHREADS +#ifndef _WIN32_WINNT +#define _WIN32_WINNT 0x0502 +#endif +#define WIN32_LEAN_AND_MEAN +#define NOMINMAX +#include +#endif + +#ifndef LOGURU_PTLS_NAMES +#define LOGURU_PTLS_NAMES 0 +#endif + +LOGURU_ANONYMOUS_NAMESPACE_BEGIN + +namespace loguru { + using namespace std::chrono; + +#if LOGURU_WITH_FILEABS + struct FileAbs { + char path[PATH_MAX]; + char mode_str[4]; + Verbosity verbosity; + struct stat st; + FILE* fp; + bool is_reopening = false; // to prevent recursive call in file_reopen. + decltype(steady_clock::now()) last_check_time = steady_clock::now(); + }; +#else + typedef FILE* FileAbs; +#endif + + struct Callback { + std::string id; + log_handler_t callback; + void* user_data; + Verbosity verbosity; // Does not change! 
+ close_handler_t close; + flush_handler_t flush; + unsigned indentation; + }; + + using CallbackVec = std::vector; + + using StringPair = std::pair; + using StringPairList = std::vector; + + const auto s_start_time = steady_clock::now(); + + Verbosity g_stderr_verbosity = Verbosity_0; + bool g_colorlogtostderr = true; + unsigned g_flush_interval_ms = 0; + bool g_preamble_header = true; + bool g_preamble = true; + + Verbosity g_internal_verbosity = Verbosity_0; + + // Preamble details + bool g_preamble_date = true; + bool g_preamble_time = true; + bool g_preamble_uptime = true; + bool g_preamble_thread = true; + bool g_preamble_file = true; + bool g_preamble_verbose = true; + bool g_preamble_pipe = true; + + static std::recursive_mutex s_mutex; + static Verbosity s_max_out_verbosity = Verbosity_OFF; + static std::string s_argv0_filename; + static std::string s_arguments; + static char s_current_dir[PATH_MAX]; + static CallbackVec s_callbacks; + static fatal_handler_t s_fatal_handler = nullptr; + static verbosity_to_name_t s_verbosity_to_name_callback = nullptr; + static name_to_verbosity_t s_name_to_verbosity_callback = nullptr; + static StringPairList s_user_stack_cleanups; + static bool s_strip_file_path = true; + static std::atomic s_stderr_indentation{ 0 }; + + // For periodic flushing: + static std::thread* s_flush_thread = nullptr; + static bool s_needs_flushing = false; + + static SignalOptions s_signal_options = SignalOptions::none(); + + static const bool s_terminal_has_color = []() { +#ifdef _WIN32 +#ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING +#define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x0004 +#endif + + HANDLE hOut = GetStdHandle(STD_OUTPUT_HANDLE); + if (hOut != INVALID_HANDLE_VALUE) { + DWORD dwMode = 0; + GetConsoleMode(hOut, &dwMode); + dwMode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING; + return SetConsoleMode(hOut, dwMode) != 0; + } + return false; +#else + if (!isatty(STDERR_FILENO)) { + return false; + } + if (const char* term = getenv("TERM")) { + return 0 == strcmp(term, "cygwin") + || 0 == strcmp(term, "linux") + || 0 == strcmp(term, "rxvt-unicode-256color") + || 0 == strcmp(term, "screen") + || 0 == strcmp(term, "screen-256color") + || 0 == strcmp(term, "screen.xterm-256color") + || 0 == strcmp(term, "tmux-256color") + || 0 == strcmp(term, "xterm") + || 0 == strcmp(term, "xterm-256color") + || 0 == strcmp(term, "xterm-termite") + || 0 == strcmp(term, "xterm-color"); + } else { + return false; + } +#endif + }(); + + static void print_preamble_header(char* out_buff, size_t out_buff_size); + + // ------------------------------------------------------------------------------ + // Colors + + bool terminal_has_color() { return s_terminal_has_color; } + + // Colors + +#ifdef _WIN32 +#define VTSEQ(ID) ("\x1b[1;" #ID "m") +#else +#define VTSEQ(ID) ("\x1b[" #ID "m") +#endif + + const char* terminal_black() { return s_terminal_has_color ? VTSEQ(30) : ""; } + const char* terminal_red() { return s_terminal_has_color ? VTSEQ(31) : ""; } + const char* terminal_green() { return s_terminal_has_color ? VTSEQ(32) : ""; } + const char* terminal_yellow() { return s_terminal_has_color ? VTSEQ(33) : ""; } + const char* terminal_blue() { return s_terminal_has_color ? VTSEQ(34) : ""; } + const char* terminal_purple() { return s_terminal_has_color ? VTSEQ(35) : ""; } + const char* terminal_cyan() { return s_terminal_has_color ? VTSEQ(36) : ""; } + const char* terminal_light_gray() { return s_terminal_has_color ? VTSEQ(37) : ""; } + const char* terminal_white() { return s_terminal_has_color ? 
VTSEQ(37) : ""; } + const char* terminal_light_red() { return s_terminal_has_color ? VTSEQ(91) : ""; } + const char* terminal_dim() { return s_terminal_has_color ? VTSEQ(2) : ""; } + + // Formating + const char* terminal_bold() { return s_terminal_has_color ? VTSEQ(1) : ""; } + const char* terminal_underline() { return s_terminal_has_color ? VTSEQ(4) : ""; } + + // You should end each line with this! + const char* terminal_reset() { return s_terminal_has_color ? VTSEQ(0) : ""; } + + // ------------------------------------------------------------------------------ +#if LOGURU_WITH_FILEABS + void file_reopen(void* user_data); + inline FILE* to_file(void* user_data) { return reinterpret_cast(user_data)->fp; } +#else + inline FILE* to_file(void* user_data) { return reinterpret_cast(user_data); } +#endif + + void file_log(void* user_data, const Message& message) + { +#if LOGURU_WITH_FILEABS + FileAbs* file_abs = reinterpret_cast(user_data); + if (file_abs->is_reopening) { + return; + } + // It is better checking file change every minute/hour/day, + // instead of doing this every time we log. + // Here check_interval is set to zero to enable checking every time; + const auto check_interval = seconds(0); + if (duration_cast(steady_clock::now() - file_abs->last_check_time) > check_interval) { + file_abs->last_check_time = steady_clock::now(); + file_reopen(user_data); + } + FILE* file = to_file(user_data); + if (!file) { + return; + } +#else + FILE* file = to_file(user_data); +#endif + fprintf(file, "%s%s%s%s\n", + message.preamble, message.indentation, message.prefix, message.message); + if (g_flush_interval_ms == 0) { + fflush(file); + } + } + + void file_close(void* user_data) + { + FILE* file = to_file(user_data); + if (file) { + fclose(file); + } +#if LOGURU_WITH_FILEABS + delete reinterpret_cast(user_data); +#endif + } + + void file_flush(void* user_data) + { + FILE* file = to_file(user_data); + fflush(file); + } + +#if LOGURU_WITH_FILEABS + void file_reopen(void* user_data) + { + FileAbs* file_abs = reinterpret_cast(user_data); + struct stat st; + int ret; + if (!file_abs->fp || (ret = stat(file_abs->path, &st)) == -1 || (st.st_ino != file_abs->st.st_ino)) { + file_abs->is_reopening = true; + if (file_abs->fp) { + fclose(file_abs->fp); + } + if (!file_abs->fp) { + VLOG_F(g_internal_verbosity, "Reopening file '" LOGURU_FMT(s) "' due to previous error", file_abs->path); + } else if (ret < 0) { + const auto why = errno_as_text(); + VLOG_F(g_internal_verbosity, "Reopening file '" LOGURU_FMT(s) "' due to '" LOGURU_FMT(s) "'", file_abs->path, why.c_str()); + } else { + VLOG_F(g_internal_verbosity, "Reopening file '" LOGURU_FMT(s) "' due to file changed", file_abs->path); + } + // try reopen current file. + if (!create_directories(file_abs->path)) { + LOG_F(ERROR, "Failed to create directories to '" LOGURU_FMT(s) "'", file_abs->path); + } + file_abs->fp = fopen(file_abs->path, file_abs->mode_str); + if (!file_abs->fp) { + LOG_F(ERROR, "Failed to open '" LOGURU_FMT(s) "'", file_abs->path); + } else { + stat(file_abs->path, &file_abs->st); + } + file_abs->is_reopening = false; + } + } +#endif + // ------------------------------------------------------------------------------ + // ------------------------------------------------------------------------------ +#if LOGURU_SYSLOG + void syslog_log(void* /*user_data*/, const Message& message) + { + /* + Level 0: Is reserved for kernel panic type situations. + Level 1: Is for Major resource failure. 
+ Level 2->7 Application level failures + */ + int level; + if (message.verbosity < Verbosity_FATAL) { + level = 1; // System Alert + } else { + switch (message.verbosity) { + case Verbosity_FATAL: level = 2; break; // System Critical + case Verbosity_ERROR: level = 3; break; // System Error + case Verbosity_WARNING: level = 4; break; // System Warning + case Verbosity_INFO: level = 5; break; // System Notice + case Verbosity_1: level = 6; break; // System Info + default: level = 7; break; // System Debug + } + } + + // Note: We don't add the time info. + // This is done automatically by the syslog deamon. + // Otherwise log all information that the file log does. + syslog(level, "%s%s%s", message.indentation, message.prefix, message.message); + } + + void syslog_close(void* /*user_data*/) + { + closelog(); + } + + void syslog_flush(void* /*user_data*/) + { + } +#endif + // ------------------------------------------------------------------------------ + // Helpers: + + Text::~Text() { free(_str); } + +#if LOGURU_USE_FMTLIB + Text vtextprintf(const char* format, fmt::format_args args) + { + return Text(STRDUP(fmt::vformat(format, args).c_str())); + } +#else + LOGURU_PRINTF_LIKE(1, 0) + static Text vtextprintf(const char* format, va_list vlist) + { +#ifdef _WIN32 + int bytes_needed = _vscprintf(format, vlist); + CHECK_F(bytes_needed >= 0, "Bad string format: '%s'", format); + char* buff = (char*)malloc(bytes_needed + 1); + vsnprintf(buff, bytes_needed + 1, format, vlist); + return Text(buff); +#else + char* buff = nullptr; + int result = vasprintf(&buff, format, vlist); + CHECK_F(result >= 0, "Bad string format: '" LOGURU_FMT(s) "'", format); + return Text(buff); +#endif + } + + Text textprintf(const char* format, ...) + { + va_list vlist; + va_start(vlist, format); + auto result = vtextprintf(format, vlist); + va_end(vlist); + return result; + } +#endif + + // Overloaded for variadic template matching. + Text textprintf() + { + return Text(static_cast(calloc(1, 1))); + } + + static const char* indentation(unsigned depth) + { + static const char buff[] = + ". . . . . . . . . . " ". . . . . . . . . . " + ". . . . . . . . . . " ". . . . . . . . . . " + ". . . . . . . . . . " ". . . . . . . . . . " + ". . . . . . . . . . " ". . . . . . . . . . " + ". . . . . . . . . . " ". . . . . . . . . . "; + static const size_t INDENTATION_WIDTH = 4; + static const size_t NUM_INDENTATIONS = (sizeof(buff) - 1) / INDENTATION_WIDTH; + depth = std::min(depth, NUM_INDENTATIONS); + return buff + INDENTATION_WIDTH * (NUM_INDENTATIONS - depth); + } + + static void parse_args(int& argc, char* argv[], const char* verbosity_flag) + { + int arg_dest = 1; + int out_argc = argc; + + for (int arg_it = 1; arg_it < argc; ++arg_it) { + auto cmd = argv[arg_it]; + auto arg_len = strlen(verbosity_flag); + + bool last_is_alpha = false; +#if LOGURU_USE_LOCALE + try { // locale variant of isalpha will throw on error + last_is_alpha = std::isalpha(cmd[arg_len], std::locale("")); + } + catch (...) 
{ + last_is_alpha = std::isalpha(static_cast(cmd[arg_len])); + } +#else + last_is_alpha = std::isalpha(static_cast(cmd[arg_len])); +#endif + + if (strncmp(cmd, verbosity_flag, arg_len) == 0 && !last_is_alpha) { + out_argc -= 1; + auto value_str = cmd + arg_len; + if (value_str[0] == '\0') { + // Value in separate argument + arg_it += 1; + CHECK_LT_F(arg_it, argc, "Missing verbosiy level after " LOGURU_FMT(s) "", verbosity_flag); + value_str = argv[arg_it]; + out_argc -= 1; + } + if (*value_str == '=') { value_str += 1; } + + auto req_verbosity = get_verbosity_from_name(value_str); + if (req_verbosity != Verbosity_INVALID) { + g_stderr_verbosity = req_verbosity; + } else { + char* end = 0; + g_stderr_verbosity = static_cast(strtol(value_str, &end, 10)); + CHECK_F(end && *end == '\0', + "Invalid verbosity. Expected integer, INFO, WARNING, ERROR or OFF, got '" LOGURU_FMT(s) "'", value_str); + } + } else { + argv[arg_dest++] = argv[arg_it]; + } + } + + argc = out_argc; + argv[argc] = nullptr; + } + + static long long now_ns() + { + return duration_cast(high_resolution_clock::now().time_since_epoch()).count(); + } + + // Returns the part of the path after the last / or \ (if any). + const char* filename(const char* path) + { + for (auto ptr = path; *ptr; ++ptr) { + if (*ptr == '/' || *ptr == '\\') { + path = ptr + 1; + } + } + return path; + } + + // ------------------------------------------------------------------------------ + + static void on_atexit() + { + VLOG_F(g_internal_verbosity, "atexit"); + flush(); + } + + static void install_signal_handlers(const SignalOptions& signal_options); + + static void write_hex_digit(std::string& out, unsigned num) + { + DCHECK_LT_F(num, 16u); + if (num < 10u) { out.push_back(char('0' + num)); } else { out.push_back(char('A' + num - 10)); } + } + + static void write_hex_byte(std::string& out, uint8_t n) + { + write_hex_digit(out, n >> 4u); + write_hex_digit(out, n & 0x0f); + } + + static void escape(std::string& out, const std::string& str) + { + for (char c : str) { + /**/ if (c == '\a') { out += "\\a"; } else if (c == '\b') { out += "\\b"; } else if (c == '\f') { out += "\\f"; } else if (c == '\n') { out += "\\n"; } else if (c == '\r') { out += "\\r"; } else if (c == '\t') { out += "\\t"; } else if (c == '\v') { out += "\\v"; } else if (c == '\\') { out += "\\\\"; } else if (c == '\'') { out += "\\\'"; } else if (c == '\"') { out += "\\\""; } else if (c == ' ') { out += "\\ "; } else if (0 <= c && c < 0x20) { // ASCI control character: + // else if (c < 0x20 || c != (c & 127)) { // ASCII control character or UTF-8: + out += "\\x"; + write_hex_byte(out, static_cast(c)); + } else { out += c; } + } + } + + Text errno_as_text() + { + char buff[256]; +#if defined(__GLIBC__) && defined(_GNU_SOURCE) + // GNU Version + return Text(STRDUP(strerror_r(errno, buff, sizeof(buff)))); +#elif defined(__APPLE__) || _POSIX_C_SOURCE >= 200112L + // XSI Version + strerror_r(errno, buff, sizeof(buff)); + return Text(strdup(buff)); +#elif defined(_WIN32) + strerror_s(buff, sizeof(buff), errno); + return Text(STRDUP(buff)); +#else + // Not thread-safe. 
+ return Text(STRDUP(strerror(errno))); +#endif + } + + void init(int& argc, char* argv[], const Options& options) + { + CHECK_GT_F(argc, 0, "Expected proper argc/argv"); + CHECK_EQ_F(argv[argc], nullptr, "Expected proper argc/argv"); + + s_argv0_filename = filename(argv[0]); + +#ifdef _WIN32 +#define getcwd _getcwd +#endif + + if (!getcwd(s_current_dir, sizeof(s_current_dir))) { + const auto error_text = errno_as_text(); + LOG_F(WARNING, "Failed to get current working directory: " LOGURU_FMT(s) "", error_text.c_str()); + } + + s_arguments = ""; + for (int i = 0; i < argc; ++i) { + escape(s_arguments, argv[i]); + if (i + 1 < argc) { + s_arguments += " "; + } + } + + if (options.verbosity_flag) { + parse_args(argc, argv, options.verbosity_flag); + } + + if (const auto main_thread_name = options.main_thread_name) { +#if LOGURU_PTLS_NAMES || LOGURU_WINTHREADS + set_thread_name(main_thread_name); +#elif LOGURU_PTHREADS + char old_thread_name[16] = { 0 }; + auto this_thread = pthread_self(); +#if defined(__APPLE__) || defined(__linux__) || defined(__sun) + pthread_getname_np(this_thread, old_thread_name, sizeof(old_thread_name)); +#endif + if (old_thread_name[0] == 0) { +#ifdef __APPLE__ + pthread_setname_np(main_thread_name); +#elif defined(__FreeBSD__) || defined(__OpenBSD__) + pthread_set_name_np(this_thread, main_thread_name); +#elif defined(__linux__) || defined(__sun) + pthread_setname_np(this_thread, main_thread_name); +#endif + } +#endif // LOGURU_PTHREADS + } + + if (g_stderr_verbosity >= Verbosity_INFO) { + if (g_preamble_header) { + char preamble_explain[LOGURU_PREAMBLE_WIDTH]; + print_preamble_header(preamble_explain, sizeof(preamble_explain)); + if (g_colorlogtostderr && s_terminal_has_color) { + fprintf(stderr, "%s%s%s\n", terminal_reset(), terminal_dim(), preamble_explain); + } else { + fprintf(stderr, "%s\n", preamble_explain); + } + } + fflush(stderr); + } + VLOG_F(g_internal_verbosity, "arguments: " LOGURU_FMT(s) "", s_arguments.c_str()); + if (strlen(s_current_dir) != 0) { + VLOG_F(g_internal_verbosity, "Current dir: " LOGURU_FMT(s) "", s_current_dir); + } + VLOG_F(g_internal_verbosity, "stderr verbosity: " LOGURU_FMT(d) "", g_stderr_verbosity); + VLOG_F(g_internal_verbosity, "-----------------------------------"); + + install_signal_handlers(options.signal_options); + + atexit(on_atexit); + } + + void shutdown() + { + VLOG_F(g_internal_verbosity, "loguru::shutdown()"); + remove_all_callbacks(); + set_fatal_handler(nullptr); + set_verbosity_to_name_callback(nullptr); + set_name_to_verbosity_callback(nullptr); + } + + void write_date_time(char* buff, unsigned long long buff_size) + { + auto now = system_clock::now(); + long long ms_since_epoch = duration_cast(now.time_since_epoch()).count(); + time_t sec_since_epoch = time_t(ms_since_epoch / 1000); + tm time_info; + localtime_r(&sec_since_epoch, &time_info); + snprintf(buff, buff_size, "%04d%02d%02d_%02d%02d%02d.%03lld", + 1900 + time_info.tm_year, 1 + time_info.tm_mon, time_info.tm_mday, + time_info.tm_hour, time_info.tm_min, time_info.tm_sec, ms_since_epoch % 1000); + } + + const char* argv0_filename() + { + return s_argv0_filename.c_str(); + } + + const char* arguments() + { + return s_arguments.c_str(); + } + + const char* current_dir() + { + return s_current_dir; + } + + const char* home_dir() + { +#ifdef __MINGW32__ + auto home = getenv("USERPROFILE"); + CHECK_F(home != nullptr, "Missing USERPROFILE"); + return home; +#elif defined(_WIN32) + char* user_profile; + size_t len; + errno_t err = _dupenv_s(&user_profile, &len, 
"USERPROFILE"); + CHECK_F(err == 0, "Missing USERPROFILE"); + return user_profile; +#else // _WIN32 + auto home = getenv("HOME"); + CHECK_F(home != nullptr, "Missing HOME"); + return home; +#endif // _WIN32 + } + + void suggest_log_path(const char* prefix, char* buff, unsigned long long buff_size) + { + if (prefix[0] == '~') { + snprintf(buff, buff_size - 1, "%s%s", home_dir(), prefix + 1); + } else { + snprintf(buff, buff_size - 1, "%s", prefix); + } + + // Check for terminating / + size_t n = strlen(buff); + if (n != 0) { + if (buff[n - 1] != '/') { + CHECK_F(n + 2 < buff_size, "Filename buffer too small"); + buff[n] = '/'; + buff[n + 1] = '\0'; + } + } + +#ifdef _WIN32 + strncat_s(buff, buff_size - strlen(buff) - 1, s_argv0_filename.c_str(), buff_size - strlen(buff) - 1); + strncat_s(buff, buff_size - strlen(buff) - 1, "/", buff_size - strlen(buff) - 1); + write_date_time(buff + strlen(buff), buff_size - strlen(buff)); + strncat_s(buff, buff_size - strlen(buff) - 1, ".log", buff_size - strlen(buff) - 1); +#else + strncat(buff, s_argv0_filename.c_str(), buff_size - strlen(buff) - 1); + strncat(buff, "/", buff_size - strlen(buff) - 1); + write_date_time(buff + strlen(buff), buff_size - strlen(buff)); + strncat(buff, ".log", buff_size - strlen(buff) - 1); +#endif + } + + bool create_directories(const char* file_path_const) + { + CHECK_F(file_path_const && *file_path_const); + char* file_path = STRDUP(file_path_const); + for (char* p = strchr(file_path + 1, '/'); p; p = strchr(p + 1, '/')) { + *p = '\0'; + +#ifdef _WIN32 + if (_mkdir(file_path) == -1) { +#else + if (mkdir(file_path, 0755) == -1) { +#endif + if (errno != EEXIST) { + LOG_F(ERROR, "Failed to create directory '" LOGURU_FMT(s) "'", file_path); + LOG_IF_F(ERROR, errno == EACCES, "EACCES"); + LOG_IF_F(ERROR, errno == ENAMETOOLONG, "ENAMETOOLONG"); + LOG_IF_F(ERROR, errno == ENOENT, "ENOENT"); + LOG_IF_F(ERROR, errno == ENOTDIR, "ENOTDIR"); + LOG_IF_F(ERROR, errno == ELOOP, "ELOOP"); + + *p = '/'; + free(file_path); + return false; + } + } + *p = '/'; + } + free(file_path); + return true; + } + bool add_file(const char* path_in, FileMode mode, Verbosity verbosity) + { + char path[PATH_MAX]; + if (path_in[0] == '~') { + snprintf(path, sizeof(path) - 1, "%s%s", home_dir(), path_in + 1); + } else { + snprintf(path, sizeof(path) - 1, "%s", path_in); + } + + if (!create_directories(path)) { + LOG_F(ERROR, "Failed to create directories to '" LOGURU_FMT(s) "'", path); + } + + const char* mode_str = (mode == FileMode::Truncate ? 
"w" : "a"); + FILE* file; +#ifdef _WIN32 + file = _fsopen(path, mode_str, _SH_DENYNO); +#else + file = fopen(path, mode_str); +#endif + if (!file) { + LOG_F(ERROR, "Failed to open '" LOGURU_FMT(s) "'", path); + return false; + } +#if LOGURU_WITH_FILEABS + FileAbs* file_abs = new FileAbs(); // this is deleted in file_close; + snprintf(file_abs->path, sizeof(file_abs->path) - 1, "%s", path); + snprintf(file_abs->mode_str, sizeof(file_abs->mode_str) - 1, "%s", mode_str); + stat(file_abs->path, &file_abs->st); + file_abs->fp = file; + file_abs->verbosity = verbosity; + add_callback(path_in, file_log, file_abs, verbosity, file_close, file_flush); +#else + add_callback(path_in, file_log, file, verbosity, file_close, file_flush); +#endif + + if (mode == FileMode::Append) { + fprintf(file, "\n\n\n\n\n"); + } + if (!s_arguments.empty()) { + fprintf(file, "arguments: %s\n", s_arguments.c_str()); + } + if (strlen(s_current_dir) != 0) { + fprintf(file, "Current dir: %s\n", s_current_dir); + } + fprintf(file, "File verbosity level: %d\n", verbosity); + if (g_preamble_header) { + char preamble_explain[LOGURU_PREAMBLE_WIDTH]; + print_preamble_header(preamble_explain, sizeof(preamble_explain)); + fprintf(file, "%s\n", preamble_explain); + } + fflush(file); + + VLOG_F(g_internal_verbosity, "Logging to '" LOGURU_FMT(s) "', mode: '" LOGURU_FMT(s) "', verbosity: " LOGURU_FMT(d) "", path, mode_str, verbosity); + return true; + } + + /* + Will add syslog as a standard sink for log messages + Any logging message with a verbosity lower or equal to + the given verbosity will be included. + + This works for Unix like systems (i.e. Linux/Mac) + There is no current implementation for Windows (as I don't know the + equivalent calls or have a way to test them). If you know please + add and send a pull request. + + The code should still compile under windows but will only generate + a warning message that syslog is unavailable. + + Search for LOGURU_SYSLOG to find and fix. + */ + bool add_syslog(const char* app_name, Verbosity verbosity) + { + return add_syslog(app_name, verbosity, LOG_USER); + } + bool add_syslog(const char* app_name, Verbosity verbosity, int facility) + { +#if LOGURU_SYSLOG + if (app_name == nullptr) { + app_name = argv0_filename(); + } + openlog(app_name, 0, facility); + add_callback("'syslog'", syslog_log, nullptr, verbosity, syslog_close, syslog_flush); + + VLOG_F(g_internal_verbosity, "Logging to 'syslog' , verbosity: " LOGURU_FMT(d) "", verbosity); + return true; +#else + (void)app_name; + (void)verbosity; + (void)facility; + VLOG_F(g_internal_verbosity, "syslog not implemented on this system. Request to install syslog logging ignored."); + return false; +#endif + } + // Will be called right before abort(). 
+ void set_fatal_handler(fatal_handler_t handler) + { + s_fatal_handler = handler; + } + + fatal_handler_t get_fatal_handler() + { + return s_fatal_handler; + } + + void set_verbosity_to_name_callback(verbosity_to_name_t callback) + { + s_verbosity_to_name_callback = callback; + } + + void set_name_to_verbosity_callback(name_to_verbosity_t callback) + { + s_name_to_verbosity_callback = callback; + } + + void add_stack_cleanup(const char* find_this, const char* replace_with_this) + { + if (strlen(find_this) <= strlen(replace_with_this)) { + LOG_F(WARNING, "add_stack_cleanup: the replacement should be shorter than the pattern!"); + return; + } + + s_user_stack_cleanups.push_back(StringPair(find_this, replace_with_this)); + } + + static void on_callback_change() + { + s_max_out_verbosity = Verbosity_OFF; + for (const auto& callback : s_callbacks) { + s_max_out_verbosity = std::max(s_max_out_verbosity, callback.verbosity); + } + } + + void add_callback( + const char* id, + log_handler_t callback, + void* user_data, + Verbosity verbosity, + close_handler_t on_close, + flush_handler_t on_flush) + { + std::lock_guard lock(s_mutex); + s_callbacks.push_back(Callback{ id, callback, user_data, verbosity, on_close, on_flush, 0 }); + on_callback_change(); + } + + // Returns a custom verbosity name if one is available, or nullptr. + // See also set_verbosity_to_name_callback. + const char* get_verbosity_name(Verbosity verbosity) + { + auto name = s_verbosity_to_name_callback + ? (*s_verbosity_to_name_callback)(verbosity) + : nullptr; + + // Use standard replacements if callback fails: + if (!name) { + if (verbosity <= Verbosity_FATAL) { + name = "FATL"; + } else if (verbosity == Verbosity_ERROR) { + name = "ERR"; + } else if (verbosity == Verbosity_WARNING) { + name = "WARN"; + } else if (verbosity == Verbosity_INFO) { + name = "INFO"; + } + } + + return name; + } + + // Returns Verbosity_INVALID if the name is not found. + // See also set_name_to_verbosity_callback. + Verbosity get_verbosity_from_name(const char* name) + { + auto verbosity = s_name_to_verbosity_callback + ? (*s_name_to_verbosity_callback)(name) + : Verbosity_INVALID; + + // Use standard replacements if callback fails: + if (verbosity == Verbosity_INVALID) { + if (strcmp(name, "OFF") == 0) { + verbosity = Verbosity_OFF; + } else if (strcmp(name, "INFO") == 0) { + verbosity = Verbosity_INFO; + } else if (strcmp(name, "WARNING") == 0) { + verbosity = Verbosity_WARNING; + } else if (strcmp(name, "ERROR") == 0) { + verbosity = Verbosity_ERROR; + } else if (strcmp(name, "FATAL") == 0) { + verbosity = Verbosity_FATAL; + } + } + + return verbosity; + } + + bool remove_callback(const char* id) + { + std::lock_guard lock(s_mutex); + auto it = std::find_if(begin(s_callbacks), end(s_callbacks), [&](const Callback& c) { return c.id == id; }); + if (it != s_callbacks.end()) { + if (it->close) { it->close(it->user_data); } + s_callbacks.erase(it); + on_callback_change(); + return true; + } else { + LOG_F(ERROR, "Failed to locate callback with id '" LOGURU_FMT(s) "'", id); + return false; + } + } + + void remove_all_callbacks() + { + std::lock_guard lock(s_mutex); + for (auto& callback : s_callbacks) { + if (callback.close) { + callback.close(callback.user_data); + } + } + s_callbacks.clear(); + on_callback_change(); + } + + // Returns the maximum of g_stderr_verbosity and all file/custom outputs. + Verbosity current_verbosity_cutoff() + { + return g_stderr_verbosity > s_max_out_verbosity ? 
+ g_stderr_verbosity : s_max_out_verbosity; + } + + // ------------------------------------------------------------------------ + // Threads names + +#if LOGURU_PTLS_NAMES + static pthread_once_t s_pthread_key_once = PTHREAD_ONCE_INIT; + static pthread_key_t s_pthread_key_name; + + void make_pthread_key_name() + { + (void)pthread_key_create(&s_pthread_key_name, free); + } +#endif + +#if LOGURU_WINTHREADS + // Where we store the custom thread name set by `set_thread_name` + char* thread_name_buffer() + { + __declspec(thread) static char thread_name[LOGURU_THREADNAME_WIDTH + 1] = { 0 }; + return &thread_name[0]; + } +#endif // LOGURU_WINTHREADS + + void set_thread_name(const char* name) + { +#if LOGURU_PTLS_NAMES + // Store thread name in thread-local storage at `s_pthread_key_name` + (void)pthread_once(&s_pthread_key_once, make_pthread_key_name); + (void)pthread_setspecific(s_pthread_key_name, STRDUP(name)); +#elif LOGURU_PTHREADS + // Tell the OS the thread name +#ifdef __APPLE__ + pthread_setname_np(name); +#elif defined(__FreeBSD__) || defined(__OpenBSD__) + pthread_set_name_np(pthread_self(), name); +#elif defined(__linux__) || defined(__sun) + pthread_setname_np(pthread_self(), name); +#endif +#elif LOGURU_WINTHREADS + // Store thread name in a thread-local storage: + strncpy_s(thread_name_buffer(), LOGURU_THREADNAME_WIDTH + 1, name, _TRUNCATE); +#else // LOGURU_PTHREADS + // TODO: on these weird platforms we should also store the thread name + // in a generic thread-local storage. + (void)name; +#endif // LOGURU_PTHREADS + } + + void get_thread_name(char* buffer, unsigned long long length, bool right_align_hex_id) + { + CHECK_NE_F(length, 0u, "Zero length buffer in get_thread_name"); + CHECK_NOTNULL_F(buffer, "nullptr in get_thread_name"); + +#if LOGURU_PTLS_NAMES + (void)pthread_once(&s_pthread_key_once, make_pthread_key_name); + if (const char* name = static_cast(pthread_getspecific(s_pthread_key_name))) { + snprintf(buffer, static_cast(length), "%s", name); + } else { + buffer[0] = 0; + } +#elif LOGURU_PTHREADS + // Ask the OS about the thread name. + // This is what we *want* to do on all platforms, but + // only some platforms support it (currently). + pthread_getname_np(pthread_self(), buffer, length); +#elif LOGURU_WINTHREADS + snprintf(buffer, static_cast(length), "%s", thread_name_buffer()); +#else + // Thread names unsupported + buffer[0] = 0; +#endif + + if (buffer[0] == 0) { + // We failed to get a readable thread name. + // Write a HEX thread ID instead. + // We try to get an ID that is the same as the ID you could + // read in your debugger, system monitor etc. + +#ifdef __APPLE__ + uint64_t thread_id; + pthread_threadid_np(pthread_self(), &thread_id); +#elif defined(__FreeBSD__) + long thread_id; + (void)thr_self(&thread_id); +#elif LOGURU_PTHREADS + uint64_t thread_id = pthread_self(); +#else + // This ID does not correllate to anything we can get from the OS, + // so this is the worst way to get the ID. + const auto thread_id = std::hash{}(std::this_thread::get_id()); +#endif + + if (right_align_hex_id) { + snprintf(buffer, static_cast(length), "%*X", static_cast(length - 1), static_cast(thread_id)); + } else { + snprintf(buffer, static_cast(length), "%X", static_cast(thread_id)); + } + } + } + + // ------------------------------------------------------------------------ + // Stack traces + +#if LOGURU_STACKTRACES + Text demangle(const char* name) + { + int status = -1; + char* demangled = abi::__cxa_demangle(name, 0, 0, &status); + Text result{ status == 0 ? 
demangled : STRDUP(name) }; + return result; + } + +#if LOGURU_RTTI + template + std::string type_name() + { + auto demangled = demangle(typeid(T).name()); + return demangled.c_str(); + } +#endif // LOGURU_RTTI + + static const StringPairList REPLACE_LIST = { + #if LOGURU_RTTI + { type_name(), "std::string" }, + { type_name(), "std::wstring" }, + { type_name(), "std::u16string" }, + { type_name(), "std::u32string" }, + #endif // LOGURU_RTTI + { "std::__1::", "std::" }, + { "__thiscall ", "" }, + { "__cdecl ", "" }, + }; + + void do_replacements(const StringPairList & replacements, std::string & str) + { + for (auto&& p : replacements) { + if (p.first.size() <= p.second.size()) { + // On gcc, "type_name()" is "std::string" + continue; + } + + size_t it; + while ((it = str.find(p.first)) != std::string::npos) { + str.replace(it, p.first.size(), p.second); + } + } + } + + std::string prettify_stacktrace(const std::string & input) + { + std::string output = input; + + do_replacements(s_user_stack_cleanups, output); + do_replacements(REPLACE_LIST, output); + + try { + std::regex std_allocator_re(R"(,\s*std::allocator<[^<>]+>)"); + output = std::regex_replace(output, std_allocator_re, std::string("")); + + std::regex template_spaces_re(R"(<\s*([^<> ]+)\s*>)"); + output = std::regex_replace(output, template_spaces_re, std::string("<$1>")); + } + catch (std::regex_error&) { + // Probably old GCC. + } + + return output; + } + + std::string stacktrace_as_stdstring(int skip) + { + // From https://gist.github.com/fmela/591333 + void* callstack[128]; + const auto max_frames = sizeof(callstack) / sizeof(callstack[0]); + int num_frames = backtrace(callstack, max_frames); + char** symbols = backtrace_symbols(callstack, num_frames); + + std::string result; + // Print stack traces so the most relevant ones are written last + // Rationale: http://yellerapp.com/posts/2015-01-22-upside-down-stacktraces.html + for (int i = num_frames - 1; i >= skip; --i) { + char buf[1024]; + Dl_info info; + if (dladdr(callstack[i], &info) && info.dli_sname) { + char* demangled = NULL; + int status = -1; + if (info.dli_sname[0] == '_') { + demangled = abi::__cxa_demangle(info.dli_sname, 0, 0, &status); + } + snprintf(buf, sizeof(buf), "%-3d %*p %s + %zd\n", + i - skip, int(2 + sizeof(void*) * 2), callstack[i], + status == 0 ? demangled : + info.dli_sname == 0 ? 
symbols[i] : info.dli_sname, + static_cast(callstack[i]) - static_cast(info.dli_saddr)); + free(demangled); + } else { + snprintf(buf, sizeof(buf), "%-3d %*p %s\n", + i - skip, int(2 + sizeof(void*) * 2), callstack[i], symbols[i]); + } + result += buf; + } + free(symbols); + + if (num_frames == max_frames) { + result = "[truncated]\n" + result; + } + + if (!result.empty() && result[result.size() - 1] == '\n') { + result.resize(result.size() - 1); + } + + return prettify_stacktrace(result); + } + +#else // LOGURU_STACKTRACES + Text demangle(const char* name) + { + return Text(STRDUP(name)); + } + + std::string stacktrace_as_stdstring(int) + { + // No stacktraces available on this platform" + return ""; + } + +#endif // LOGURU_STACKTRACES + + Text stacktrace(int skip) + { + auto str = stacktrace_as_stdstring(skip + 1); + return Text(STRDUP(str.c_str())); + } + + // ------------------------------------------------------------------------ + + static void print_preamble_header(char* out_buff, size_t out_buff_size) + { + if (out_buff_size == 0) { return; } + out_buff[0] = '\0'; + size_t pos = 0; + if (g_preamble_date && pos < out_buff_size) { + int bytes = snprintf(out_buff + pos, out_buff_size - pos, "date "); + if (bytes > 0) { + pos += bytes; + } + } + if (g_preamble_time && pos < out_buff_size) { + int bytes = snprintf(out_buff + pos, out_buff_size - pos, "time "); + if (bytes > 0) { + pos += bytes; + } + } + if (g_preamble_uptime && pos < out_buff_size) { + int bytes = snprintf(out_buff + pos, out_buff_size - pos, "( uptime ) "); + if (bytes > 0) { + pos += bytes; + } + } + if (g_preamble_thread && pos < out_buff_size) { + int bytes = snprintf(out_buff + pos, out_buff_size - pos, "[%-*s]", LOGURU_THREADNAME_WIDTH, " thread name/id"); + if (bytes > 0) { + pos += bytes; + } + } + if (g_preamble_file && pos < out_buff_size) { + int bytes = snprintf(out_buff + pos, out_buff_size - pos, "%*s:line ", LOGURU_FILENAME_WIDTH, "file"); + if (bytes > 0) { + pos += bytes; + } + } + if (g_preamble_verbose && pos < out_buff_size) { + int bytes = snprintf(out_buff + pos, out_buff_size - pos, " v"); + if (bytes > 0) { + pos += bytes; + } + } + if (g_preamble_pipe && pos < out_buff_size) { + int bytes = snprintf(out_buff + pos, out_buff_size - pos, "| "); + if (bytes > 0) { + pos += bytes; + } + } + } + + static void print_preamble(char* out_buff, size_t out_buff_size, Verbosity verbosity, const char* file, unsigned line) + { + if (out_buff_size == 0) { return; } + out_buff[0] = '\0'; + if (!g_preamble) { return; } + long long ms_since_epoch = duration_cast(system_clock::now().time_since_epoch()).count(); + time_t sec_since_epoch = time_t(ms_since_epoch / 1000); + tm time_info; + localtime_r(&sec_since_epoch, &time_info); + + auto uptime_ms = duration_cast(steady_clock::now() - s_start_time).count(); + auto uptime_sec = static_cast (uptime_ms) / 1000.0; + + char thread_name[LOGURU_THREADNAME_WIDTH + 1] = { 0 }; + get_thread_name(thread_name, LOGURU_THREADNAME_WIDTH + 1, true); + + if (s_strip_file_path) { + file = filename(file); + } + + char level_buff[6]; + const char* custom_level_name = get_verbosity_name(verbosity); + if (custom_level_name) { + snprintf(level_buff, sizeof(level_buff) - 1, "%s", custom_level_name); + } else { + snprintf(level_buff, sizeof(level_buff) - 1, "% 4d", static_cast(verbosity)); + } + + size_t pos = 0; + + if (g_preamble_date && pos < out_buff_size) { + int bytes = snprintf(out_buff + pos, out_buff_size - pos, "%04d-%02d-%02d ", + 1900 + time_info.tm_year, 1 + time_info.tm_mon, 
time_info.tm_mday); + if (bytes > 0) { + pos += bytes; + } + } + if (g_preamble_time && pos < out_buff_size) { + int bytes = snprintf(out_buff + pos, out_buff_size - pos, "%02d:%02d:%02d.%03lld ", + time_info.tm_hour, time_info.tm_min, time_info.tm_sec, ms_since_epoch % 1000); + if (bytes > 0) { + pos += bytes; + } + } + if (g_preamble_uptime && pos < out_buff_size) { + int bytes = snprintf(out_buff + pos, out_buff_size - pos, "(%8.3fs) ", + uptime_sec); + if (bytes > 0) { + pos += bytes; + } + } + if (g_preamble_thread && pos < out_buff_size) { + int bytes = snprintf(out_buff + pos, out_buff_size - pos, "[%-*s]", + LOGURU_THREADNAME_WIDTH, thread_name); + if (bytes > 0) { + pos += bytes; + } + } + if (g_preamble_file && pos < out_buff_size) { + char shortened_filename[LOGURU_FILENAME_WIDTH + 1]; + snprintf(shortened_filename, LOGURU_FILENAME_WIDTH + 1, "%s", file); + int bytes = snprintf(out_buff + pos, out_buff_size - pos, "%*s:%-5u ", + LOGURU_FILENAME_WIDTH, shortened_filename, line); + if (bytes > 0) { + pos += bytes; + } + } + if (g_preamble_verbose && pos < out_buff_size) { + int bytes = snprintf(out_buff + pos, out_buff_size - pos, "%4s", + level_buff); + if (bytes > 0) { + pos += bytes; + } + } + if (g_preamble_pipe && pos < out_buff_size) { + int bytes = snprintf(out_buff + pos, out_buff_size - pos, "| "); + if (bytes > 0) { + pos += bytes; + } + } + } + + // stack_trace_skip is just if verbosity == FATAL. + static void log_message(int stack_trace_skip, Message & message, bool with_indentation, bool abort_if_fatal) + { + const auto verbosity = message.verbosity; + std::lock_guard lock(s_mutex); + + if (message.verbosity == Verbosity_FATAL) { + auto st = loguru::stacktrace(stack_trace_skip + 2); + if (!st.empty()) { + RAW_LOG_F(ERROR, "Stack trace:\n" LOGURU_FMT(s) "", st.c_str()); + } + + auto ec = loguru::get_error_context(); + if (!ec.empty()) { + RAW_LOG_F(ERROR, "" LOGURU_FMT(s) "", ec.c_str()); + } + } + + if (with_indentation) { + message.indentation = indentation(s_stderr_indentation); + } + + if (verbosity <= g_stderr_verbosity) { + if (g_colorlogtostderr && s_terminal_has_color) { + if (verbosity > Verbosity_WARNING) { + fprintf(stderr, "%s%s%s%s%s%s%s%s\n", + terminal_reset(), + terminal_dim(), + message.preamble, + message.indentation, + verbosity == Verbosity_INFO ? terminal_reset() : "", // un-dim for info + message.prefix, + message.message, + terminal_reset()); + } else { + fprintf(stderr, "%s%s%s%s%s%s%s\n", + terminal_reset(), + verbosity == Verbosity_WARNING ? 
terminal_yellow() : terminal_red(), + message.preamble, + message.indentation, + message.prefix, + message.message, + terminal_reset()); + } + } else { + fprintf(stderr, "%s%s%s%s\n", + message.preamble, message.indentation, message.prefix, message.message); + } + + if (g_flush_interval_ms == 0) { + fflush(stderr); + } else { + s_needs_flushing = true; + } + } + + for (auto& p : s_callbacks) { + if (verbosity <= p.verbosity) { + if (with_indentation) { + message.indentation = indentation(p.indentation); + } + p.callback(p.user_data, message); + if (g_flush_interval_ms == 0) { + if (p.flush) { p.flush(p.user_data); } + } else { + s_needs_flushing = true; + } + } + } + + if (g_flush_interval_ms > 0 && !s_flush_thread) { + s_flush_thread = new std::thread([]() { + for (;;) { + if (s_needs_flushing) { + flush(); + } + std::this_thread::sleep_for(std::chrono::milliseconds(g_flush_interval_ms)); + } + }); + } + + if (message.verbosity == Verbosity_FATAL) { + flush(); + + if (s_fatal_handler) { + s_fatal_handler(message); + flush(); + } + + if (abort_if_fatal) { +#if !defined(_WIN32) + if (s_signal_options.sigabrt) { + // Make sure we don't catch our own abort: + signal(SIGABRT, SIG_DFL); + } +#endif + abort(); + } + } + } + + // stack_trace_skip is just if verbosity == FATAL. + void log_to_everywhere(int stack_trace_skip, Verbosity verbosity, + const char* file, unsigned line, + const char* prefix, const char* buff) + { + char preamble_buff[LOGURU_PREAMBLE_WIDTH]; + print_preamble(preamble_buff, sizeof(preamble_buff), verbosity, file, line); + auto message = Message{ verbosity, file, line, preamble_buff, "", prefix, buff }; + log_message(stack_trace_skip + 1, message, true, true); + } + +#if LOGURU_USE_FMTLIB + void vlog(Verbosity verbosity, const char* file, unsigned line, const char* format, fmt::format_args args) + { + auto formatted = fmt::vformat(format, args); + log_to_everywhere(1, verbosity, file, line, "", formatted.c_str()); + } + + void raw_vlog(Verbosity verbosity, const char* file, unsigned line, const char* format, fmt::format_args args) + { + auto formatted = fmt::vformat(format, args); + auto message = Message{ verbosity, file, line, "", "", "", formatted.c_str() }; + log_message(1, message, false, true); + } +#else + void log(Verbosity verbosity, const char* file, unsigned line, const char* format, ...) + { + va_list vlist; + va_start(vlist, format); + vlog(verbosity, file, line, format, vlist); + va_end(vlist); + } + + void vlog(Verbosity verbosity, const char* file, unsigned line, const char* format, va_list vlist) + { + auto buff = vtextprintf(format, vlist); + log_to_everywhere(1, verbosity, file, line, "", buff.c_str()); + } + + void raw_log(Verbosity verbosity, const char* file, unsigned line, const char* format, ...) 
+ { + va_list vlist; + va_start(vlist, format); + auto buff = vtextprintf(format, vlist); + auto message = Message{ verbosity, file, line, "", "", "", buff.c_str() }; + log_message(1, message, false, true); + va_end(vlist); + } +#endif + + void flush() + { + std::lock_guard lock(s_mutex); + fflush(stderr); + for (const auto& callback : s_callbacks) { + if (callback.flush) { + callback.flush(callback.user_data); + } + } + s_needs_flushing = false; + } + + LogScopeRAII::LogScopeRAII(Verbosity verbosity, const char* file, unsigned line, const char* format, va_list vlist) : + _verbosity(verbosity), _file(file), _line(line) + { + this->Init(format, vlist); + } + + LogScopeRAII::LogScopeRAII(Verbosity verbosity, const char* file, unsigned line, const char* format, ...) : + _verbosity(verbosity), _file(file), _line(line) + { + va_list vlist; + va_start(vlist, format); + this->Init(format, vlist); + va_end(vlist); + } + + LogScopeRAII::~LogScopeRAII() + { + if (_file) { + std::lock_guard lock(s_mutex); + if (_indent_stderr && s_stderr_indentation > 0) { + --s_stderr_indentation; + } + for (auto& p : s_callbacks) { + // Note: Callback indentation cannot change! + if (_verbosity <= p.verbosity) { + // in unlikely case this callback is new + if (p.indentation > 0) { + --p.indentation; + } + } + } +#if LOGURU_VERBOSE_SCOPE_ENDINGS + auto duration_sec = static_cast(now_ns() - _start_time_ns) / 1e9; +#if LOGURU_USE_FMTLIB + auto buff = textprintf("{:.{}f} s: {:s}", duration_sec, LOGURU_SCOPE_TIME_PRECISION, _name); +#else + auto buff = textprintf("%.*f s: %s", LOGURU_SCOPE_TIME_PRECISION, duration_sec, _name); +#endif + log_to_everywhere(1, _verbosity, _file, _line, "} ", buff.c_str()); +#else + log_to_everywhere(1, _verbosity, _file, _line, "}", ""); +#endif + } + } + + void LogScopeRAII::Init(const char* format, va_list vlist) + { + if (_verbosity <= current_verbosity_cutoff()) { + std::lock_guard lock(s_mutex); + _indent_stderr = (_verbosity <= g_stderr_verbosity); + _start_time_ns = now_ns(); + vsnprintf(_name, sizeof(_name), format, vlist); + log_to_everywhere(1, _verbosity, _file, _line, "{ ", _name); + + if (_indent_stderr) { + ++s_stderr_indentation; + } + + for (auto& p : s_callbacks) { + if (_verbosity <= p.verbosity) { + ++p.indentation; + } + } + } else { + _file = nullptr; + } + } + +#if LOGURU_USE_FMTLIB + void vlog_and_abort(int stack_trace_skip, const char* expr, const char* file, unsigned line, const char* format, fmt::format_args args) + { + auto formatted = fmt::vformat(format, args); + log_to_everywhere(stack_trace_skip + 1, Verbosity_FATAL, file, line, expr, formatted.c_str()); + abort(); // log_to_everywhere already does this, but this makes the analyzer happy. + } +#else + void log_and_abort(int stack_trace_skip, const char* expr, const char* file, unsigned line, const char* format, ...) + { + va_list vlist; + va_start(vlist, format); + auto buff = vtextprintf(format, vlist); + log_to_everywhere(stack_trace_skip + 1, Verbosity_FATAL, file, line, expr, buff.c_str()); + va_end(vlist); + abort(); // log_to_everywhere already does this, but this makes the analyzer happy. + } +#endif + + void log_and_abort(int stack_trace_skip, const char* expr, const char* file, unsigned line) + { + log_and_abort(stack_trace_skip + 1, expr, file, line, " "); + } + + // ---------------------------------------------------------------------------- + // Streams: + +#if LOGURU_USE_FMTLIB + template + std::string vstrprintf(const char* format, const Args&... 
args) + { + auto text = textprintf(format, args...); + std::string result = text.c_str(); + return result; + } + + template + std::string strprintf(const char* format, const Args&... args) + { + return vstrprintf(format, args...); + } +#else + std::string vstrprintf(const char* format, va_list vlist) + { + auto text = vtextprintf(format, vlist); + std::string result = text.c_str(); + return result; + } + + std::string strprintf(const char* format, ...) + { + va_list vlist; + va_start(vlist, format); + auto result = vstrprintf(format, vlist); + va_end(vlist); + return result; + } +#endif + +#if LOGURU_WITH_STREAMS + + StreamLogger::~StreamLogger() noexcept(false) + { + auto message = _ss.str(); + log(_verbosity, _file, _line, LOGURU_FMT(s), message.c_str()); + } + + AbortLogger::~AbortLogger() noexcept(false) + { + auto message = _ss.str(); + loguru::log_and_abort(1, _expr, _file, _line, LOGURU_FMT(s), message.c_str()); + } + +#endif // LOGURU_WITH_STREAMS + + // ---------------------------------------------------------------------------- + // 888888 88""Yb 88""Yb dP"Yb 88""Yb dP""b8 dP"Yb 88b 88 888888 888888 Yb dP 888888 + // 88__ 88__dP 88__dP dP Yb 88__dP dP `" dP Yb 88Yb88 88 88__ YbdP 88 + // 88"" 88"Yb 88"Yb Yb dP 88"Yb Yb Yb dP 88 Y88 88 88"" dPYb 88 + // 888888 88 Yb 88 Yb YbodP 88 Yb YboodP YbodP 88 Y8 88 888888 dP Yb 88 + // ---------------------------------------------------------------------------- + + struct StringStream { + std::string str; + }; + + // Use this in your EcPrinter implementations. + void stream_print(StringStream & out_string_stream, const char* text) + { + out_string_stream.str += text; + } + + // ---------------------------------------------------------------------------- + + using ECPtr = EcEntryBase*; + +#if defined(_WIN32) || (defined(__APPLE__) && !TARGET_OS_IPHONE) +#ifdef __APPLE__ +#define LOGURU_THREAD_LOCAL __thread +#else +#define LOGURU_THREAD_LOCAL thread_local +#endif + static LOGURU_THREAD_LOCAL ECPtr thread_ec_ptr = nullptr; + + ECPtr& get_thread_ec_head_ref() + { + return thread_ec_ptr; + } +#else // !thread_local + static pthread_once_t s_ec_pthread_once = PTHREAD_ONCE_INIT; + static pthread_key_t s_ec_pthread_key; + + void free_ec_head_ref(void* io_error_context) + { + delete reinterpret_cast(io_error_context); + } + + void ec_make_pthread_key() + { + (void)pthread_key_create(&s_ec_pthread_key, free_ec_head_ref); + } + + ECPtr& get_thread_ec_head_ref() + { + (void)pthread_once(&s_ec_pthread_once, ec_make_pthread_key); + auto ec = reinterpret_cast(pthread_getspecific(s_ec_pthread_key)); + if (ec == nullptr) { + ec = new ECPtr(nullptr); + (void)pthread_setspecific(s_ec_pthread_key, ec); + } + return *ec; + } +#endif // !thread_local + + // ---------------------------------------------------------------------------- + + EcHandle get_thread_ec_handle() + { + return get_thread_ec_head_ref(); + } + + Text get_error_context() + { + return get_error_context_for(get_thread_ec_head_ref()); + } + + Text get_error_context_for(const EcEntryBase * ec_head) + { + std::vector stack; + while (ec_head) { + stack.push_back(ec_head); + ec_head = ec_head->_previous; + } + std::reverse(stack.begin(), stack.end()); + + StringStream result; + if (!stack.empty()) { + result.str += "------------------------------------------------\n"; + for (auto entry : stack) { + const auto description = std::string(entry->_descr) + ":"; +#if LOGURU_USE_FMTLIB + auto prefix = textprintf("[ErrorContext] {.{}s}:{:-5u} {:-20s} ", + filename(entry->_file), LOGURU_FILENAME_WIDTH, 
entry->_line, description.c_str()); +#else + auto prefix = textprintf("[ErrorContext] %*s:%-5u %-20s ", + LOGURU_FILENAME_WIDTH, filename(entry->_file), entry->_line, description.c_str()); +#endif + result.str += prefix.c_str(); + entry->print_value(result); + result.str += "\n"; + } + result.str += "------------------------------------------------"; + } + return Text(STRDUP(result.str.c_str())); + } + + EcEntryBase::EcEntryBase(const char* file, unsigned line, const char* descr) + : _file(file), _line(line), _descr(descr) + { + EcEntryBase*& ec_head = get_thread_ec_head_ref(); + _previous = ec_head; + ec_head = this; + } + + EcEntryBase::~EcEntryBase() + { + get_thread_ec_head_ref() = _previous; + } + + // ------------------------------------------------------------------------ + + Text ec_to_text(const char* value) + { + // Add quotes around the string to make it obvious where it begin and ends. + // This is great for detecting erroneous leading or trailing spaces in e.g. an identifier. + auto str = "\"" + std::string(value) + "\""; + return Text{ STRDUP(str.c_str()) }; + } + + Text ec_to_text(char c) + { + // Add quotes around the character to make it obvious where it begin and ends. + std::string str = "'"; + + auto write_hex_digit = [&](unsigned num) + { + if (num < 10u) { str += char('0' + num); } else { str += char('a' + num - 10); } + }; + + auto write_hex_16 = [&](uint16_t n) + { + write_hex_digit((n >> 12u) & 0x0f); + write_hex_digit((n >> 8u) & 0x0f); + write_hex_digit((n >> 4u) & 0x0f); + write_hex_digit((n >> 0u) & 0x0f); + }; + + if (c == '\\') { str += "\\\\"; } else if (c == '\"') { str += "\\\""; } else if (c == '\'') { str += "\\\'"; } else if (c == '\0') { str += "\\0"; } else if (c == '\b') { str += "\\b"; } else if (c == '\f') { str += "\\f"; } else if (c == '\n') { str += "\\n"; } else if (c == '\r') { str += "\\r"; } else if (c == '\t') { str += "\\t"; } else if (0 <= c && c < 0x20) { + str += "\\u"; + write_hex_16(static_cast(c)); + } else { str += c; } + + str += "'"; + + return Text{ STRDUP(str.c_str()) }; + } + +#define DEFINE_EC(Type) \ + Text ec_to_text(Type value) \ + { \ + auto str = std::to_string(value); \ + return Text{STRDUP(str.c_str())}; \ + } + + DEFINE_EC(int) + DEFINE_EC(unsigned int) + DEFINE_EC(long) + DEFINE_EC(unsigned long) + DEFINE_EC(long long) + DEFINE_EC(unsigned long long) + DEFINE_EC(float) + DEFINE_EC(double) + DEFINE_EC(long double) + +#undef DEFINE_EC + + Text ec_to_text(EcHandle ec_handle) + { + Text parent_ec = get_error_context_for(ec_handle); + size_t buffer_size = strlen(parent_ec.c_str()) + 2; + char* with_newline = reinterpret_cast(malloc(buffer_size)); + with_newline[0] = '\n'; +#ifdef _WIN32 + strncpy_s(with_newline + 1, buffer_size, parent_ec.c_str(), buffer_size - 2); +#else + strcpy(with_newline + 1, parent_ec.c_str()); +#endif + return Text(with_newline); + } + + // ---------------------------------------------------------------------------- + +} // namespace loguru + +// ---------------------------------------------------------------------------- +// .dP"Y8 88 dP""b8 88b 88 db 88 .dP"Y8 +// `Ybo." 88 dP `" 88Yb88 dPYb 88 `Ybo." 
+// o.`Y8b 88 Yb "88 88 Y88 dP__Yb 88 .o o.`Y8b +// 8bodP' 88 YboodP 88 Y8 dP""""Yb 88ood8 8bodP' +// ---------------------------------------------------------------------------- + +#ifdef _WIN32 +namespace loguru { + void install_signal_handlers(const SignalOptions& signal_options) + { + (void)signal_options; + // TODO: implement signal handlers on windows + } +} // namespace loguru + +#else // _WIN32 + +namespace loguru { + void write_to_stderr(const char* data, size_t size) + { + auto result = write(STDERR_FILENO, data, size); + (void)result; // Ignore errors. + } + + void write_to_stderr(const char* data) + { + write_to_stderr(data, strlen(data)); + } + + void call_default_signal_handler(int signal_number) + { + struct sigaction sig_action; + memset(&sig_action, 0, sizeof(sig_action)); + sigemptyset(&sig_action.sa_mask); + sig_action.sa_handler = SIG_DFL; + sigaction(signal_number, &sig_action, NULL); + kill(getpid(), signal_number); + } + + void signal_handler(int signal_number, siginfo_t*, void*) + { + const char* signal_name = "UNKNOWN SIGNAL"; + + if (signal_number == SIGABRT) { signal_name = "SIGABRT"; } + if (signal_number == SIGBUS) { signal_name = "SIGBUS"; } + if (signal_number == SIGFPE) { signal_name = "SIGFPE"; } + if (signal_number == SIGILL) { signal_name = "SIGILL"; } + if (signal_number == SIGINT) { signal_name = "SIGINT"; } + if (signal_number == SIGSEGV) { signal_name = "SIGSEGV"; } + if (signal_number == SIGTERM) { signal_name = "SIGTERM"; } + + // -------------------------------------------------------------------- + /* There are few things that are safe to do in a signal handler, + but writing to stderr is one of them. + So we first print out what happened to stderr so we're sure that gets out, + then we do the unsafe things, like logging the stack trace. + */ + + if (g_colorlogtostderr && s_terminal_has_color) { + write_to_stderr(terminal_reset()); + write_to_stderr(terminal_bold()); + write_to_stderr(terminal_light_red()); + } + write_to_stderr("\n"); + write_to_stderr("Loguru caught a signal: "); + write_to_stderr(signal_name); + write_to_stderr("\n"); + if (g_colorlogtostderr && s_terminal_has_color) { + write_to_stderr(terminal_reset()); + } + + // -------------------------------------------------------------------- + + if (s_signal_options.unsafe_signal_handler) { + // -------------------------------------------------------------------- + /* Now we do unsafe things. This can for example lead to deadlocks if + the signal was triggered from the system's memory management functions + and the code below tries to do allocations. + */ + + flush(); + char preamble_buff[LOGURU_PREAMBLE_WIDTH]; + print_preamble(preamble_buff, sizeof(preamble_buff), Verbosity_FATAL, "", 0); + auto message = Message{ Verbosity_FATAL, "", 0, preamble_buff, "", "Signal: ", signal_name }; + try { + log_message(1, message, false, false); + } + catch (...) { + // This can happed due to s_fatal_handler. 
+ write_to_stderr("Exception caught and ignored by Loguru signal handler.\n"); + } + flush(); + + // -------------------------------------------------------------------- + } + + call_default_signal_handler(signal_number); + } + + void install_signal_handlers(const SignalOptions& signal_options) + { + s_signal_options = signal_options; + + struct sigaction sig_action; + memset(&sig_action, 0, sizeof(sig_action)); + sigemptyset(&sig_action.sa_mask); + sig_action.sa_flags |= SA_SIGINFO; + sig_action.sa_sigaction = &signal_handler; + + if (signal_options.sigabrt) { + CHECK_F(sigaction(SIGABRT, &sig_action, NULL) != -1, "Failed to install handler for SIGABRT"); + } + if (signal_options.sigbus) { + CHECK_F(sigaction(SIGBUS, &sig_action, NULL) != -1, "Failed to install handler for SIGBUS"); + } + if (signal_options.sigfpe) { + CHECK_F(sigaction(SIGFPE, &sig_action, NULL) != -1, "Failed to install handler for SIGFPE"); + } + if (signal_options.sigill) { + CHECK_F(sigaction(SIGILL, &sig_action, NULL) != -1, "Failed to install handler for SIGILL"); + } + if (signal_options.sigint) { + CHECK_F(sigaction(SIGINT, &sig_action, NULL) != -1, "Failed to install handler for SIGINT"); + } + if (signal_options.sigsegv) { + CHECK_F(sigaction(SIGSEGV, &sig_action, NULL) != -1, "Failed to install handler for SIGSEGV"); + } + if (signal_options.sigterm) { + CHECK_F(sigaction(SIGTERM, &sig_action, NULL) != -1, "Failed to install handler for SIGTERM"); + } + } +} // namespace loguru + +#endif // _WIN32 + + +#if defined(__GNUC__) || defined(__clang__) +#pragma GCC diagnostic pop +#elif defined(_MSC_VER) +#pragma warning(pop) +#endif + +LOGURU_ANONYMOUS_NAMESPACE_END + +#endif // LOGURU_IMPLEMENTATION diff --git a/lib/log/loguru.hpp b/lib/log/loguru.hpp new file mode 100644 index 0000000..8917b79 --- /dev/null +++ b/lib/log/loguru.hpp @@ -0,0 +1,1475 @@ +/* +Loguru logging library for C++, by Emil Ernerfeldt. +www.github.com/emilk/loguru +If you find Loguru useful, please let me know on twitter or in a mail! +Twitter: @ernerfeldt +Mail: emil.ernerfeldt@gmail.com +Website: www.ilikebigbits.com + +# License + This software is in the public domain. Where that dedication is not + recognized, you are granted a perpetual, irrevocable license to + copy, modify and distribute it as you see fit. + +# Inspiration + Much of Loguru was inspired by GLOG, https://code.google.com/p/google-glog/. + The choice of public domain is fully due Sean T. Barrett + and his wonderful stb libraries at https://github.com/nothings/stb. + +# Version history + * Version 0.1.0 - 2015-03-22 - Works great on Mac. + * Version 0.2.0 - 2015-09-17 - Removed the only dependency. + * Version 0.3.0 - 2015-10-02 - Drop-in replacement for most of GLOG + * Version 0.4.0 - 2015-10-07 - Single-file! + * Version 0.5.0 - 2015-10-17 - Improved file logging + * Version 0.6.0 - 2015-10-24 - Add stack traces + * Version 0.7.0 - 2015-10-27 - Signals + * Version 0.8.0 - 2015-10-30 - Color logging. 
+ * Version 0.9.0 - 2015-11-26 - ABORT_S and proper handling of FATAL + * Version 1.0.0 - 2016-02-14 - ERROR_CONTEXT + * Version 1.1.0 - 2016-02-19 - -v OFF, -v INFO etc + * Version 1.1.1 - 2016-02-20 - textprintf vs strprintf + * Version 1.1.2 - 2016-02-22 - Remove g_alsologtostderr + * Version 1.1.3 - 2016-02-29 - ERROR_CONTEXT as linked list + * Version 1.2.0 - 2016-03-19 - Add get_thread_name() + * Version 1.2.1 - 2016-03-20 - Minor fixes + * Version 1.2.2 - 2016-03-29 - Fix issues with set_fatal_handler throwing an exception + * Version 1.2.3 - 2016-05-16 - Log current working directory in loguru::init(). + * Version 1.2.4 - 2016-05-18 - Custom replacement for -v in loguru::init() by bjoernpollex + * Version 1.2.5 - 2016-05-18 - Add ability to print ERROR_CONTEXT of parent thread. + * Version 1.2.6 - 2016-05-19 - Bug fix regarding VLOG verbosity argument lacking (). + * Version 1.2.7 - 2016-05-23 - Fix PATH_MAX problem. + * Version 1.2.8 - 2016-05-26 - Add shutdown() and remove_all_callbacks() + * Version 1.2.9 - 2016-06-09 - Use a monotonic clock for uptime. + * Version 1.3.0 - 2016-07-20 - Fix issues with callback flush/close not being called. + * Version 1.3.1 - 2016-07-20 - Add LOGURU_UNSAFE_SIGNAL_HANDLER to toggle stacktrace on signals. + * Version 1.3.2 - 2016-07-20 - Add loguru::arguments() + * Version 1.4.0 - 2016-09-15 - Semantic versioning + add loguru::create_directories + * Version 1.4.1 - 2016-09-29 - Customize formating with LOGURU_FILENAME_WIDTH + * Version 1.5.0 - 2016-12-22 - LOGURU_USE_FMTLIB by kolis and LOGURU_WITH_FILEABS by scinart + * Version 1.5.1 - 2017-08-08 - Terminal colors on Windows 10 thanks to looki + * Version 1.6.0 - 2018-01-03 - Add LOGURU_RTTI and LOGURU_STACKTRACES settings + * Version 1.7.0 - 2018-01-03 - Add ability to turn off the preamble with loguru::g_preamble + * Version 1.7.1 - 2018-04-05 - Add function get_fatal_handler + * Version 1.7.2 - 2018-04-22 - Fix a bug where large file names could cause stack corruption (thanks @ccamporesi) + * Version 1.8.0 - 2018-04-23 - Shorten long file names to keep preamble fixed width + * Version 1.9.0 - 2018-09-22 - Adjust terminal colors, add LOGURU_VERBOSE_SCOPE_ENDINGS, add LOGURU_SCOPE_TIME_PRECISION, add named log levels + * Version 2.0.0 - 2018-09-22 - Split loguru.hpp into loguru.hpp and loguru.cpp + * Version 2.1.0 - 2019-09-23 - Update fmtlib + add option to loguru::init to NOT set main thread name. + * Version 2.2.0 - 2020-07-31 - Replace LOGURU_CATCH_SIGABRT with struct SignalOptions + +# Compiling + Just include where you want to use Loguru. + Then, in one .cpp file #include + Make sure you compile with -std=c++11 -lstdc++ -lpthread -ldl + +# Usage + For details, please see the official documentation at emilk.github.io/loguru + + #include + + int main(int argc, char* argv[]) { + loguru::init(argc, argv); + + // Put every log message in "everything.log": + loguru::add_file("everything.log", loguru::Append, loguru::Verbosity_MAX); + + LOG_F(INFO, "The magic number is %d", 42); + } + +*/ + +#if defined(LOGURU_IMPLEMENTATION) +#error "You are defining LOGURU_IMPLEMENTATION. This is for older versions of Loguru. You should now instead include loguru.cpp (or build it and link with it)" +#endif + +// Disable all warnings from gcc/clang: +#if defined(__clang__) +#pragma clang system_header +#elif defined(__GNUC__) +#pragma GCC system_header +#endif + +#ifndef LOGURU_HAS_DECLARED_FORMAT_HEADER +#define LOGURU_HAS_DECLARED_FORMAT_HEADER + +// Semantic versioning. 
Loguru version can be printed with printf("%d.%d.%d", LOGURU_VERSION_MAJOR, LOGURU_VERSION_MINOR, LOGURU_VERSION_PATCH); +#define LOGURU_VERSION_MAJOR 2 +#define LOGURU_VERSION_MINOR 1 +#define LOGURU_VERSION_PATCH 0 + +#if defined(_MSC_VER) +#include // Needed for _In_z_ etc annotations +#endif + +#if defined(__linux__) || defined(__APPLE__) +#define LOGURU_SYSLOG 1 +#else +#define LOGURU_SYSLOG 0 +#endif + +// ---------------------------------------------------------------------------- + +#ifndef LOGURU_EXPORT + // Define to your project's export declaration if needed for use in a shared library. +#define LOGURU_EXPORT +#endif + +#ifndef LOGURU_SCOPE_TEXT_SIZE + // Maximum length of text that can be printed by a LOG_SCOPE. + // This should be long enough to get most things, but short enough not to clutter the stack. +#define LOGURU_SCOPE_TEXT_SIZE 196 +#endif + +#ifndef LOGURU_FILENAME_WIDTH + // Width of the column containing the file name +#define LOGURU_FILENAME_WIDTH 23 +#endif + +#ifndef LOGURU_THREADNAME_WIDTH + // Width of the column containing the thread name +#define LOGURU_THREADNAME_WIDTH 16 +#endif + +#ifndef LOGURU_SCOPE_TIME_PRECISION + // Resolution of scope timers. 3=ms, 6=us, 9=ns +#define LOGURU_SCOPE_TIME_PRECISION 3 +#endif + +#ifdef LOGURU_CATCH_SIGABRT +#error "You are defining LOGURU_CATCH_SIGABRT. This is for older versions of Loguru. You should now instead set the options passed to loguru::init" +#endif + +#ifndef LOGURU_VERBOSE_SCOPE_ENDINGS + // Show milliseconds and scope name at end of scope. +#define LOGURU_VERBOSE_SCOPE_ENDINGS 1 +#endif + +#ifndef LOGURU_REDEFINE_ASSERT +#define LOGURU_REDEFINE_ASSERT 0 +#endif + +#ifndef LOGURU_WITH_STREAMS +#define LOGURU_WITH_STREAMS 0 +#endif + +#ifndef LOGURU_REPLACE_GLOG +#define LOGURU_REPLACE_GLOG 0 +#endif + +#if LOGURU_REPLACE_GLOG +#undef LOGURU_WITH_STREAMS +#define LOGURU_WITH_STREAMS 1 +#endif + +#if defined(LOGURU_UNSAFE_SIGNAL_HANDLER) +#error "You are defining LOGURU_UNSAFE_SIGNAL_HANDLER. This is for older versions of Loguru. You should now instead set the unsafe_signal_handler option when you call loguru::init." +#endif + +#if LOGURU_IMPLEMENTATION +#undef LOGURU_WITH_STREAMS +#define LOGURU_WITH_STREAMS 1 +#endif + +#ifndef LOGURU_USE_FMTLIB +#define LOGURU_USE_FMTLIB 0 +#endif + +#ifndef LOGURU_USE_LOCALE +#define LOGURU_USE_LOCALE 0 +#endif + +#ifndef LOGURU_WITH_FILEABS +#define LOGURU_WITH_FILEABS 0 +#endif + +#ifndef LOGURU_RTTI +#if defined(__clang__) +#if __has_feature(cxx_rtti) +#define LOGURU_RTTI 1 +#endif +#elif defined(__GNUG__) +#if defined(__GXX_RTTI) +#define LOGURU_RTTI 1 +#endif +#elif defined(_MSC_VER) +#if defined(_CPPRTTI) +#define LOGURU_RTTI 1 +#endif +#endif +#endif + +#ifdef LOGURU_USE_ANONYMOUS_NAMESPACE +#define LOGURU_ANONYMOUS_NAMESPACE_BEGIN namespace { +#define LOGURU_ANONYMOUS_NAMESPACE_END } +#else +#define LOGURU_ANONYMOUS_NAMESPACE_BEGIN +#define LOGURU_ANONYMOUS_NAMESPACE_END +#endif + +// -------------------------------------------------------------------- +// Utility macros + +#define LOGURU_CONCATENATE_IMPL(s1, s2) s1 ## s2 +#define LOGURU_CONCATENATE(s1, s2) LOGURU_CONCATENATE_IMPL(s1, s2) + +#ifdef __COUNTER__ +# define LOGURU_ANONYMOUS_VARIABLE(str) LOGURU_CONCATENATE(str, __COUNTER__) +#else +# define LOGURU_ANONYMOUS_VARIABLE(str) LOGURU_CONCATENATE(str, __LINE__) +#endif + +#if defined(__clang__) || defined(__GNUC__) + // Helper macro for declaring functions as having similar signature to printf. 
+ // This allows the compiler to catch format errors at compile-time. +#define LOGURU_PRINTF_LIKE(fmtarg, firstvararg) __attribute__((__format__ (__printf__, fmtarg, firstvararg))) +#define LOGURU_FORMAT_STRING_TYPE const char* +#elif defined(_MSC_VER) +#define LOGURU_PRINTF_LIKE(fmtarg, firstvararg) +#define LOGURU_FORMAT_STRING_TYPE _In_z_ _Printf_format_string_ const char* +#else +#define LOGURU_PRINTF_LIKE(fmtarg, firstvararg) +#define LOGURU_FORMAT_STRING_TYPE const char* +#endif + +// Used to mark log_and_abort for the benefit of the static analyzer and optimizer. +#if defined(_MSC_VER) +#define LOGURU_NORETURN __declspec(noreturn) +#else +#define LOGURU_NORETURN __attribute__((noreturn)) +#endif + +#if defined(_MSC_VER) +#define LOGURU_PREDICT_FALSE(x) (x) +#define LOGURU_PREDICT_TRUE(x) (x) +#else +#define LOGURU_PREDICT_FALSE(x) (__builtin_expect(x, 0)) +#define LOGURU_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1)) +#endif + +#if LOGURU_USE_FMTLIB +#include +#define LOGURU_FMT(x) "{:" #x "}" +#else +#define LOGURU_FMT(x) "%" #x +#endif + +#ifdef _WIN32 +#define STRDUP(str) _strdup(str) +#else +#define STRDUP(str) strdup(str) +#endif + +#include + +// -------------------------------------------------------------------- +LOGURU_ANONYMOUS_NAMESPACE_BEGIN + +namespace loguru { + // Simple RAII ownership of a char*. + class LOGURU_EXPORT Text { + public: + explicit Text(char* owned_str) : _str(owned_str) {} + ~Text(); + Text(Text&& t) + { + _str = t._str; + t._str = nullptr; + } + Text(Text& t) = delete; + Text& operator=(Text& t) = delete; + void operator=(Text&& t) = delete; + + const char* c_str() const { return _str; } + bool empty() const { return _str == nullptr || *_str == '\0'; } + + char* release() + { + auto result = _str; + _str = nullptr; + return result; + } + + private: + char* _str; + }; + + // Like printf, but returns the formated text. +#if LOGURU_USE_FMTLIB + LOGURU_EXPORT + Text vtextprintf(const char* format, fmt::format_args args); + + template + LOGURU_EXPORT + Text textprintf(LOGURU_FORMAT_STRING_TYPE format, const Args&... args) + { + return vtextprintf(format, fmt::make_format_args(args...)); + } +#else + LOGURU_EXPORT + Text textprintf(LOGURU_FORMAT_STRING_TYPE format, ...) LOGURU_PRINTF_LIKE(1, 2); +#endif + + // Overloaded for variadic template matching. + LOGURU_EXPORT + Text textprintf(); + + using Verbosity = int; + +#undef FATAL +#undef ERROR +#undef WARNING +#undef INFO +#undef MAX + + enum NamedVerbosity : Verbosity { + // Used to mark an invalid verbosity. Do not log to this level. + Verbosity_INVALID = -10, // Never do LOG_F(INVALID) + + // You may use Verbosity_OFF on g_stderr_verbosity, but for nothing else! + Verbosity_OFF = -9, // Never do LOG_F(OFF) + + // Prefer to use ABORT_F or ABORT_S over LOG_F(FATAL) or LOG_S(FATAL). + Verbosity_FATAL = -3, + Verbosity_ERROR = -2, + Verbosity_WARNING = -1, + + // Normal messages. By default written to stderr. + Verbosity_INFO = 0, + + // Same as Verbosity_INFO in every way. + Verbosity_0 = 0, + + // Verbosity levels 1-9 are generally not written to stderr, but are written to file. + Verbosity_1 = +1, + Verbosity_2 = +2, + Verbosity_3 = +3, + Verbosity_4 = +4, + Verbosity_5 = +5, + Verbosity_6 = +6, + Verbosity_7 = +7, + Verbosity_8 = +8, + Verbosity_9 = +9, + + // Do not use higher verbosity levels, as that will make grepping log files harder. + Verbosity_MAX = +9, + }; + + struct Message { + // You would generally print a Message by just concatenating the buffers without spacing. 
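+ // For instance, a custom log_handler_t might do (illustrative sketch):
+ //     printf("%s%s%s%s\n", message.preamble, message.indentation, message.prefix, message.message);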
+ // Optionally, ignore preamble and indentation. + Verbosity verbosity; // Already part of preamble + const char* filename; // Already part of preamble + unsigned line; // Already part of preamble + const char* preamble; // Date, time, uptime, thread, file:line, verbosity. + const char* indentation; // Just a bunch of spacing. + const char* prefix; // Assertion failure info goes here (or ""). + const char* message; // User message goes here. + }; + + /* Everything with a verbosity equal or greater than g_stderr_verbosity will be + written to stderr. You can set this in code or via the -v argument. + Set to loguru::Verbosity_OFF to write nothing to stderr. + Default is 0, i.e. only log ERROR, WARNING and INFO are written to stderr. + */ + LOGURU_EXPORT extern Verbosity g_stderr_verbosity; + LOGURU_EXPORT extern bool g_colorlogtostderr; // True by default. + LOGURU_EXPORT extern unsigned g_flush_interval_ms; // 0 (unbuffered) by default. + LOGURU_EXPORT extern bool g_preamble_header; // Prepend each log start by a descriptions line with all columns name? True by default. + LOGURU_EXPORT extern bool g_preamble; // Prefix each log line with date, time etc? True by default. + + /* Specify the verbosity used by loguru to log its info messages including the header + logged when logged::init() is called or on exit. Default is 0 (INFO). + */ + LOGURU_EXPORT extern Verbosity g_internal_verbosity; + + // Turn off individual parts of the preamble + LOGURU_EXPORT extern bool g_preamble_date; // The date field + LOGURU_EXPORT extern bool g_preamble_time; // The time of the current day + LOGURU_EXPORT extern bool g_preamble_uptime; // The time since init call + LOGURU_EXPORT extern bool g_preamble_thread; // The logging thread + LOGURU_EXPORT extern bool g_preamble_file; // The file from which the log originates from + LOGURU_EXPORT extern bool g_preamble_verbose; // The verbosity field + LOGURU_EXPORT extern bool g_preamble_pipe; // The pipe symbol right before the message + + // May not throw! + typedef void (*log_handler_t)(void* user_data, const Message& message); + typedef void (*close_handler_t)(void* user_data); + typedef void (*flush_handler_t)(void* user_data); + + // May throw if that's how you'd like to handle your errors. + typedef void (*fatal_handler_t)(const Message& message); + + // Given a verbosity level, return the level's name or nullptr. + typedef const char* (*verbosity_to_name_t)(Verbosity verbosity); + + // Given a verbosity level name, return the verbosity level or + // Verbosity_INVALID if name is not recognized. + typedef Verbosity(*name_to_verbosity_t)(const char* name); + + struct SignalOptions { + /// Make Loguru try to do unsafe but useful things, + /// like printing a stack trace, when catching signals. + /// This may lead to bad things like deadlocks in certain situations. + bool unsafe_signal_handler = true; + + /// Should Loguru catch SIGABRT ? + bool sigabrt = true; + + /// Should Loguru catch SIGBUS ? + bool sigbus = true; + + /// Should Loguru catch SIGFPE ? + bool sigfpe = true; + + /// Should Loguru catch SIGILL ? + bool sigill = true; + + /// Should Loguru catch SIGINT ? + bool sigint = true; + + /// Should Loguru catch SIGSEGV ? + bool sigsegv = true; + + /// Should Loguru catch SIGTERM ? 
+ bool sigterm = true; + + static SignalOptions none() + { + SignalOptions options; + options.unsafe_signal_handler = false; + options.sigabrt = false; + options.sigbus = false; + options.sigfpe = false; + options.sigill = false; + options.sigint = false; + options.sigsegv = false; + options.sigterm = false; + return options; + } + }; + + // Runtime options passed to loguru::init + struct Options { + // This allows you to use something else instead of "-v" via verbosity_flag. + // Set to nullptr if you don't want Loguru to parse verbosity from the args. + const char* verbosity_flag = "-v"; + + // loguru::init will set the name of the calling thread to this. + // If you don't want Loguru to set the name of the main thread, + // set this to nullptr. + // NOTE: on SOME platforms loguru::init will only overwrite the thread name + // if a thread name has not already been set. + // To always set a thread name, use loguru::set_thread_name instead. + const char* main_thread_name = "main thread"; + + SignalOptions signal_options; + }; + + /* Should be called from the main thread. + You don't *need* to call this, but if you do you get: + * Signal handlers installed + * Program arguments logged + * Working dir logged + * Optional -v verbosity flag parsed + * Main thread name set to "main thread" + * Explanation of the preamble (date, thread name, etc) logged + + loguru::init() will look for arguments meant for loguru and remove them. + Arguments meant for loguru are: + -v n Set loguru::g_stderr_verbosity level. Examples: + -v 3 Show verbosity level 3 and lower. + -v 0 Only show INFO, WARNING, ERROR, FATAL (default). + -v INFO Only show INFO, WARNING, ERROR, FATAL (default). + -v WARNING Only show WARNING, ERROR, FATAL. + -v ERROR Only show ERROR, FATAL. + -v FATAL Only show FATAL. + -v OFF Turn off logging to stderr. + + Tip: You can set g_stderr_verbosity before calling loguru::init. + That way you can set the default but have the user override it with the -v flag. + Note that -v does not affect file logging (see loguru::add_file). + + You can you something other than the -v flag by setting the verbosity_flag option. + */ + LOGURU_EXPORT + void init(int& argc, char* argv[], const Options& options = {}); + + // Will call remove_all_callbacks(). After calling this, logging will still go to stderr. + // You generally don't need to call this. + LOGURU_EXPORT + void shutdown(); + + // What ~ will be replaced with, e.g. "/home/your_user_name/" + LOGURU_EXPORT + const char* home_dir(); + + /* Returns the name of the app as given in argv[0] but without leading path. + That is, if argv[0] is "../foo/app" this will return "app". + */ + LOGURU_EXPORT + const char* argv0_filename(); + + // Returns all arguments given to loguru::init(), but escaped with a single space as separator. + LOGURU_EXPORT + const char* arguments(); + + // Returns the path to the current working dir when loguru::init() was called. + LOGURU_EXPORT + const char* current_dir(); + + // Returns the part of the path after the last / or \ (if any). + LOGURU_EXPORT + const char* filename(const char* path); + + // e.g. "foo/bar/baz.ext" will create the directories "foo/" and "foo/bar/" + LOGURU_EXPORT + bool create_directories(const char* file_path_const); + + // Writes date and time with millisecond precision, e.g. "20151017_161503.123" + LOGURU_EXPORT + void write_date_time(char* buff, unsigned long long buff_size); + + // Helper: thread-safe version strerror + LOGURU_EXPORT + Text errno_as_text(); + + /* Given a prefix of e.g. 
"~/loguru/" this might return + "/home/your_username/loguru/app_name/20151017_161503.123.log" + + where "app_name" is a sanitized version of argv[0]. + */ + LOGURU_EXPORT + void suggest_log_path(const char* prefix, char* buff, unsigned long long buff_size); + + enum FileMode { Truncate, Append }; + + /* Will log to a file at the given path. + Any logging message with a verbosity lower or equal to + the given verbosity will be included. + The function will create all directories in 'path' if needed. + If path starts with a ~, it will be replaced with loguru::home_dir() + To stop the file logging, just call loguru::remove_callback(path) with the same path. + */ + LOGURU_EXPORT + bool add_file(const char* path, FileMode mode, Verbosity verbosity); + + LOGURU_EXPORT + // Send logs to syslog with LOG_USER facility (see next call) + bool add_syslog(const char* app_name, Verbosity verbosity); + LOGURU_EXPORT + // Send logs to syslog with your own choice of facility (LOG_USER, LOG_AUTH, ...) + // see loguru.cpp: syslog_log() for more details. + bool add_syslog(const char* app_name, Verbosity verbosity, int facility); + + /* Will be called right before abort(). + You can for instance use this to print custom error messages, or throw an exception. + Feel free to call LOG:ing function from this, but not FATAL ones! */ + LOGURU_EXPORT + void set_fatal_handler(fatal_handler_t handler); + + // Get the current fatal handler, if any. Default value is nullptr. + LOGURU_EXPORT + fatal_handler_t get_fatal_handler(); + + /* Will be called on each log messages with a verbosity less or equal to the given one. + Useful for displaying messages on-screen in a game, for example. + The given on_close is also expected to flush (if desired). + */ + LOGURU_EXPORT + void add_callback( + const char* id, + log_handler_t callback, + void* user_data, + Verbosity verbosity, + close_handler_t on_close = nullptr, + flush_handler_t on_flush = nullptr); + + /* Set a callback that returns custom verbosity level names. If callback + is nullptr or returns nullptr, default log names will be used. + */ + LOGURU_EXPORT + void set_verbosity_to_name_callback(verbosity_to_name_t callback); + + /* Set a callback that returns the verbosity level matching a name. The + callback should return Verbosity_INVALID if the name is not + recognized. + */ + LOGURU_EXPORT + void set_name_to_verbosity_callback(name_to_verbosity_t callback); + + /* Get a custom name for a specific verbosity, if one exists, or nullptr. */ + LOGURU_EXPORT + const char* get_verbosity_name(Verbosity verbosity); + + /* Get the verbosity enum value from a custom 4-character level name, if one exists. + If the name does not match a custom level name, Verbosity_INVALID is returned. + */ + LOGURU_EXPORT + Verbosity get_verbosity_from_name(const char* name); + + // Returns true iff the callback was found (and removed). + LOGURU_EXPORT + bool remove_callback(const char* id); + + // Shut down all file logging and any other callback hooks installed. + LOGURU_EXPORT + void remove_all_callbacks(); + + // Returns the maximum of g_stderr_verbosity and all file/custom outputs. 
+ LOGURU_EXPORT + Verbosity current_verbosity_cutoff(); + +#if LOGURU_USE_FMTLIB + // Internal functions + LOGURU_EXPORT + void vlog(Verbosity verbosity, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, fmt::format_args args); + LOGURU_EXPORT + void raw_vlog(Verbosity verbosity, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, fmt::format_args args); + + // Actual logging function. Use the LOG macro instead of calling this directly. + template + LOGURU_EXPORT + void log(Verbosity verbosity, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, const Args &... args) + { + vlog(verbosity, file, line, format, fmt::make_format_args(args...)); + } + + // Log without any preamble or indentation. + template + LOGURU_EXPORT + void raw_log(Verbosity verbosity, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, const Args &... args) + { + raw_vlog(verbosity, file, line, format, fmt::make_format_args(args...)); + } +#else // LOGURU_USE_FMTLIB? + // Actual logging function. Use the LOG macro instead of calling this directly. + LOGURU_EXPORT + void log(Verbosity verbosity, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, ...) LOGURU_PRINTF_LIKE(4, 5); + + // Actual logging function. + LOGURU_EXPORT + void vlog(Verbosity verbosity, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, va_list) LOGURU_PRINTF_LIKE(4, 0); + + // Log without any preamble or indentation. + LOGURU_EXPORT + void raw_log(Verbosity verbosity, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, ...) LOGURU_PRINTF_LIKE(4, 5); +#endif // !LOGURU_USE_FMTLIB + + // Helper class for LOG_SCOPE_F + class LOGURU_EXPORT LogScopeRAII { + public: + LogScopeRAII() : _file(nullptr) {} // No logging + LogScopeRAII(Verbosity verbosity, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, va_list vlist) LOGURU_PRINTF_LIKE(5, 0); + LogScopeRAII(Verbosity verbosity, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, ...) LOGURU_PRINTF_LIKE(5, 6); + ~LogScopeRAII(); + + void Init(LOGURU_FORMAT_STRING_TYPE format, va_list vlist) LOGURU_PRINTF_LIKE(2, 0); + +#if defined(_MSC_VER) && _MSC_VER > 1800 + // older MSVC default move ctors close the scope on move. See + // issue #43 + LogScopeRAII(LogScopeRAII&& other) + : _verbosity(other._verbosity) + , _file(other._file) + , _line(other._line) + , _indent_stderr(other._indent_stderr) + , _start_time_ns(other._start_time_ns) + { + // Make sure the tmp object's destruction doesn't close the scope: + other._file = nullptr; + + for (unsigned int i = 0; i < LOGURU_SCOPE_TEXT_SIZE; ++i) { + _name[i] = other._name[i]; + } + } +#else + LogScopeRAII(LogScopeRAII&&) = default; +#endif + + private: + LogScopeRAII(const LogScopeRAII&) = delete; + LogScopeRAII& operator=(const LogScopeRAII&) = delete; + void operator=(LogScopeRAII&&) = delete; + + Verbosity _verbosity; + const char* _file; // Set to null if we are disabled due to verbosity + unsigned _line; + bool _indent_stderr; // Did we? + long long _start_time_ns; + char _name[LOGURU_SCOPE_TEXT_SIZE]; + }; + + // Marked as 'noreturn' for the benefit of the static analyzer and optimizer. + // stack_trace_skip is the number of extrace stack frames to skip above log_and_abort. 
+#if LOGURU_USE_FMTLIB + LOGURU_EXPORT + LOGURU_NORETURN void vlog_and_abort(int stack_trace_skip, const char* expr, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, fmt::format_args); + template + LOGURU_EXPORT + LOGURU_NORETURN void log_and_abort(int stack_trace_skip, const char* expr, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, const Args&... args) + { + vlog_and_abort(stack_trace_skip, expr, file, line, format, fmt::make_format_args(args...)); + } +#else + LOGURU_EXPORT + LOGURU_NORETURN void log_and_abort(int stack_trace_skip, const char* expr, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, ...) LOGURU_PRINTF_LIKE(5, 6); +#endif + LOGURU_EXPORT + LOGURU_NORETURN void log_and_abort(int stack_trace_skip, const char* expr, const char* file, unsigned line); + + // Flush output to stderr and files. + // If g_flush_interval_ms is set to non-zero, this will be called automatically this often. + // If not set, you do not need to call this at all. + LOGURU_EXPORT + void flush(); + + template inline Text format_value(const T&) { return textprintf("N/A"); } + template<> inline Text format_value(const char& v) { return textprintf(LOGURU_FMT(c), v); } + template<> inline Text format_value(const int& v) { return textprintf(LOGURU_FMT(d), v); } + template<> inline Text format_value(const float& v) { return textprintf(LOGURU_FMT(f), v); } + template<> inline Text format_value(const double& v) { return textprintf(LOGURU_FMT(f), v); } + +#if LOGURU_USE_FMTLIB + template<> inline Text format_value(const unsigned int& v) { return textprintf(LOGURU_FMT(d), v); } + template<> inline Text format_value(const long& v) { return textprintf(LOGURU_FMT(d), v); } + template<> inline Text format_value(const unsigned long& v) { return textprintf(LOGURU_FMT(d), v); } + template<> inline Text format_value(const long long& v) { return textprintf(LOGURU_FMT(d), v); } + template<> inline Text format_value(const unsigned long long& v) { return textprintf(LOGURU_FMT(d), v); } +#else + template<> inline Text format_value(const unsigned int& v) { return textprintf(LOGURU_FMT(u), v); } + template<> inline Text format_value(const long& v) { return textprintf(LOGURU_FMT(lu), v); } + template<> inline Text format_value(const unsigned long& v) { return textprintf(LOGURU_FMT(ld), v); } + template<> inline Text format_value(const long long& v) { return textprintf(LOGURU_FMT(llu), v); } + template<> inline Text format_value(const unsigned long long& v) { return textprintf(LOGURU_FMT(lld), v); } +#endif + + /* Thread names can be set for the benefit of readable logs. + If you do not set the thread name, a hex id will be shown instead. + These thread names may or may not be the same as the system thread names, + depending on the system. + Try to limit the thread name to 15 characters or less. */ + LOGURU_EXPORT + void set_thread_name(const char* name); + + /* Returns the thread name for this thread. + On most *nix systems this will return the system thread name (settable from both within and without Loguru). + On other systems it will return whatever you set in `set_thread_name()`; + If no thread name is set, this will return a hexadecimal thread id. + `length` should be the number of bytes available in the buffer. + 17 is a good number for length. + `right_align_hex_id` means any hexadecimal thread id will be written to the end of buffer. 
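+    A minimal sketch:
+        char thread_name[17];
+        loguru::get_thread_name(thread_name, sizeof(thread_name), false);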
+ */ + LOGURU_EXPORT + void get_thread_name(char* buffer, unsigned long long length, bool right_align_hex_id); + + /* Generates a readable stacktrace as a string. + 'skip' specifies how many stack frames to skip. + For instance, the default skip (1) means: + don't include the call to loguru::stacktrace in the stack trace. */ + LOGURU_EXPORT + Text stacktrace(int skip = 1); + + /* Add a string to be replaced with something else in the stack output. + + For instance, instead of having a stack trace look like this: + 0x41f541 some_function(std::basic_ofstream >&) + You can clean it up with: + auto verbose_type_name = loguru::demangle(typeid(std::ofstream).name()); + loguru::add_stack_cleanup(verbose_type_name.c_str(); "std::ofstream"); + So the next time you will instead see: + 0x41f541 some_function(std::ofstream&) + + `replace_with_this` must be shorter than `find_this`. + */ + LOGURU_EXPORT + void add_stack_cleanup(const char* find_this, const char* replace_with_this); + + // Example: demangle(typeid(std::ofstream).name()) -> "std::basic_ofstream >" + LOGURU_EXPORT + Text demangle(const char* name); + + // ------------------------------------------------------------------------ + /* + Not all terminals support colors, but if they do, and g_colorlogtostderr + is set, Loguru will write them to stderr to make errors in red, etc. + + You also have the option to manually use them, via the function below. + + Note, however, that if you do, the color codes could end up in your logfile! + + This means if you intend to use them functions you should either: + * Use them on the stderr/stdout directly (bypass Loguru). + * Don't add file outputs to Loguru. + * Expect some \e[1m things in your logfile. + + Usage: + printf("%sRed%sGreen%sBold green%sClear again\n", + loguru::terminal_red(), loguru::terminal_green(), + loguru::terminal_bold(), loguru::terminal_reset()); + + If the terminal at hand does not support colors the above output + will just not have funky \e[1m things showing. + */ + + // Do the output terminal support colors? + LOGURU_EXPORT + bool terminal_has_color(); + + // Colors + LOGURU_EXPORT const char* terminal_black(); + LOGURU_EXPORT const char* terminal_red(); + LOGURU_EXPORT const char* terminal_green(); + LOGURU_EXPORT const char* terminal_yellow(); + LOGURU_EXPORT const char* terminal_blue(); + LOGURU_EXPORT const char* terminal_purple(); + LOGURU_EXPORT const char* terminal_cyan(); + LOGURU_EXPORT const char* terminal_light_gray(); + LOGURU_EXPORT const char* terminal_light_red(); + LOGURU_EXPORT const char* terminal_white(); + + // Formating + LOGURU_EXPORT const char* terminal_bold(); + LOGURU_EXPORT const char* terminal_underline(); + + // You should end each line with this! + LOGURU_EXPORT const char* terminal_reset(); + + // -------------------------------------------------------------------- + // Error context related: + + struct StringStream; + + // Use this in your EcEntryBase::print_value overload. 
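+ // A sketch of such an overload (assuming your entry stores a `const char* _value` member):
+ //     void print_value(StringStream& out_string_stream) const override
+ //     { loguru::stream_print(out_string_stream, _value); }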
+ LOGURU_EXPORT + void stream_print(StringStream& out_string_stream, const char* text); + + class LOGURU_EXPORT EcEntryBase { + public: + EcEntryBase(const char* file, unsigned line, const char* descr); + ~EcEntryBase(); + EcEntryBase(const EcEntryBase&) = delete; + EcEntryBase(EcEntryBase&&) = delete; + EcEntryBase& operator=(const EcEntryBase&) = delete; + EcEntryBase& operator=(EcEntryBase&&) = delete; + + virtual void print_value(StringStream& out_string_stream) const = 0; + + EcEntryBase* previous() const { return _previous; } + + // private: + const char* _file; + unsigned _line; + const char* _descr; + EcEntryBase* _previous; + }; + + template + class EcEntryData : public EcEntryBase { + public: + using Printer = Text(*)(T data); + + EcEntryData(const char* file, unsigned line, const char* descr, T data, Printer&& printer) + : EcEntryBase(file, line, descr), _data(data), _printer(printer) + { + } + + virtual void print_value(StringStream& out_string_stream) const override + { + const auto str = _printer(_data); + stream_print(out_string_stream, str.c_str()); + } + + private: + T _data; + Printer _printer; + }; + + // template + // class EcEntryLambda : public EcEntryBase + // { + // public: + // EcEntryLambda(const char* file, unsigned line, const char* descr, Printer&& printer) + // : EcEntryBase(file, line, descr), _printer(std::move(printer)) {} + + // virtual void print_value(StringStream& out_string_stream) const override + // { + // const auto str = _printer(); + // stream_print(out_string_stream, str.c_str()); + // } + + // private: + // Printer _printer; + // }; + + // template + // EcEntryLambda make_ec_entry_lambda(const char* file, unsigned line, const char* descr, Printer&& printer) + // { + // return {file, line, descr, std::move(printer)}; + // } + + template + struct decay_char_array { using type = T; }; + + template + struct decay_char_array { using type = const char*; }; + + template + struct make_const_ptr { using type = T; }; + + template + struct make_const_ptr { using type = const T*; }; + + template + struct make_ec_type { using type = typename make_const_ptr::type>::type; }; + + /* A stack trace gives you the names of the function at the point of a crash. + With ERROR_CONTEXT, you can also get the values of select local variables. + Usage: + + void process_customers(const std::string& filename) + { + ERROR_CONTEXT("Processing file", filename.c_str()); + for (int customer_index : ...) + { + ERROR_CONTEXT("Customer index", customer_index); + ... + } + } + + The context is in effect during the scope of the ERROR_CONTEXT. + Use loguru::get_error_context() to get the contents of the active error contexts. + + Example result: + + ------------------------------------------------ + [ErrorContext] main.cpp:416 Processing file: "customers.json" + [ErrorContext] main.cpp:417 Customer index: 42 + ------------------------------------------------ + + Error contexts are printed automatically on crashes, and only on crashes. + This makes them much faster than logging the value of a variable. 
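+    If you want the active contexts outside of a crash, you can fetch them yourself (sketch):
+        loguru::Text context = loguru::get_error_context();
+        LOG_F(INFO, "%s", context.c_str());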
+ */ +#define ERROR_CONTEXT(descr, data) \ + const loguru::EcEntryData::type> \ + LOGURU_ANONYMOUS_VARIABLE(error_context_scope_)( \ + __FILE__, __LINE__, descr, data, \ + static_cast::type>::Printer>(loguru::ec_to_text) ) // For better error messages + + /* + #define ERROR_CONTEXT(descr, data) \ + const auto LOGURU_ANONYMOUS_VARIABLE(error_context_scope_)( \ + loguru::make_ec_entry_lambda(__FILE__, __LINE__, descr, \ + [=](){ return loguru::ec_to_text(data); })) + */ + + using EcHandle = const EcEntryBase*; + + /* + Get a light-weight handle to the error context stack on this thread. + The handle is valid as long as the current thread has no changes to its error context stack. + You can pass the handle to loguru::get_error_context on another thread. + This can be very useful for when you have a parent thread spawning several working threads, + and you want the error context of the parent thread to get printed (too) when there is an + error on the child thread. You can accomplish this thusly: + + void foo(const char* parameter) + { + ERROR_CONTEXT("parameter", parameter) + const auto parent_ec_handle = loguru::get_thread_ec_handle(); + + std::thread([=]{ + loguru::set_thread_name("child thread"); + ERROR_CONTEXT("parent context", parent_ec_handle); + dangerous_code(); + }.join(); + } + + */ + LOGURU_EXPORT + EcHandle get_thread_ec_handle(); + + // Get a string describing the current stack of error context. Empty string if there is none. + LOGURU_EXPORT + Text get_error_context(); + + // Get a string describing the error context of the given thread handle. + LOGURU_EXPORT + Text get_error_context_for(EcHandle ec_handle); + + // ------------------------------------------------------------------------ + + LOGURU_EXPORT Text ec_to_text(const char* data); + LOGURU_EXPORT Text ec_to_text(char data); + LOGURU_EXPORT Text ec_to_text(int data); + LOGURU_EXPORT Text ec_to_text(unsigned int data); + LOGURU_EXPORT Text ec_to_text(long data); + LOGURU_EXPORT Text ec_to_text(unsigned long data); + LOGURU_EXPORT Text ec_to_text(long long data); + LOGURU_EXPORT Text ec_to_text(unsigned long long data); + LOGURU_EXPORT Text ec_to_text(float data); + LOGURU_EXPORT Text ec_to_text(double data); + LOGURU_EXPORT Text ec_to_text(long double data); + LOGURU_EXPORT Text ec_to_text(EcHandle); + + /* + You can add ERROR_CONTEXT support for your own types by overloading ec_to_text. Here's how: + + some.hpp: + namespace loguru { + Text ec_to_text(MySmallType data) + Text ec_to_text(const MyBigType* data) + } // namespace loguru + + some.cpp: + namespace loguru { + Text ec_to_text(MySmallType small_value) + { + // Called only when needed, i.e. on a crash. + std::string str = small_value.as_string(); // Format 'small_value' here somehow. + return Text{STRDUP(str.c_str())}; + } + + Text ec_to_text(const MyBigType* big_value) + { + // Called only when needed, i.e. on a crash. + std::string str = big_value->as_string(); // Format 'big_value' here somehow. + return Text{STRDUP(str.c_str())}; + } + } // namespace loguru + + Any file that include some.hpp: + void foo(MySmallType small, const MyBigType& big) + { + ERROR_CONTEXT("Small", small); // Copy ´small` by value. + ERROR_CONTEXT("Big", &big); // `big` should not change during this scope! + .... + } + */ +} // namespace loguru + +LOGURU_ANONYMOUS_NAMESPACE_END + +// -------------------------------------------------------------------- +// Logging macros + +// LOG_F(2, "Only logged if verbosity is 2 or higher: %d", some_number); +#define VLOG_F(verbosity, ...) 
\ + ((verbosity) > loguru::current_verbosity_cutoff()) ? (void)0 \ + : loguru::log(verbosity, __FILE__, __LINE__, __VA_ARGS__) + +// LOG_F(INFO, "Foo: %d", some_number); +#define LOG_F(verbosity_name, ...) VLOG_F(loguru::Verbosity_ ## verbosity_name, __VA_ARGS__) + +#define VLOG_IF_F(verbosity, cond, ...) \ + ((verbosity) > loguru::current_verbosity_cutoff() || (cond) == false) \ + ? (void)0 \ + : loguru::log(verbosity, __FILE__, __LINE__, __VA_ARGS__) + +#define LOG_IF_F(verbosity_name, cond, ...) \ + VLOG_IF_F(loguru::Verbosity_ ## verbosity_name, cond, __VA_ARGS__) + +#define VLOG_SCOPE_F(verbosity, ...) \ + loguru::LogScopeRAII LOGURU_ANONYMOUS_VARIABLE(error_context_RAII_) = \ + ((verbosity) > loguru::current_verbosity_cutoff()) ? loguru::LogScopeRAII() : \ + loguru::LogScopeRAII(verbosity, __FILE__, __LINE__, __VA_ARGS__) + +// Raw logging - no preamble, no indentation. Slightly faster than full logging. +#define RAW_VLOG_F(verbosity, ...) \ + ((verbosity) > loguru::current_verbosity_cutoff()) ? (void)0 \ + : loguru::raw_log(verbosity, __FILE__, __LINE__, __VA_ARGS__) + +#define RAW_LOG_F(verbosity_name, ...) RAW_VLOG_F(loguru::Verbosity_ ## verbosity_name, __VA_ARGS__) + +// Use to book-end a scope. Affects logging on all threads. +#define LOG_SCOPE_F(verbosity_name, ...) \ + VLOG_SCOPE_F(loguru::Verbosity_ ## verbosity_name, __VA_ARGS__) + +#define LOG_SCOPE_FUNCTION(verbosity_name) LOG_SCOPE_F(verbosity_name, __func__) + +// ----------------------------------------------- +// ABORT_F macro. Usage: ABORT_F("Cause of error: %s", error_str); + +// Message is optional +#define ABORT_F(...) loguru::log_and_abort(0, "ABORT: ", __FILE__, __LINE__, __VA_ARGS__) + +// -------------------------------------------------------------------- +// CHECK_F macros: + +#define CHECK_WITH_INFO_F(test, info, ...) \ + LOGURU_PREDICT_TRUE((test) == true) ? (void)0 : loguru::log_and_abort(0, "CHECK FAILED: " info " ", __FILE__, \ + __LINE__, ##__VA_ARGS__) + +/* Checked at runtime too. Will print error, then call fatal_handler (if any), then 'abort'. + Note that the test must be boolean. + CHECK_F(ptr); will not compile, but CHECK_F(ptr != nullptr); will. */ +#define CHECK_F(test, ...) CHECK_WITH_INFO_F(test, #test, ##__VA_ARGS__) + +#define CHECK_NOTNULL_F(x, ...) CHECK_WITH_INFO_F((x) != nullptr, #x " != nullptr", ##__VA_ARGS__) + +#define CHECK_OP_F(expr_left, expr_right, op, ...) \ + do \ + { \ + auto val_left = expr_left; \ + auto val_right = expr_right; \ + if (! LOGURU_PREDICT_TRUE(val_left op val_right)) \ + { \ + auto str_left = loguru::format_value(val_left); \ + auto str_right = loguru::format_value(val_right); \ + auto fail_info = loguru::textprintf("CHECK FAILED: " LOGURU_FMT(s) " " LOGURU_FMT(s) " " LOGURU_FMT(s) " (" LOGURU_FMT(s) " " LOGURU_FMT(s) " " LOGURU_FMT(s) ") ", \ + #expr_left, #op, #expr_right, str_left.c_str(), #op, str_right.c_str()); \ + auto user_msg = loguru::textprintf(__VA_ARGS__); \ + loguru::log_and_abort(0, fail_info.c_str(), __FILE__, __LINE__, \ + LOGURU_FMT(s), user_msg.c_str()); \ + } \ + } while (false) + +#ifndef LOGURU_DEBUG_LOGGING +#ifndef NDEBUG +#define LOGURU_DEBUG_LOGGING 1 +#else +#define LOGURU_DEBUG_LOGGING 0 +#endif +#endif + +#if LOGURU_DEBUG_LOGGING + // Debug logging enabled: +#define DLOG_F(verbosity_name, ...) LOG_F(verbosity_name, __VA_ARGS__) +#define DVLOG_F(verbosity, ...) VLOG_F(verbosity, __VA_ARGS__) +#define DLOG_IF_F(verbosity_name, ...) LOG_IF_F(verbosity_name, __VA_ARGS__) +#define DVLOG_IF_F(verbosity, ...) 
VLOG_IF_F(verbosity, __VA_ARGS__) +#define DRAW_LOG_F(verbosity_name, ...) RAW_LOG_F(verbosity_name, __VA_ARGS__) +#define DRAW_VLOG_F(verbosity, ...) RAW_VLOG_F(verbosity, __VA_ARGS__) +#else + // Debug logging disabled: +#define DLOG_F(verbosity_name, ...) +#define DVLOG_F(verbosity, ...) +#define DLOG_IF_F(verbosity_name, ...) +#define DVLOG_IF_F(verbosity, ...) +#define DRAW_LOG_F(verbosity_name, ...) +#define DRAW_VLOG_F(verbosity, ...) +#endif + +#define CHECK_EQ_F(a, b, ...) CHECK_OP_F(a, b, ==, ##__VA_ARGS__) +#define CHECK_NE_F(a, b, ...) CHECK_OP_F(a, b, !=, ##__VA_ARGS__) +#define CHECK_LT_F(a, b, ...) CHECK_OP_F(a, b, < , ##__VA_ARGS__) +#define CHECK_GT_F(a, b, ...) CHECK_OP_F(a, b, > , ##__VA_ARGS__) +#define CHECK_LE_F(a, b, ...) CHECK_OP_F(a, b, <=, ##__VA_ARGS__) +#define CHECK_GE_F(a, b, ...) CHECK_OP_F(a, b, >=, ##__VA_ARGS__) + +#ifndef LOGURU_DEBUG_CHECKS +#ifndef NDEBUG +#define LOGURU_DEBUG_CHECKS 1 +#else +#define LOGURU_DEBUG_CHECKS 0 +#endif +#endif + +#if LOGURU_DEBUG_CHECKS + // Debug checks enabled: +#define DCHECK_F(test, ...) CHECK_F(test, ##__VA_ARGS__) +#define DCHECK_NOTNULL_F(x, ...) CHECK_NOTNULL_F(x, ##__VA_ARGS__) +#define DCHECK_EQ_F(a, b, ...) CHECK_EQ_F(a, b, ##__VA_ARGS__) +#define DCHECK_NE_F(a, b, ...) CHECK_NE_F(a, b, ##__VA_ARGS__) +#define DCHECK_LT_F(a, b, ...) CHECK_LT_F(a, b, ##__VA_ARGS__) +#define DCHECK_LE_F(a, b, ...) CHECK_LE_F(a, b, ##__VA_ARGS__) +#define DCHECK_GT_F(a, b, ...) CHECK_GT_F(a, b, ##__VA_ARGS__) +#define DCHECK_GE_F(a, b, ...) CHECK_GE_F(a, b, ##__VA_ARGS__) +#else + // Debug checks disabled: +#define DCHECK_F(test, ...) +#define DCHECK_NOTNULL_F(x, ...) +#define DCHECK_EQ_F(a, b, ...) +#define DCHECK_NE_F(a, b, ...) +#define DCHECK_LT_F(a, b, ...) +#define DCHECK_LE_F(a, b, ...) +#define DCHECK_GT_F(a, b, ...) +#define DCHECK_GE_F(a, b, ...) +#endif // NDEBUG + + +#if LOGURU_REDEFINE_ASSERT +#undef assert +#ifndef NDEBUG + // Debug: +#define assert(test) CHECK_WITH_INFO_F(!!(test), #test) // HACK +#else +#define assert(test) +#endif +#endif // LOGURU_REDEFINE_ASSERT + +#endif // LOGURU_HAS_DECLARED_FORMAT_HEADER + +// ---------------------------------------------------------------------------- +// .dP"Y8 888888 88""Yb 888888 db 8b d8 .dP"Y8 +// `Ybo." 88 88__dP 88__ dPYb 88b d88 `Ybo." +// o.`Y8b 88 88"Yb 88"" dP__Yb 88YbdP88 o.`Y8b +// 8bodP' 88 88 Yb 888888 dP""""Yb 88 YY 88 8bodP' + +#if LOGURU_WITH_STREAMS +#ifndef LOGURU_HAS_DECLARED_STREAMS_HEADER +#define LOGURU_HAS_DECLARED_STREAMS_HEADER + +/* This file extends loguru to enable std::stream-style logging, a la Glog. + It's an optional feature behind the LOGURU_WITH_STREAMS settings + because including it everywhere will slow down compilation times. +*/ + +#include +#include // Adds about 38 kLoC on clang. +#include + +LOGURU_ANONYMOUS_NAMESPACE_BEGIN + +namespace loguru { + // Like sprintf, but returns the formated text. + LOGURU_EXPORT + std::string strprintf(LOGURU_FORMAT_STRING_TYPE format, ...) LOGURU_PRINTF_LIKE(1, 2); + + // Like vsprintf, but returns the formated text. + LOGURU_EXPORT + std::string vstrprintf(LOGURU_FORMAT_STRING_TYPE format, va_list) LOGURU_PRINTF_LIKE(1, 0); + + class LOGURU_EXPORT StreamLogger { + public: + StreamLogger(Verbosity verbosity, const char* file, unsigned line) : _verbosity(verbosity), _file(file), _line(line) {} + ~StreamLogger() noexcept(false); + + template + StreamLogger& operator<<(const T& t) + { + _ss << t; + return *this; + } + + // std::endl and other iomanip:s. 
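+ // For example (sketch): LOG_S(INFO) << "Done in " << 1.5 << " s" << std::endl;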
+ StreamLogger& operator<<(std::ostream& (*f)(std::ostream&)) + { + f(_ss); + return *this; + } + + private: + Verbosity _verbosity; + const char* _file; + unsigned _line; + std::ostringstream _ss; + }; + + class LOGURU_EXPORT AbortLogger { + public: + AbortLogger(const char* expr, const char* file, unsigned line) : _expr(expr), _file(file), _line(line) {} + LOGURU_NORETURN ~AbortLogger() noexcept(false); + + template + AbortLogger& operator<<(const T& t) + { + _ss << t; + return *this; + } + + // std::endl and other iomanip:s. + AbortLogger& operator<<(std::ostream& (*f)(std::ostream&)) + { + f(_ss); + return *this; + } + + private: + const char* _expr; + const char* _file; + unsigned _line; + std::ostringstream _ss; + }; + + class LOGURU_EXPORT Voidify { + public: + Voidify() {} + // This has to be an operator with a precedence lower than << but higher than ?: + void operator&(const StreamLogger&) {} + void operator&(const AbortLogger&) {} + }; + + /* Helper functions for CHECK_OP_S macro. + GLOG trick: The (int, int) specialization works around the issue that the compiler + will not instantiate the template version of the function on values of unnamed enum type. */ +#define DEFINE_CHECK_OP_IMPL(name, op) \ + template \ + inline std::string* name(const char* expr, const T1& v1, const char* op_str, const T2& v2) \ + { \ + if (LOGURU_PREDICT_TRUE(v1 op v2)) { return NULL; } \ + std::ostringstream ss; \ + ss << "CHECK FAILED: " << expr << " (" << v1 << " " << op_str << " " << v2 << ") "; \ + return new std::string(ss.str()); \ + } \ + inline std::string* name(const char* expr, int v1, const char* op_str, int v2) \ + { \ + return name(expr, v1, op_str, v2); \ + } + + DEFINE_CHECK_OP_IMPL(check_EQ_impl, == ) + DEFINE_CHECK_OP_IMPL(check_NE_impl, != ) + DEFINE_CHECK_OP_IMPL(check_LE_impl, <= ) + DEFINE_CHECK_OP_IMPL(check_LT_impl, < ) + DEFINE_CHECK_OP_IMPL(check_GE_impl, >= ) + DEFINE_CHECK_OP_IMPL(check_GT_impl, > ) +#undef DEFINE_CHECK_OP_IMPL + + /* GLOG trick: Function is overloaded for integral types to allow static const integrals + declared in classes and not defined to be used as arguments to CHECK* macros. */ + template + inline const T& referenceable_value(const T& t) { return t; } + inline char referenceable_value(char t) { return t; } + inline unsigned char referenceable_value(unsigned char t) { return t; } + inline signed char referenceable_value(signed char t) { return t; } + inline short referenceable_value(short t) { return t; } + inline unsigned short referenceable_value(unsigned short t) { return t; } + inline int referenceable_value(int t) { return t; } + inline unsigned int referenceable_value(unsigned int t) { return t; } + inline long referenceable_value(long t) { return t; } + inline unsigned long referenceable_value(unsigned long t) { return t; } + inline long long referenceable_value(long long t) { return t; } + inline unsigned long long referenceable_value(unsigned long long t) { return t; } +} // namespace loguru + +LOGURU_ANONYMOUS_NAMESPACE_END + +// ----------------------------------------------- +// Logging macros: + +// usage: LOG_STREAM(INFO) << "Foo " << std::setprecision(10) << some_value; +#define VLOG_IF_S(verbosity, cond) \ + ((verbosity) > loguru::current_verbosity_cutoff() || (cond) == false) \ + ? 
(void)0 \ + : loguru::Voidify() & loguru::StreamLogger(verbosity, __FILE__, __LINE__) +#define LOG_IF_S(verbosity_name, cond) VLOG_IF_S(loguru::Verbosity_ ## verbosity_name, cond) +#define VLOG_S(verbosity) VLOG_IF_S(verbosity, true) +#define LOG_S(verbosity_name) VLOG_S(loguru::Verbosity_ ## verbosity_name) + +// ----------------------------------------------- +// ABORT_S macro. Usage: ABORT_S() << "Causo of error: " << details; + +#define ABORT_S() loguru::Voidify() & loguru::AbortLogger("ABORT: ", __FILE__, __LINE__) + +// ----------------------------------------------- +// CHECK_S macros: + +#define CHECK_WITH_INFO_S(cond, info) \ + LOGURU_PREDICT_TRUE((cond) == true) \ + ? (void)0 \ + : loguru::Voidify() & loguru::AbortLogger("CHECK FAILED: " info " ", __FILE__, __LINE__) + +#define CHECK_S(cond) CHECK_WITH_INFO_S(cond, #cond) +#define CHECK_NOTNULL_S(x) CHECK_WITH_INFO_S((x) != nullptr, #x " != nullptr") + +#define CHECK_OP_S(function_name, expr1, op, expr2) \ + while (auto error_string = loguru::function_name(#expr1 " " #op " " #expr2, \ + loguru::referenceable_value(expr1), #op, \ + loguru::referenceable_value(expr2))) \ + loguru::AbortLogger(error_string->c_str(), __FILE__, __LINE__) + +#define CHECK_EQ_S(expr1, expr2) CHECK_OP_S(check_EQ_impl, expr1, ==, expr2) +#define CHECK_NE_S(expr1, expr2) CHECK_OP_S(check_NE_impl, expr1, !=, expr2) +#define CHECK_LE_S(expr1, expr2) CHECK_OP_S(check_LE_impl, expr1, <=, expr2) +#define CHECK_LT_S(expr1, expr2) CHECK_OP_S(check_LT_impl, expr1, < , expr2) +#define CHECK_GE_S(expr1, expr2) CHECK_OP_S(check_GE_impl, expr1, >=, expr2) +#define CHECK_GT_S(expr1, expr2) CHECK_OP_S(check_GT_impl, expr1, > , expr2) + +#if LOGURU_DEBUG_LOGGING + // Debug logging enabled: +#define DVLOG_IF_S(verbosity, cond) VLOG_IF_S(verbosity, cond) +#define DLOG_IF_S(verbosity_name, cond) LOG_IF_S(verbosity_name, cond) +#define DVLOG_S(verbosity) VLOG_S(verbosity) +#define DLOG_S(verbosity_name) LOG_S(verbosity_name) +#else + // Debug logging disabled: +#define DVLOG_IF_S(verbosity, cond) \ + (true || (verbosity) > loguru::current_verbosity_cutoff() || (cond) == false) \ + ? 
(void)0 \ + : loguru::Voidify() & loguru::StreamLogger(verbosity, __FILE__, __LINE__) + +#define DLOG_IF_S(verbosity_name, cond) DVLOG_IF_S(loguru::Verbosity_ ## verbosity_name, cond) +#define DVLOG_S(verbosity) DVLOG_IF_S(verbosity, true) +#define DLOG_S(verbosity_name) DVLOG_S(loguru::Verbosity_ ## verbosity_name) +#endif + +#if LOGURU_DEBUG_CHECKS + // Debug checks enabled: +#define DCHECK_S(cond) CHECK_S(cond) +#define DCHECK_NOTNULL_S(x) CHECK_NOTNULL_S(x) +#define DCHECK_EQ_S(a, b) CHECK_EQ_S(a, b) +#define DCHECK_NE_S(a, b) CHECK_NE_S(a, b) +#define DCHECK_LT_S(a, b) CHECK_LT_S(a, b) +#define DCHECK_LE_S(a, b) CHECK_LE_S(a, b) +#define DCHECK_GT_S(a, b) CHECK_GT_S(a, b) +#define DCHECK_GE_S(a, b) CHECK_GE_S(a, b) +#else +// Debug checks disabled: +#define DCHECK_S(cond) CHECK_S(true || (cond)) +#define DCHECK_NOTNULL_S(x) CHECK_S(true || (x) != nullptr) +#define DCHECK_EQ_S(a, b) CHECK_S(true || (a) == (b)) +#define DCHECK_NE_S(a, b) CHECK_S(true || (a) != (b)) +#define DCHECK_LT_S(a, b) CHECK_S(true || (a) < (b)) +#define DCHECK_LE_S(a, b) CHECK_S(true || (a) <= (b)) +#define DCHECK_GT_S(a, b) CHECK_S(true || (a) > (b)) +#define DCHECK_GE_S(a, b) CHECK_S(true || (a) >= (b)) +#endif + +#if LOGURU_REPLACE_GLOG +#undef LOG +#undef VLOG +#undef LOG_IF +#undef VLOG_IF +#undef CHECK +#undef CHECK_NOTNULL +#undef CHECK_EQ +#undef CHECK_NE +#undef CHECK_LT +#undef CHECK_LE +#undef CHECK_GT +#undef CHECK_GE +#undef DLOG +#undef DVLOG +#undef DLOG_IF +#undef DVLOG_IF +#undef DCHECK +#undef DCHECK_NOTNULL +#undef DCHECK_EQ +#undef DCHECK_NE +#undef DCHECK_LT +#undef DCHECK_LE +#undef DCHECK_GT +#undef DCHECK_GE +#undef VLOG_IS_ON + +#define LOG LOG_S +#define VLOG VLOG_S +#define LOG_IF LOG_IF_S +#define VLOG_IF VLOG_IF_S +#define CHECK(cond) CHECK_S(!!(cond)) +#define CHECK_NOTNULL CHECK_NOTNULL_S +#define CHECK_EQ CHECK_EQ_S +#define CHECK_NE CHECK_NE_S +#define CHECK_LT CHECK_LT_S +#define CHECK_LE CHECK_LE_S +#define CHECK_GT CHECK_GT_S +#define CHECK_GE CHECK_GE_S +#define DLOG DLOG_S +#define DVLOG DVLOG_S +#define DLOG_IF DLOG_IF_S +#define DVLOG_IF DVLOG_IF_S +#define DCHECK DCHECK_S +#define DCHECK_NOTNULL DCHECK_NOTNULL_S +#define DCHECK_EQ DCHECK_EQ_S +#define DCHECK_NE DCHECK_NE_S +#define DCHECK_LT DCHECK_LT_S +#define DCHECK_LE DCHECK_LE_S +#define DCHECK_GT DCHECK_GT_S +#define DCHECK_GE DCHECK_GE_S +#define VLOG_IS_ON(verbosity) ((verbosity) <= loguru::current_verbosity_cutoff()) + +#endif // LOGURU_REPLACE_GLOG + +#endif // LOGURU_WITH_STREAMS + +#endif // LOGURU_HAS_DECLARED_STREAMS_HEADER diff --git a/sample/sample.cpp b/sample/sample.cpp index b505e12..53b629d 100644 --- a/sample/sample.cpp +++ b/sample/sample.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include "Models.h" #include "modelRegister.h" #include "config_platform.h" @@ -160,82 +161,119 @@ int main(int argc, char** argv) states[feature] = std::vector(maxes[feature]); } states[className] = std::vector(maxes[className]); - auto clf = platform::Models::instance()->create(model_name); + // Output the states + std::cout << std::string(80, '-') << std::endl; + std::cout << "States" << std::endl; + for (auto feature : features) { + std::cout << feature << ": " << states[feature].size() << std::endl; + } + std::cout << std::string(80, '-') << std::endl; + //auto clf = platform::Models::instance()->create("SPODE"); + auto clf = bayesnet::SPODE(2); + bayesnet::Smoothing_t smoothing = bayesnet::Smoothing_t::ORIGINAL; - clf->fit(Xd, y, features, className, states, smoothing); + clf.fit(Xd, y, features, 
className, states, smoothing); if (dump_cpt) { std::cout << "--- CPT Tables ---" << std::endl; - clf->dump_cpt(); + std::cout << clf.dump_cpt(); } - auto lines = clf->show(); + std::cout << "--- Datos predicción ---" << std::endl; + std::cout << "Orden de variables: " << std::endl; + for (auto feature : features) { + std::cout << feature << ", "; + } + std::cout << std::endl; + std::cout << "X[0]: "; + for (int i = 0; i < Xd.size(); ++i) { + std::cout << Xd[i][0] << ", "; + } + std::cout << std::endl; + std::cout << std::string(80, '-') << std::endl; + + auto lines = clf.show(); for (auto line : lines) { std::cout << line << std::endl; } std::cout << "--- Topological Order ---" << std::endl; - auto order = clf->topological_order(); + auto order = clf.topological_order(); for (auto name : order) { std::cout << name << ", "; } - std::cout << "end." << std::endl; - auto score = clf->score(Xd, y); - std::cout << "Score: " << score << std::endl; - auto graph = clf->graph(); - auto dot_file = model_name + "_" + file_name; - ofstream file(dot_file + ".dot"); - file << graph; - file.close(); - std::cout << "Graph saved in " << model_name << "_" << file_name << ".dot" << std::endl; - std::cout << "dot -Tpng -o " + dot_file + ".png " + dot_file + ".dot " << std::endl; - std::string stratified_string = stratified ? " Stratified" : ""; - std::cout << nFolds << " Folds" << stratified_string << " Cross validation" << std::endl; - std::cout << "==========================================" << std::endl; - torch::Tensor Xt = torch::zeros({ static_cast(Xd.size()), static_cast(Xd[0].size()) }, torch::kInt32); - torch::Tensor yt = torch::tensor(y, torch::kInt32); - for (int i = 0; i < features.size(); ++i) { - Xt.index_put_({ i, "..." }, torch::tensor(Xd[i], torch::kInt32)); - } - float total_score = 0, total_score_train = 0, score_train, score_test; - folding::Fold* fold; - double nodes = 0.0; - if (stratified) - fold = new folding::StratifiedKFold(nFolds, y, seed); - else - fold = new folding::KFold(nFolds, y.size(), seed); - for (auto i = 0; i < nFolds; ++i) { - auto [train, test] = fold->getFold(i); - std::cout << "Fold: " << i + 1 << std::endl; - if (tensors) { - auto ttrain = torch::tensor(train, torch::kInt64); - auto ttest = torch::tensor(test, torch::kInt64); - torch::Tensor Xtraint = torch::index_select(Xt, 1, ttrain); - torch::Tensor ytraint = yt.index({ ttrain }); - torch::Tensor Xtestt = torch::index_select(Xt, 1, ttest); - torch::Tensor ytestt = yt.index({ ttest }); - clf->fit(Xtraint, ytraint, features, className, states, smoothing); - auto temp = clf->predict(Xtraint); - score_train = clf->score(Xtraint, ytraint); - score_test = clf->score(Xtestt, ytestt); - } else { - auto [Xtrain, ytrain] = extract_indices(train, Xd, y); - auto [Xtest, ytest] = extract_indices(test, Xd, y); - clf->fit(Xtrain, ytrain, features, className, states, smoothing); - std::cout << "Nodes: " << clf->getNumberOfNodes() << std::endl; - nodes += clf->getNumberOfNodes(); - score_train = clf->score(Xtrain, ytrain); - score_test = clf->score(Xtest, ytest); + auto predict_proba = clf.predict_proba(Xd); + std::cout << "Instances predict_proba: "; + for (int i = 0; i < predict_proba.size(); i++) { + std::cout << "Instance " << i << ": "; + for (int j = 0; j < 4; ++j) { + std::cout << Xd[j][i] << ", "; } - if (dump_cpt) { - std::cout << "--- CPT Tables ---" << std::endl; - std::cout << clf->dump_cpt(); + std::cout << ": "; + for (auto score : predict_proba[i]) { + std::cout << score << ", "; } - total_score_train += score_train; 
- total_score += score_test; - std::cout << "Score Train: " << score_train << std::endl; - std::cout << "Score Test : " << score_test << std::endl; - std::cout << "-------------------------------------------------------------------------------" << std::endl; + std::cout << std::endl; } - std::cout << "Nodes: " << nodes / nFolds << std::endl; - std::cout << "**********************************************************************************" << std::endl; - std::cout << "Average Score Train: " << total_score_train / nFolds << std::endl; - std::cout << "Average Score Test : " << total_score / nFolds << std::endl;return 0; + // std::cout << std::endl; + // std::cout << "end." << std::endl; + // auto score = clf->score(Xd, y); + // std::cout << "Score: " << score << std::endl; + // auto graph = clf->graph(); + // auto dot_file = model_name + "_" + file_name; + // ofstream file(dot_file + ".dot"); + // file << graph; + // file.close(); + // std::cout << "Graph saved in " << model_name << "_" << file_name << ".dot" << std::endl; + // std::cout << "dot -Tpng -o " + dot_file + ".png " + dot_file + ".dot " << std::endl; + // std::string stratified_string = stratified ? " Stratified" : ""; + // std::cout << nFolds << " Folds" << stratified_string << " Cross validation" << std::endl; + // std::cout << "==========================================" << std::endl; + // torch::Tensor Xt = torch::zeros({ static_cast(Xd.size()), static_cast(Xd[0].size()) }, torch::kInt32); + // torch::Tensor yt = torch::tensor(y, torch::kInt32); + // for (int i = 0; i < features.size(); ++i) { + // Xt.index_put_({ i, "..." }, torch::tensor(Xd[i], torch::kInt32)); + // } + // float total_score = 0, total_score_train = 0, score_train, score_test; + // folding::Fold* fold; + // double nodes = 0.0; + // if (stratified) + // fold = new folding::StratifiedKFold(nFolds, y, seed); + // else + // fold = new folding::KFold(nFolds, y.size(), seed); + // for (auto i = 0; i < nFolds; ++i) { + // auto [train, test] = fold->getFold(i); + // std::cout << "Fold: " << i + 1 << std::endl; + // if (tensors) { + // auto ttrain = torch::tensor(train, torch::kInt64); + // auto ttest = torch::tensor(test, torch::kInt64); + // torch::Tensor Xtraint = torch::index_select(Xt, 1, ttrain); + // torch::Tensor ytraint = yt.index({ ttrain }); + // torch::Tensor Xtestt = torch::index_select(Xt, 1, ttest); + // torch::Tensor ytestt = yt.index({ ttest }); + // clf->fit(Xtraint, ytraint, features, className, states, smoothing); + // auto temp = clf->predict(Xtraint); + // score_train = clf->score(Xtraint, ytraint); + // score_test = clf->score(Xtestt, ytestt); + // } else { + // auto [Xtrain, ytrain] = extract_indices(train, Xd, y); + // auto [Xtest, ytest] = extract_indices(test, Xd, y); + // clf->fit(Xtrain, ytrain, features, className, states, smoothing); + // std::cout << "Nodes: " << clf->getNumberOfNodes() << std::endl; + // nodes += clf->getNumberOfNodes(); + // score_train = clf->score(Xtrain, ytrain); + // score_test = clf->score(Xtest, ytest); + // } + // // if (dump_cpt) { + // // std::cout << "--- CPT Tables ---" << std::endl; + // // std::cout << clf->dump_cpt(); + // // } + // total_score_train += score_train; + // total_score += score_test; + // std::cout << "Score Train: " << score_train << std::endl; + // std::cout << "Score Test : " << score_test << std::endl; + // std::cout << "-------------------------------------------------------------------------------" << std::endl; + // } + + // std::cout << "Nodes: " << nodes / nFolds << std::endl; + 
// std::cout << "**********************************************************************************" << std::endl; + // std::cout << "Average Score Train: " << total_score_train / nFolds << std::endl; + // std::cout << "Average Score Test : " << total_score / nFolds << std::endl;return 0; } \ No newline at end of file diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 5c8a536..7825077 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,5 +1,6 @@ include_directories( ## Libs + ${Platform_SOURCE_DIR}/lib/log ${Platform_SOURCE_DIR}/lib/Files ${Platform_SOURCE_DIR}/lib/folding ${Platform_SOURCE_DIR}/lib/mdlp/src @@ -25,6 +26,8 @@ add_executable( main/Models.cpp main/Scores.cpp reports/ReportExcel.cpp reports/ReportBase.cpp reports/ExcelFile.cpp results/Result.cpp + experimental_clfs/XA1DE.cpp + experimental_clfs/ExpClf.cpp ) target_link_libraries(b_best Boost::boost "${PyClassifiers}" "${BayesNet}" fimdlp ${Python3_LIBRARIES} "${TORCH_LIBRARIES}" ${LIBTORCH_PYTHON} Boost::python Boost::numpy "${XLSXWRITER_LIB}") @@ -36,6 +39,8 @@ add_executable(b_grid commands/b_grid.cpp ${grid_sources} main/HyperParameters.cpp main/Models.cpp main/Experiment.cpp main/Scores.cpp main/ArgumentsExperiment.cpp reports/ReportConsole.cpp reports/ReportBase.cpp results/Result.cpp + experimental_clfs/XA1DE.cpp + experimental_clfs/ExpClf.cpp ) target_link_libraries(b_grid ${MPI_CXX_LIBRARIES} "${PyClassifiers}" "${BayesNet}" fimdlp ${Python3_LIBRARIES} "${TORCH_LIBRARIES}" ${LIBTORCH_PYTHON} Boost::python Boost::numpy) @@ -45,6 +50,8 @@ add_executable(b_list commands/b_list.cpp main/Models.cpp main/Scores.cpp reports/ReportExcel.cpp reports/ExcelFile.cpp reports/ReportBase.cpp reports/DatasetsExcel.cpp reports/DatasetsConsole.cpp reports/ReportsPaged.cpp results/Result.cpp results/ResultsDatasetExcel.cpp results/ResultsDataset.cpp results/ResultsDatasetConsole.cpp + experimental_clfs/XA1DE.cpp + experimental_clfs/ExpClf.cpp ) target_link_libraries(b_list "${PyClassifiers}" "${BayesNet}" fimdlp ${Python3_LIBRARIES} "${TORCH_LIBRARIES}" ${LIBTORCH_PYTHON} Boost::python Boost::numpy "${XLSXWRITER_LIB}") @@ -55,6 +62,8 @@ add_executable(b_main commands/b_main.cpp ${main_sources} common/Datasets.cpp common/Dataset.cpp common/Discretization.cpp reports/ReportConsole.cpp reports/ReportBase.cpp results/Result.cpp + experimental_clfs/XA1DE.cpp + experimental_clfs/ExpClf.cpp ) target_link_libraries(b_main "${PyClassifiers}" "${BayesNet}" fimdlp ${Python3_LIBRARIES} "${TORCH_LIBRARIES}" ${LIBTORCH_PYTHON} Boost::python Boost::numpy) diff --git a/src/commands/b_grid.cpp b/src/commands/b_grid.cpp index 2306b26..b6efd56 100644 --- a/src/commands/b_grid.cpp +++ b/src/commands/b_grid.cpp @@ -1,14 +1,12 @@ #include #include #include -#include #include #include #include "main/Models.h" -#include "main/modelRegister.h" #include "main/ArgumentsExperiment.h" #include "common/Paths.h" -#include "common/Timer.h" +#include "common/Timer.hpp" #include "common/Colors.h" #include "common/DotEnv.h" #include "grid/GridSearch.h" diff --git a/src/commands/b_manage.cpp b/src/commands/b_manage.cpp index 17c1465..0dda157 100644 --- a/src/commands/b_manage.cpp +++ b/src/commands/b_manage.cpp @@ -51,6 +51,66 @@ void handleResize(int sig) manager->updateSize(rows, cols); } +void openFile(const std::string& fileName) +{ + // #ifdef __APPLE__ + // // macOS uses the "open" command + // std::string command = "open"; + // #elif defined(__linux__) + // // Linux typically uses "xdg-open" + // std::string command = "xdg-open"; + // #else + // 
// For other OSes, do nothing or handle differently + // std::cerr << "Unsupported platform." << std::endl; + // return; + // #endif + // execlp(command.c_str(), command.c_str(), fileName.c_str(), NULL); +#ifdef __APPLE__ + const char* tool = "/usr/bin/open"; +#elif defined(__linux__) + const char* tool = "/usr/bin/xdg-open"; +#else + std::cerr << "Unsupported platform." << std::endl; + return; +#endif + + // We'll build an argv array for execve: + std::vector argv; + argv.push_back(const_cast(tool)); // argv[0] + argv.push_back(const_cast(fileName.c_str())); // argv[1] + argv.push_back(nullptr); + + // Make a new environment array, skipping BASH_FUNC_ variables + std::vector filteredEnv; + for (char** env = environ; *env != nullptr; ++env) { + // *env is a string like "NAME=VALUE" + // We want to skip those starting with "BASH_FUNC_" + if (strncmp(*env, "BASH_FUNC_", 10) == 0) { + // skip it + continue; + } + filteredEnv.push_back(*env); + } + + // Convert filteredEnv into a char* array + std::vector envp; + for (auto& var : filteredEnv) { + envp.push_back(const_cast(var.c_str())); + } + envp.push_back(nullptr); + + // Now call execve with the cleaned environment + // NOTE: You may need a full path to the tool if it's not in PATH, or use which() logic + // For now, let's assume "open" or "xdg-open" is found in the default PATH: + execve(tool, argv.data(), envp.data()); + + // If we reach here, execve failed + perror("execve failed"); + // This would terminate your current process if it's not in a child + // Usually you'd do something like: + _exit(EXIT_FAILURE); +} + int main(int argc, char** argv) { auto program = argparse::ArgumentParser("b_manage", { platform_project_version.begin(), platform_project_version.end() }); @@ -67,6 +127,11 @@ int main(int argc, char** argv) auto [rows, cols] = numRowsCols(); manager = new platform::ManageScreen(rows, cols, model, score, platform, complete, partial, compare); manager->doMenu(); + auto fileName = manager->getExcelFileName(); delete manager; + if (!fileName.empty()) { + std::cout << "Opening " << fileName << std::endl; + openFile(fileName); + } return 0; } diff --git a/src/common/Timer.h b/src/common/Timer.hpp similarity index 100% rename from src/common/Timer.h rename to src/common/Timer.hpp diff --git a/src/experimental_clfs/CountingSemaphore.hpp b/src/experimental_clfs/CountingSemaphore.hpp new file mode 100644 index 0000000..e217d00 --- /dev/null +++ b/src/experimental_clfs/CountingSemaphore.hpp @@ -0,0 +1,53 @@ +#ifndef COUNTING_SEMAPHORE_H +#define COUNTING_SEMAPHORE_H +#include +#include +#include +#include +#include +#include + +class CountingSemaphore { +public: + static CountingSemaphore& getInstance() + { + static CountingSemaphore instance; + return instance; + } + // Delete copy constructor and assignment operator + CountingSemaphore(const CountingSemaphore&) = delete; + CountingSemaphore& operator=(const CountingSemaphore&) = delete; + void acquire() + { + std::unique_lock lock(mtx_); + cv_.wait(lock, [this]() { return count_ > 0; }); + --count_; + } + void release() + { + std::lock_guard lock(mtx_); + ++count_; + if (count_ <= max_count_) { + cv_.notify_one(); + } + } + uint getCount() const + { + return count_; + } + uint getMaxCount() const + { + return max_count_; + } +private: + CountingSemaphore() + : max_count_(std::max(1u, static_cast(0.95 * std::thread::hardware_concurrency()))), + count_(max_count_) + { + } + std::mutex mtx_; + std::condition_variable cv_; + const uint max_count_; + uint count_; +}; +#endif \ No newline 
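// Editor's sketch (assumed usage, mirroring the pattern ExpClf and XSpode use
// further down in this patch): acquire() gates the launch of every worker
// thread and the worker calls release() when done, so at most max_count_
// (about 95% of std::thread::hardware_concurrency()) workers run at once.
#include <thread>
#include <vector>
inline void run_gated(int n_tasks)
{
    auto& sem = CountingSemaphore::getInstance();
    std::vector<std::thread> threads;
    for (int t = 0; t < n_tasks; ++t) {
        sem.acquire();                        // blocks while every slot is busy
        threads.emplace_back([&sem, t]() {
            (void)t;                          // placeholder for the real work on task t
            sem.release();                    // free the slot for the next queued task
        });
    }
    for (auto& th : threads) th.join();
}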
at end of file diff --git a/src/experimental_clfs/ExpClf.cpp b/src/experimental_clfs/ExpClf.cpp new file mode 100644 index 0000000..b54bba5 --- /dev/null +++ b/src/experimental_clfs/ExpClf.cpp @@ -0,0 +1,182 @@ +// *************************************************************** +// SPDX-FileCopyrightText: Copyright 2025 Ricardo Montañana Gómez +// SPDX-FileType: SOURCE +// SPDX-License-Identifier: MIT +// *************************************************************** + +#include "ExpClf.h" +#include "TensorUtils.hpp" + +namespace platform { + ExpClf::ExpClf() : semaphore_{ CountingSemaphore::getInstance() }, Boost(false) + { + validHyperparameters = {}; + } + // + // Parents + // + void ExpClf::add_active_parents(const std::vector& active_parents) + { + for (const auto& parent : active_parents) + aode_.add_active_parent(parent); + } + void ExpClf::add_active_parent(int parent) + { + aode_.add_active_parent(parent); + } + void ExpClf::remove_last_parent() + { + aode_.remove_last_parent(); + } + // + // Predict + // + std::vector ExpClf::predict_spode(std::vector>& test_data, int parent) + { + int test_size = test_data[0].size(); + int sample_size = test_data.size(); + auto predictions = std::vector(test_size); + + int chunk_size = std::min(150, int(test_size / semaphore_.getMaxCount()) + 1); + std::vector threads; + auto worker = [&](const std::vector>& samples, int begin, int chunk, int sample_size, std::vector& predictions) { + std::string threadName = "(V)PWorker-" + std::to_string(begin) + "-" + std::to_string(chunk); +#if defined(__linux__) + pthread_setname_np(pthread_self(), threadName.c_str()); +#else + pthread_setname_np(threadName.c_str()); +#endif + std::vector instance(sample_size); + for (int sample = begin; sample < begin + chunk; ++sample) { + for (int feature = 0; feature < sample_size; ++feature) { + instance[feature] = samples[feature][sample]; + } + predictions[sample] = aode_.predict_spode(instance, parent); + } + semaphore_.release(); + }; + for (int begin = 0; begin < test_size; begin += chunk_size) { + int chunk = std::min(chunk_size, test_size - begin); + semaphore_.acquire(); + threads.emplace_back(worker, test_data, begin, chunk, sample_size, std::ref(predictions)); + } + for (auto& thread : threads) { + thread.join(); + } + return predictions; + } + torch::Tensor ExpClf::predict(torch::Tensor& X) + { + auto X_ = TensorUtils::to_matrix(X); + torch::Tensor y = torch::tensor(predict(X_)); + return y; + } + torch::Tensor ExpClf::predict_proba(torch::Tensor& X) + { + auto X_ = TensorUtils::to_matrix(X); + auto probabilities = predict_proba(X_); + auto n_samples = X.size(1); + int n_classes = probabilities[0].size(); + auto y = torch::zeros({ n_samples, n_classes }); + for (int i = 0; i < n_samples; i++) { + for (int j = 0; j < n_classes; j++) { + y[i][j] = probabilities[i][j]; + } + } + return y; + } + float ExpClf::score(torch::Tensor& X, torch::Tensor& y) + { + auto X_ = TensorUtils::to_matrix(X); + auto y_ = TensorUtils::to_vector(y); + return score(X_, y_); + } + std::vector> ExpClf::predict_proba(const std::vector>& test_data) + { + int test_size = test_data[0].size(); + int sample_size = test_data.size(); + auto probabilities = std::vector>(test_size, std::vector(aode_.statesClass())); + + int chunk_size = std::min(150, int(test_size / semaphore_.getMaxCount()) + 1); + std::vector threads; + auto worker = [&](const std::vector>& samples, int begin, int chunk, int sample_size, std::vector>& predictions) { + std::string threadName = "(V)PWorker-" + 
std::to_string(begin) + "-" + std::to_string(chunk); +#if defined(__linux__) + pthread_setname_np(pthread_self(), threadName.c_str()); +#else + pthread_setname_np(threadName.c_str()); +#endif + + std::vector instance(sample_size); + for (int sample = begin; sample < begin + chunk; ++sample) { + for (int feature = 0; feature < sample_size; ++feature) { + instance[feature] = samples[feature][sample]; + } + predictions[sample] = aode_.predict_proba(instance); + } + semaphore_.release(); + }; + for (int begin = 0; begin < test_size; begin += chunk_size) { + int chunk = std::min(chunk_size, test_size - begin); + semaphore_.acquire(); + threads.emplace_back(worker, test_data, begin, chunk, sample_size, std::ref(probabilities)); + } + for (auto& thread : threads) { + thread.join(); + } + return probabilities; + } + std::vector ExpClf::predict(std::vector>& test_data) + { + if (!fitted) { + throw std::logic_error(CLASSIFIER_NOT_FITTED); + } + auto probabilities = predict_proba(test_data); + std::vector predictions(probabilities.size(), 0); + + for (size_t i = 0; i < probabilities.size(); i++) { + predictions[i] = std::distance(probabilities[i].begin(), std::max_element(probabilities[i].begin(), probabilities[i].end())); + } + + return predictions; + } + float ExpClf::score(std::vector>& test_data, std::vector& labels) + { + Timer timer; + timer.start(); + std::vector predictions = predict(test_data); + int correct = 0; + + for (size_t i = 0; i < predictions.size(); i++) { + if (predictions[i] == labels[i]) { + correct++; + } + } + if (debug) { + std::cout << "* Time to predict: " << timer.getDurationString() << std::endl; + } + return static_cast(correct) / predictions.size(); + } + + // + // statistics + // + int ExpClf::getNumberOfNodes() const + { + return aode_.getNumberOfNodes(); + } + int ExpClf::getNumberOfEdges() const + { + return aode_.getNumberOfEdges(); + } + int ExpClf::getNumberOfStates() const + { + return aode_.getNumberOfStates(); + } + int ExpClf::getClassNumStates() const + { + return aode_.statesClass(); + } + + +} \ No newline at end of file diff --git a/src/experimental_clfs/ExpClf.h b/src/experimental_clfs/ExpClf.h new file mode 100644 index 0000000..fc6d3ec --- /dev/null +++ b/src/experimental_clfs/ExpClf.h @@ -0,0 +1,66 @@ +// *************************************************************** +// SPDX-FileCopyrightText: Copyright 2025 Ricardo Montañana Gómez +// SPDX-FileType: SOURCE +// SPDX-License-Identifier: MIT +// *************************************************************** + +#ifndef EXPCLF_H +#define EXPCLF_H +#include +#include +#include +#include +#include +#include +#include +#include "common/Timer.hpp" +#include "CountingSemaphore.hpp" +#include "Xaode.hpp" + +namespace platform { + class ExpClf : public bayesnet::Boost { + public: + ExpClf(); + virtual ~ExpClf() = default; + std::vector predict(std::vector>& X) override; + torch::Tensor predict(torch::Tensor& X) override; + torch::Tensor predict_proba(torch::Tensor& X) override; + std::vector predict_spode(std::vector>& test_data, int parent); + std::vector> predict_proba(const std::vector>& X); + float score(std::vector>& X, std::vector& y) override; + float score(torch::Tensor& X, torch::Tensor& y) override; + int getNumberOfNodes() const override; + int getNumberOfEdges() const override; + int getNumberOfStates() const override; + int getClassNumStates() const override; + std::vector show() const override { return {}; } + std::vector topological_order() override { return {}; } + std::string dump_cpt() const 
override { return ""; } + void setDebug(bool debug) { this->debug = debug; } + bayesnet::status_t getStatus() const override { return status; } + std::vector getNotes() const override { return notes; } + std::vector graph(const std::string& title = "") const override { return {}; } + void add_active_parents(const std::vector& active_parents); + void add_active_parent(int parent); + void remove_last_parent(); + protected: + bool debug = false; + Xaode aode_; + torch::Tensor weights_; + const std::string CLASSIFIER_NOT_FITTED = "Classifier has not been fitted"; + inline void normalize_weights(int num_instances) + { + double sum = weights_.sum().item(); + if (sum == 0) { + weights_ = torch::full({ num_instances }, 1.0); + } else { + for (int i = 0; i < weights_.size(0); ++i) { + weights_[i] = weights_[i].item() * num_instances / sum; + } + } + } + private: + CountingSemaphore& semaphore_; + }; +} +#endif // EXPCLF_H \ No newline at end of file diff --git a/src/experimental_clfs/ExpEnsemble.cpp b/src/experimental_clfs/ExpEnsemble.cpp new file mode 100644 index 0000000..1c9dd22 --- /dev/null +++ b/src/experimental_clfs/ExpEnsemble.cpp @@ -0,0 +1,158 @@ +// *************************************************************** +// SPDX-FileCopyrightText: Copyright 2025 Ricardo Montañana Gómez +// SPDX-FileType: SOURCE +// SPDX-License-Identifier: MIT +// *************************************************************** + +#include "ExpEnsemble.h" +#include "TensorUtils.hpp" + +namespace platform { + ExpEnsemble::ExpEnsemble() : semaphore_{ CountingSemaphore::getInstance() }, Boost(false) + { + validHyperparameters = {}; + } + // + // Parents + // + void ExpEnsemble::add_model(std::unique_ptr model) + { + models.push_back(std::move(model)); + n_models++; + } + void ExpEnsemble::remove_last_model() + { + models.pop_back(); + n_models--; + } + // + // Predict + // + torch::Tensor ExpEnsemble::predict(torch::Tensor& X) + { + auto X_ = TensorUtils::to_matrix(X); + torch::Tensor y = torch::tensor(predict(X_)); + return y; + } + torch::Tensor ExpEnsemble::predict_proba(torch::Tensor& X) + { + auto X_ = TensorUtils::to_matrix(X); + auto probabilities = predict_proba(X_); + auto n_samples = X.size(1); + int n_classes = probabilities[0].size(); + auto y = torch::zeros({ n_samples, n_classes }); + for (int i = 0; i < n_samples; i++) { + for (int j = 0; j < n_classes; j++) { + y[i][j] = probabilities[i][j]; + } + } + return y; + } + float ExpEnsemble::score(torch::Tensor& X, torch::Tensor& y) + { + auto X_ = TensorUtils::to_matrix(X); + auto y_ = TensorUtils::to_vector(y); + return score(X_, y_); + } + std::vector> ExpEnsemble::predict_proba(const std::vector>& test_data) + { + int test_size = test_data[0].size(); + int sample_size = test_data.size(); + auto probabilities = std::vector>(test_size, std::vector(getClassNumStates())); + int chunk_size = std::min(150, int(test_size / semaphore_.getMaxCount()) + 1); + std::vector threads; + auto worker = [&](const std::vector>& samples, int begin, int chunk, int sample_size, std::vector>& predictions) { + std::string threadName = "(V)PWorker-" + std::to_string(begin) + "-" + std::to_string(chunk); +#if defined(__linux__) + pthread_setname_np(pthread_self(), threadName.c_str()); +#else + pthread_setname_np(threadName.c_str()); +#endif + + std::vector instance(sample_size); + for (int sample = begin; sample < begin + chunk; ++sample) { + for (int feature = 0; feature < sample_size; ++feature) { + instance[feature] = samples[feature][sample]; + } + // predictions[sample] = 
aode_.predict_proba(instance); + } + semaphore_.release(); + }; + for (int begin = 0; begin < test_size; begin += chunk_size) { + int chunk = std::min(chunk_size, test_size - begin); + semaphore_.acquire(); + threads.emplace_back(worker, test_data, begin, chunk, sample_size, std::ref(probabilities)); + } + for (auto& thread : threads) { + thread.join(); + } + return probabilities; + } + std::vector ExpEnsemble::predict(std::vector>& test_data) + { + if (!fitted) { + throw std::logic_error(CLASSIFIER_NOT_FITTED); + } + auto probabilities = predict_proba(test_data); + std::vector predictions(probabilities.size(), 0); + + for (size_t i = 0; i < probabilities.size(); i++) { + predictions[i] = std::distance(probabilities[i].begin(), std::max_element(probabilities[i].begin(), probabilities[i].end())); + } + + return predictions; + } + float ExpEnsemble::score(std::vector>& test_data, std::vector& labels) + { + Timer timer; + timer.start(); + std::vector predictions = predict(test_data); + int correct = 0; + + for (size_t i = 0; i < predictions.size(); i++) { + if (predictions[i] == labels[i]) { + correct++; + } + } + if (debug) { + std::cout << "* Time to predict: " << timer.getDurationString() << std::endl; + } + return static_cast(correct) / predictions.size(); + } + + // + // statistics + // + int ExpEnsemble::getNumberOfNodes() const + { + if (models_.empty()) { + return 0; + } + return n_models * (models_.at(0)->getNFeatures() + 1); + } + int ExpEnsemble::getNumberOfEdges() const + { + if (models_.empty()) { + return 0; + } + return n_models * (2 * models_.at(0)->getNFeatures() - 1); + } + int ExpEnsemble::getNumberOfStates() const + { + if (models_.empty()) { + return 0; + } + auto states = models_.at(0)->getStates(); + int nFeatures = models_.at(0)->getNFeatures(); + return std::accumulate(states.begin(), states.end(), 0) * nFeatures * n_models; + } + int ExpEnsemble::getClassNumStates() const + { + if (models_.empty()) { + return 0; + } + return models_.at(0)->statesClass(); + } + + +} \ No newline at end of file diff --git a/src/experimental_clfs/ExpEnsemble.h b/src/experimental_clfs/ExpEnsemble.h new file mode 100644 index 0000000..2da97b9 --- /dev/null +++ b/src/experimental_clfs/ExpEnsemble.h @@ -0,0 +1,66 @@ +// *************************************************************** +// SPDX-FileCopyrightText: Copyright 2025 Ricardo Montañana Gómez +// SPDX-FileType: SOURCE +// SPDX-License-Identifier: MIT +// *************************************************************** + +#ifndef EXPENSEMBLE_H +#define EXPENSEMBLE_H +#include +#include +#include +#include +#include +#include +#include +#include "common/Timer.hpp" +#include "CountingSemaphore.hpp" +#include "XSpode.hpp" + +namespace platform { + class ExpEnsemble : public bayesnet::Boost { + public: + ExpEnsemble(); + virtual ~ExpEnsemble() = default; + std::vector predict(std::vector>& X) override; + torch::Tensor predict(torch::Tensor& X) override; + torch::Tensor predict_proba(torch::Tensor& X) override; + std::vector predict_spode(std::vector>& test_data, int parent); + std::vector> predict_proba(const std::vector>& X); + float score(std::vector>& X, std::vector& y) override; + float score(torch::Tensor& X, torch::Tensor& y) override; + int getNumberOfNodes() const override; + int getNumberOfEdges() const override; + int getNumberOfStates() const override; + int getClassNumStates() const override; + std::vector show() const override { return {}; } + std::vector topological_order() override { return {}; } + std::string dump_cpt() 
const override { return ""; } + void setDebug(bool debug) { this->debug = debug; } + bayesnet::status_t getStatus() const override { return status; } + std::vector getNotes() const override { return notes; } + std::vector graph(const std::string& title = "") const override { return {}; } + protected: + void add_model(std::unique_ptr model); + void remove_last_model(); + bool debug = false; + std::vector > models_; + torch::Tensor weights_; + std::vector significanceModels_; + const std::string CLASSIFIER_NOT_FITTED = "Classifier has not been fitted"; + inline void normalize_weights(int num_instances) + { + double sum = weights_.sum().item(); + if (sum == 0) { + weights_ = torch::full({ num_instances }, 1.0); + } else { + for (int i = 0; i < weights_.size(0); ++i) { + weights_[i] = weights_[i].item() * num_instances / sum; + } + } + } + private: + CountingSemaphore& semaphore_; + }; +} +#endif // EXPENSEMBLE_H \ No newline at end of file diff --git a/src/experimental_clfs/TensorUtils.hpp b/src/experimental_clfs/TensorUtils.hpp new file mode 100644 index 0000000..6f09859 --- /dev/null +++ b/src/experimental_clfs/TensorUtils.hpp @@ -0,0 +1,51 @@ +#ifndef TENSORUTILS_HPP +#define TENSORUTILS_HPP +#include +#include +namespace platform { + class TensorUtils { + public: + static std::vector> to_matrix(const torch::Tensor& X) + { + // Ensure tensor is contiguous in memory + auto X_contig = X.contiguous(); + + // Access tensor data pointer directly + auto data_ptr = X_contig.data_ptr(); + + // IF you are using int64_t as the data type, use the following line + //auto data_ptr = X_contig.data_ptr(); + //std::vector> data(X.size(0), std::vector(X.size(1))); + + // Prepare output container + std::vector> data(X.size(0), std::vector(X.size(1))); + + // Fill the 2D vector in a single loop using pointer arithmetic + int rows = X.size(0); + int cols = X.size(1); + for (int i = 0; i < rows; ++i) { + std::copy(data_ptr + i * cols, data_ptr + (i + 1) * cols, data[i].begin()); + } + return data; + } + template + static std::vector to_vector(const torch::Tensor& y) + { + // Ensure the tensor is contiguous in memory + auto y_contig = y.contiguous(); + + // Access data pointer + auto data_ptr = y_contig.data_ptr(); + + // Prepare output container + std::vector data(y.size(0)); + + // Copy data efficiently + std::copy(data_ptr, data_ptr + y.size(0), data.begin()); + + return data; + } + }; +} + +#endif // TENSORUTILS_HPP \ No newline at end of file diff --git a/src/experimental_clfs/XA1DE.cpp b/src/experimental_clfs/XA1DE.cpp new file mode 100644 index 0000000..ba7dd70 --- /dev/null +++ b/src/experimental_clfs/XA1DE.cpp @@ -0,0 +1,20 @@ +// *************************************************************** +// SPDX-FileCopyrightText: Copyright 2025 Ricardo Montañana Gómez +// SPDX-FileType: SOURCE +// SPDX-License-Identifier: MIT +// *************************************************************** + +#include "XA1DE.h" +#include "TensorUtils.hpp" + +namespace platform { + void XA1DE::trainModel(const torch::Tensor& weights, const bayesnet::Smoothing_t smoothing) + { + auto X = TensorUtils::to_matrix(dataset.slice(0, 0, dataset.size(0) - 1)); + auto y = TensorUtils::to_vector(dataset.index({ -1, "..." 
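// Editor's note (illustration, not part of the patch): TensorUtils::to_matrix
// copies a 2-D integer tensor row by row into a vector of vectors, and
// TensorUtils::to_vector does the same for a 1-D tensor, so the slice above
// becomes the feature matrix X (all rows but the last) while the to_vector
// call here turns the last row of `dataset` into the plain label vector y.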
})); + int num_instances = X[0].size(); + weights_ = torch::full({ num_instances }, 1.0); + //normalize_weights(num_instances); + aode_.fit(X, y, features, className, states, weights_, true, smoothing); + } +} diff --git a/src/experimental_clfs/XA1DE.h b/src/experimental_clfs/XA1DE.h new file mode 100644 index 0000000..0c1aefc --- /dev/null +++ b/src/experimental_clfs/XA1DE.h @@ -0,0 +1,26 @@ +// *************************************************************** +// SPDX-FileCopyrightText: Copyright 2025 Ricardo Montañana Gómez +// SPDX-FileType: SOURCE +// SPDX-License-Identifier: MIT +// *************************************************************** + +#ifndef XA1DE_H +#define XA1DE_H +#include "Xaode.hpp" +#include "ExpClf.h" +#include + +namespace platform { + class XA1DE : public ExpClf { + public: + XA1DE() = default; + virtual ~XA1DE() override = default; + std::string getVersion() override { return version; }; + protected: + void buildModel(const torch::Tensor& weights) override {}; + void trainModel(const torch::Tensor& weights, const bayesnet::Smoothing_t smoothing) override; + private: + std::string version = "1.0.0"; + }; +} +#endif // XA1DE_H \ No newline at end of file diff --git a/src/experimental_clfs/XBAODE.cpp b/src/experimental_clfs/XBAODE.cpp new file mode 100644 index 0000000..9fd29d5 --- /dev/null +++ b/src/experimental_clfs/XBAODE.cpp @@ -0,0 +1,183 @@ +// *************************************************************** +// SPDX-FileCopyrightText: Copyright 2025 Ricardo Montañana Gómez +// SPDX-FileType: SOURCE +// SPDX-License-Identifier: MIT +// *************************************************************** +#include +#include +#include +#include +#include +#include "XBAODE.h" +#include "XSpode.hpp" +#include "TensorUtils.hpp" +#include + +namespace platform { + XBAODE::XBAODE() + { + validHyperparameters = { "alpha_block", "order", "convergence", "convergence_best", "bisection", "threshold", "maxTolerance", + "predict_voting", "select_features" }; + } + void XBAODE::add_model(std::unique_ptr model) + { + models.push_back(std::move(model)); + n_models++; + } + void XBAODE::remove_last_model() + { + models.pop_back(); + n_models--; + } + void XBAODE::trainModel(const torch::Tensor& weights, const bayesnet::Smoothing_t smoothing) + { + fitted = true; + X_train_ = TensorUtils::to_matrix(X_train); + y_train_ = TensorUtils::to_vector(y_train); + X_test_ = TensorUtils::to_matrix(X_test); + y_test_ = TensorUtils::to_vector(y_test); + maxTolerance = 3; + // + // Logging setup + // + // loguru::set_thread_name("XBAODE"); + // loguru::g_stderr_verbosity = loguru::Verbosity_OFF; + // loguru::add_file("XBAODE.log", loguru::Truncate, loguru::Verbosity_MAX); + + // Algorithm based on the adaboost algorithm for classification + // as explained in Ensemble methods (Zhi-Hua Zhou, 2012) + double alpha_t = 0; + weights_ = torch::full({ m }, 1.0 / static_cast(m), torch::kFloat64); // m initialized in Classifier.cc + significanceModels.resize(n, 0.0); // n initialized in Classifier.cc + bool finished = false; + std::vector featuresUsed; + n_models = 0; + std::unique_ptr model; + if (selectFeatures) { + featuresUsed = featureSelection(weights_); + for (const auto& parent : featuresUsed) { + model = std::unique_ptr(new XSpode(parent)); + model->fit(X_train_, y_train_, weights_, smoothing); + std::cout << model->getNFeatures() << std::endl; + add_model(std::move(model)); + } + notes.push_back("Used features in initialization: " + std::to_string(featuresUsed.size()) + " of " + 
std::to_string(features.size()) + " with " + select_features_algorithm); + auto ypred = ExpEnsemble::predict(X_train); + std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_); + // Update significance of the models + for (const auto& parent : featuresUsed) { + significanceModels_[parent] = alpha_t; + } + n_models = featuresUsed.size(); + // VLOG_SCOPE_F(1, "SelectFeatures. alpha_t: %f n_models: %d", alpha_t, n_models); + if (finished) { + return; + } + } + int numItemsPack = 0; // The counter of the models inserted in the current pack + // Variables to control the accuracy finish condition + double priorAccuracy = 0.0; + double improvement = 1.0; + double convergence_threshold = 1e-4; + int tolerance = 0; // number of times the accuracy is lower than the convergence_threshold + // Step 0: Set the finish condition + // epsilon sub t > 0.5 => inverse the weights policy + // validation error is not decreasing + // run out of features + bool ascending = order_algorithm == bayesnet::Orders.ASC; + std::mt19937 g{ 173 }; + while (!finished) { + // Step 1: Build ranking with mutual information + auto featureSelection = metrics.SelectKBestWeighted(weights_, ascending, n); // Get all the features sorted + if (order_algorithm == bayesnet::Orders.RAND) { + std::shuffle(featureSelection.begin(), featureSelection.end(), g); + } + // Remove used features + featureSelection.erase(remove_if(featureSelection.begin(), featureSelection.end(), [&](auto x) + { return std::find(featuresUsed.begin(), featuresUsed.end(), x) != featuresUsed.end();}), + featureSelection.end() + ); + int k = bisection ? pow(2, tolerance) : 1; + int counter = 0; // The model counter of the current pack + // VLOG_SCOPE_F(1, "counter=%d k=%d featureSelection.size: %zu", counter, k, featureSelection.size()); + while (counter++ < k && featureSelection.size() > 0) { + auto feature = featureSelection[0]; + featureSelection.erase(featureSelection.begin()); + model = std::unique_ptr(new XSpode(feature)); + model->fit(X_train_, y_train_, weights_, smoothing); + std::vector ypred; + if (alpha_block) { + // + // Compute the prediction with the current ensemble + model + // + // Add the model to the ensemble + significanceModels[feature] = 1.0; + add_model(std::move(model)); + // Compute the prediction + ypred = ExpEnsemble::predict(X_train_); + // Remove the model from the ensemble + significanceModels[feature] = 0.0; + model = std::move(models_.back()); + remove_last_model(); + } else { + ypred = model->predict(X_train_); + } + // Step 3.1: Compute the classifier amout of say + auto ypred_t = torch::tensor(ypred); + std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred_t, weights_); + // Step 3.4: Store classifier and its accuracy to weigh its future vote + numItemsPack++; + featuresUsed.push_back(feature); + add_model(std::move(model)); + significanceModels[feature] = alpha_t; + // VLOG_SCOPE_F(2, "finished: %d numItemsPack: %d n_models: %d featuresUsed: %zu", finished, numItemsPack, n_models, featuresUsed.size()); + } // End of the pack + if (convergence && !finished) { + auto y_val_predict = ExpEnsemble::predict(X_test); + double accuracy = (y_val_predict == y_test).sum().item() / (double)y_test.size(0); + if (priorAccuracy == 0) { + priorAccuracy = accuracy; + } else { + improvement = accuracy - priorAccuracy; + } + if (improvement < convergence_threshold) { + // VLOG_SCOPE_F(3, " (improvement=threshold) Reset. 
tolerance: %d numItemsPack: %d improvement: %f prior: %f current: %f", tolerance, numItemsPack, improvement, priorAccuracy, accuracy); + tolerance = 0; // Reset the counter if the model performs better + numItemsPack = 0; + } + if (convergence_best) { + // Keep the best accuracy until now as the prior accuracy + priorAccuracy = std::max(accuracy, priorAccuracy); + } else { + // Keep the last accuray obtained as the prior accuracy + priorAccuracy = accuracy; + } + } + // VLOG_SCOPE_F(1, "tolerance: %d featuresUsed.size: %zu features.size: %zu", tolerance, featuresUsed.size(), features.size()); + finished = finished || tolerance > maxTolerance || featuresUsed.size() == features.size(); + } + if (tolerance > maxTolerance) { + if (numItemsPack < n_models) { + notes.push_back("Convergence threshold reached & " + std::to_string(numItemsPack) + " models eliminated"); + // VLOG_SCOPE_F(4, "Convergence threshold reached & %d models eliminated of %d", numItemsPack, n_models); + for (int i = featuresUsed.size() - 1; i >= featuresUsed.size() - numItemsPack; --i) { + remove_last_model(); + significanceModels[featuresUsed[i]] = 0.0; + } + // VLOG_SCOPE_F(4, "*Convergence threshold %d models left & %d features used.", n_models, featuresUsed.size()); + } else { + notes.push_back("Convergence threshold reached & 0 models eliminated"); + // VLOG_SCOPE_F(4, "Convergence threshold reached & 0 models eliminated n_models=%d numItemsPack=%d", n_models, numItemsPack); + } + } + if (featuresUsed.size() != features.size()) { + notes.push_back("Used features in train: " + std::to_string(featuresUsed.size()) + " of " + std::to_string(features.size())); + status = bayesnet::WARNING; + } + notes.push_back("Number of models: " + std::to_string(n_models)); + return; + } +} \ No newline at end of file diff --git a/src/experimental_clfs/XBAODE.h b/src/experimental_clfs/XBAODE.h new file mode 100644 index 0000000..e0fb33e --- /dev/null +++ b/src/experimental_clfs/XBAODE.h @@ -0,0 +1,35 @@ +// *************************************************************** +// SPDX-FileCopyrightText: Copyright 2025 Ricardo Montañana Gómez +// SPDX-FileType: SOURCE +// SPDX-License-Identifier: MIT +// *************************************************************** + +#ifndef XBAODE_H +#define XBAODE_H +#include +#include +#include +#include +#include +#include "common/Timer.hpp" +#include "ExpEnsemble.h" + +namespace platform { + class XBAODE : public Boost { + + // Hay que hacer un vector de modelos entrenados y hacer un predict ensemble con todos ellos + // Probar XA1DE con smooth original y laplace y comprobar diferencias si se pasan pesos a 1 o a 1/m + public: + XBAODE(); + std::string getVersion() override { return version; }; + protected: + void trainModel(const torch::Tensor& weights, const bayesnet::Smoothing_t smoothing) override; + private: + void add_model(std::unique_ptr model); + void remove_last_model(); + std::vector> X_train_, X_test_; + std::vector y_train_, y_test_; + std::string version = "0.9.7"; + }; +} +#endif // XBAODE_H \ No newline at end of file diff --git a/src/experimental_clfs/XSpode.hpp b/src/experimental_clfs/XSpode.hpp new file mode 100644 index 0000000..42c7b49 --- /dev/null +++ b/src/experimental_clfs/XSpode.hpp @@ -0,0 +1,436 @@ +#ifndef XSPODE_H +#define XSPODE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "CountingSemaphore.hpp" + + +namespace platform { + + class XSpode : public bayesnet::Classifier { + 
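// Editor's note (bridging comment, assumptions flagged): XBAODE::trainModel
// above uses this XSpode class as its weak learner. The boosting step follows
// the AdaBoost scheme cited there (Zhi-Hua Zhou, 2012): a model with weighted
// error epsilon_t receives the "amount of say"
//     alpha_t = 0.5 * log((1 - epsilon_t) / epsilon_t)
// and instance weights are multiplied by exp(+alpha_t) on misclassified
// samples and exp(-alpha_t) on correct ones before renormalizing. This is the
// assumed behaviour of update_weights(), which is defined elsewhere in the
// project and not shown in this patch.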
public: + // -------------------------------------- + // Constructor + // + // Supply which feature index is the single super-parent (“spIndex”). + // -------------------------------------- + explicit XSpode(int spIndex) + : superParent_{ spIndex }, + nFeatures_{ 0 }, + statesClass_{ 0 }, + fitted_{ false }, + alpha_{ 1.0 }, + initializer_{ 1.0 }, + semaphore_{ CountingSemaphore::getInstance() } : bayesnet::Classifier(bayesnet::Network()) + { + } + + // -------------------------------------- + // fit + // -------------------------------------- + // + // Trains the SPODE given data: + // X: X[f][n] is the f-th feature value for instance n + // y: y[n] is the class value for instance n + // states: a map or array that tells how many distinct states each feature and the class can take + // + // For example, states_.back() is the number of class states, + // and states_[f] is the number of distinct values for feature f. + // + // We only store conditional probabilities for: + // p(x_sp| c) (the super-parent feature) + // p(x_child| c, x_sp) for all child ≠ sp + // + // The “weights” can be a vector of per-instance weights; if not used, pass them as 1.0. + // -------------------------------------- + void fit(const std::vector>& X, + const std::vector& y, + const torch::Tensor& weights, const bayesnet::Smoothing_t smoothing) + { + int numInstances = static_cast(y.size()); + nFeatures_ = static_cast(X.size()); + + // Derive the number of states for each feature and for the class. + // (This is just one approach; adapt to match your environment.) + // Here, we assume the user also gave us the total #states per feature in e.g. statesMap. + // We'll simply reconstruct the integer states_ array. The last entry is statesClass_. + states_.resize(nFeatures_); + for (int f = 0; f < nFeatures_; f++) { + // Suppose you look up in “statesMap” by the feature name, or read directly from X. + // We'll assume states_[f] = max value in X[f] + 1. + auto maxIt = std::max_element(X[f].begin(), X[f].end()); + states_[f] = (*maxIt) + 1; + } + // For the class: states_.back() = max(y)+1 + statesClass_ = (*std::max_element(y.begin(), y.end())) + 1; + + // Initialize counts + classCounts_.resize(statesClass_, 0.0); + // p(x_sp = spVal | c) + // We'll store these counts in spFeatureCounts_[spVal * statesClass_ + c]. + spFeatureCounts_.resize(states_[superParent_] * statesClass_, 0.0); + + // For each child ≠ sp, we store p(childVal| c, spVal) in a separate block of childCounts_. + // childCounts_ will be sized as sum_{child≠sp} (states_[child] * statesClass_ * states_[sp]). + // We also need an offset for each child to index into childCounts_. 
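// Editor's note (index-layout illustration, using the members declared further
// down in this class): every child feature f != superParent_ owns a contiguous
// block of states_[f] * statesClass_ * states_[superParent_] doubles inside
// childCounts_, and the cell for (spVal, childVal, c) is addressed as
//     idx = childOffsets_[f]
//         + spVal * (states_[f] * statesClass_)   // block for this super-parent value
//         + childVal * statesClass_               // row for this child value
//         + c;                                    // class column
// addSample() increments childCounts_[idx] and, after computeProbabilities(),
// predict_proba() reads childProbs_[idx] at the same position.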
+ childOffsets_.resize(nFeatures_, -1); + int totalSize = 0; + for (int f = 0; f < nFeatures_; f++) { + if (f == superParent_) continue; // skip sp + childOffsets_[f] = totalSize; + // block size for this child's counts: states_[f] * statesClass_ * states_[superParent_] + totalSize += (states_[f] * statesClass_ * states_[superParent_]); + } + childCounts_.resize(totalSize, 0.0); + + // Accumulate raw counts + for (int n = 0; n < numInstances; n++) { + std::vector instance(nFeatures_ + 1); + for (int f = 0; f < nFeatures_; f++) { + instance[f] = X[f][n]; + } + instance[nFeatures_] = y[n]; + addSample(instance, weights[n].item()); + } + + switch (smoothing) { + case bayesnet::Smoothing_t::ORIGINAL: + alpha_ = 1.0 / numInstances; + break; + case bayesnet::Smoothing_t::LAPLACE: + alpha_ = 1.0; + break; + default: + alpha_ = 0.0; // No smoothing + } + initializer_ = initializer_ = std::numeric_limits::max() / (nFeatures_ * nFeatures_); + // Convert raw counts to probabilities + computeProbabilities(); + fitted_ = true; + } + + // -------------------------------------- + // addSample (only valid in COUNTS mode) + // -------------------------------------- + // + // instance has size nFeatures_ + 1, with the class at the end. + // We add 1 to the appropriate counters for each (c, superParentVal, childVal). + // + void addSample(const std::vector& instance, double weight) + { + if (weight <= 0.0) return; + + int c = instance.back(); + // (A) increment classCounts + classCounts_[c] += weight; + + // (B) increment super-parent counts => p(x_sp | c) + int spVal = instance[superParent_]; + spFeatureCounts_[spVal * statesClass_ + c] += weight; + + // (C) increment child counts => p(childVal | c, x_sp) + for (int f = 0; f < nFeatures_; f++) { + if (f == superParent_) continue; + int childVal = instance[f]; + int offset = childOffsets_[f]; + // Compute index in childCounts_. + // Layout: [ offset + (spVal * states_[f] + childVal) * statesClass_ + c ] + int blockSize = states_[f] * statesClass_; + int idx = offset + spVal * blockSize + childVal * statesClass_ + c; + childCounts_[idx] += weight; + } + } + + // -------------------------------------- + // computeProbabilities + // -------------------------------------- + // + // Once all samples are added in COUNTS mode, call this to: + // p(c) + // p(x_sp = spVal | c) + // p(x_child = v | c, x_sp = s_sp) + // + // We store them in the corresponding *Probs_ arrays for inference. + // -------------------------------------- + void computeProbabilities() + { + double totalCount = std::accumulate(classCounts_.begin(), classCounts_.end(), 0.0); + + // p(c) => classPriors_ + classPriors_.resize(statesClass_, 0.0); + if (totalCount <= 0.0) { + // fallback => uniform + double unif = 1.0 / static_cast(statesClass_); + for (int c = 0; c < statesClass_; c++) { + classPriors_[c] = unif; + } + } else { + for (int c = 0; c < statesClass_; c++) { + classPriors_[c] = (classCounts_[c] + alpha_) + / (totalCount + alpha_ * statesClass_); + } + } + + // p(x_sp | c) + spFeatureProbs_.resize(spFeatureCounts_.size()); + // denominator for spVal * statesClass_ + c is just classCounts_[c] + alpha_ * (#states of sp) + int spCard = states_[superParent_]; + for (int spVal = 0; spVal < spCard; spVal++) { + for (int c = 0; c < statesClass_; c++) { + double denom = classCounts_[c] + alpha_ * spCard; + double num = spFeatureCounts_[spVal * statesClass_ + c] + alpha_; + spFeatureProbs_[spVal * statesClass_ + c] = (denom <= 0.0 ? 
0.0 : num / denom); + } + } + + // p(x_child | c, x_sp) + childProbs_.resize(childCounts_.size()); + for (int f = 0; f < nFeatures_; f++) { + if (f == superParent_) continue; + int offset = childOffsets_[f]; + int childCard = states_[f]; + + // For each spVal, c, childVal in childCounts_: + for (int spVal = 0; spVal < spCard; spVal++) { + for (int childVal = 0; childVal < childCard; childVal++) { + for (int c = 0; c < statesClass_; c++) { + int idx = offset + spVal * (childCard * statesClass_) + + childVal * statesClass_ + + c; + + double num = childCounts_[idx] + alpha_; + // denominator = spFeatureCounts_[spVal * statesClass_ + c] + alpha_ * (#states of child) + double denom = spFeatureCounts_[spVal * statesClass_ + c] + + alpha_ * childCard; + childProbs_[idx] = (denom <= 0.0 ? 0.0 : num / denom); + } + } + } + } + + } + + // -------------------------------------- + // predict_proba + // -------------------------------------- + // + // For a single instance x of dimension nFeatures_: + // P(c | x) ∝ p(c) × p(x_sp | c) × ∏(child ≠ sp) p(x_child | c, x_sp). + // + // Then we normalize the result. + // -------------------------------------- + std::vector predict_proba(const std::vector& instance) const + { + std::vector probs(statesClass_, 0.0); + + // Multiply p(c) × p(x_sp | c) + int spVal = instance[superParent_]; + for (int c = 0; c < statesClass_; c++) { + double pc = classPriors_[c]; + double pSpC = spFeatureProbs_[spVal * statesClass_ + c]; + probs[c] = pc * pSpC * initializer_; + } + + // Multiply by each child’s probability p(x_child | c, x_sp) + for (int feature = 0; feature < nFeatures_; feature++) { + if (feature == superParent_) continue; // skip sp + int sf = instance[feature]; + int offset = childOffsets_[feature]; + int childCard = states_[feature]; // not used directly, but for clarity + // Index into childProbs_ = offset + spVal*(childCard*statesClass_) + childVal*statesClass_ + c + int base = offset + spVal * (childCard * statesClass_) + sf * statesClass_; + for (int c = 0; c < statesClass_; c++) { + probs[c] *= childProbs_[base + c]; + } + } + + // Normalize + normalize(probs); + return probs; + } + std::vector> predict_proba(const std::vector>& test_data) + { + int test_size = test_data[0].size(); + int sample_size = test_data.size(); + auto probabilities = std::vector>(test_size, std::vector(statesClass_)); + + int chunk_size = std::min(150, int(test_size / semaphore_.getMaxCount()) + 1); + std::vector threads; + auto worker = [&](const std::vector>& samples, int begin, int chunk, int sample_size, std::vector>& predictions) { + std::string threadName = "(V)PWorker-" + std::to_string(begin) + "-" + std::to_string(chunk); +#if defined(__linux__) + pthread_setname_np(pthread_self(), threadName.c_str()); +#else + pthread_setname_np(threadName.c_str()); +#endif + + std::vector instance(sample_size); + for (int sample = begin; sample < begin + chunk; ++sample) { + for (int feature = 0; feature < sample_size; ++feature) { + instance[feature] = samples[feature][sample]; + } + predictions[sample] = predict_proba(instance); + } + semaphore_.release(); + }; + for (int begin = 0; begin < test_size; begin += chunk_size) { + int chunk = std::min(chunk_size, test_size - begin); + semaphore_.acquire(); + threads.emplace_back(worker, test_data, begin, chunk, sample_size, std::ref(probabilities)); + } + for (auto& thread : threads) { + thread.join(); + } + return probabilities; + } + + // -------------------------------------- + // predict + // -------------------------------------- + 
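// Editor's note on predict_proba above (made-up numbers, for illustration):
// with two classes, P(c) = {0.6, 0.4}, p(x_sp | c) = {0.5, 0.25} and a single
// child with p(x_child | c, x_sp) = {0.2, 0.8}, the unnormalized scores are
// 0.6 * 0.5 * 0.2 = 0.06 and 0.4 * 0.25 * 0.8 = 0.08, which normalize() turns
// into {0.429, 0.571}. The initializer_ factor multiplies every class equally,
// so it cancels in the normalization; it only keeps long products of small
// probabilities away from floating-point underflow.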
// + // Return the class argmax( P(c|x) ). + // -------------------------------------- + int predict(const std::vector& instance) const + { + auto p = predict_proba(instance); + return static_cast(std::distance(p.begin(), + std::max_element(p.begin(), p.end()))); + } + std::vector predict(std::vector>& test_data) + { + if (!fitted_) { + throw std::logic_error(CLASSIFIER_NOT_FITTED); + } + auto probabilities = predict_proba(test_data); + std::vector predictions(probabilities.size(), 0); + + for (size_t i = 0; i < probabilities.size(); i++) { + predictions[i] = std::distance(probabilities[i].begin(), std::max_element(probabilities[i].begin(), probabilities[i].end())); + } + + return predictions; + } + + // -------------------------------------- + // Utility: normalize + // -------------------------------------- + void normalize(std::vector& v) const + { + double sum = 0.0; + for (auto val : v) { sum += val; } + if (sum <= 0.0) { + return; + } + for (auto& val : v) { + val /= sum; + } + } + + // -------------------------------------- + // debug printing, if desired + // -------------------------------------- + std::string to_string() const + { + std::ostringstream oss; + oss << "---- SPODE Model ----\n" + << "nFeatures_ = " << nFeatures_ << "\n" + << "superParent_ = " << superParent_ << "\n" + << "statesClass_ = " << statesClass_ << "\n" + << "\n"; + + oss << "States: ["; + for (int s : states_) oss << s << " "; + oss << "]\n"; + + oss << "classCounts_: ["; + for (double c : classCounts_) oss << c << " "; + oss << "]\n"; + + oss << "classPriors_: ["; + for (double c : classPriors_) oss << c << " "; + oss << "]\n"; + + oss << "spFeatureCounts_: size = " << spFeatureCounts_.size() << "\n["; + for (double c : spFeatureCounts_) oss << c << " "; + oss << "]\n"; + + oss << "spFeatureProbs_: size = " << spFeatureProbs_.size() << "\n["; + for (double c : spFeatureProbs_) oss << c << " "; + oss << "]\n"; + + oss << "childCounts_: size = " << childCounts_.size() << "\n["; + for (double cc : childCounts_) oss << cc << " "; + oss << "]\n"; + + oss << "childProbs_: size = " << childProbs_.size() << "\n["; + for (double cp : childProbs_) oss << cp << " "; + oss << "]\n"; + + oss << "childOffsets_: ["; + for (int co : childOffsets_) oss << co << " "; + oss << "]\n"; + + oss << "---------------------\n"; + return oss.str(); + } + int statesClass() const { return statesClass_; } + int getNFeatures() const { return nFeatures_; } + int getNumberOfStates() const + { + return std::accumulate(states_.begin(), states_.end(), 0) * nFeatures_; + } + int getNumberOfEdges() const + { + return nFeatures_ * (2 * nFeatures_ - 1); + } + std::vector& getStates() { return states_; } + + private: + // -------------------------------------- + // MEMBERS + // -------------------------------------- + + int superParent_; // which feature is the single super-parent + int nFeatures_; + int statesClass_; + bool fitted_ = false; + std::vector states_; // [states_feat0, ..., states_feat(N-1)] (class not included in this array) + + const std::string CLASSIFIER_NOT_FITTED = "Classifier has not been fitted"; + + // Class counts + std::vector classCounts_; // [c], accumulative + std::vector classPriors_; // [c], after normalization + + // For p(x_sp = spVal | c) + std::vector spFeatureCounts_; // [spVal * statesClass_ + c] + std::vector spFeatureProbs_; // same shape, after normalization + + // For p(x_child = childVal | x_sp = spVal, c) + // childCounts_ is big enough to hold all child features except sp: + // For each child f, we store 
childOffsets_[f] as the start index, then + // childVal, spVal, c => the data. + std::vector childCounts_; + std::vector childProbs_; + std::vector childOffsets_; + + double alpha_ = 1.0; + double initializer_; // for numerical stability + CountingSemaphore& semaphore_; + }; + +} // namespace platform + +#endif // XSPODE_H diff --git a/src/experimental_clfs/Xaode.hpp b/src/experimental_clfs/Xaode.hpp new file mode 100644 index 0000000..76d2bf6 --- /dev/null +++ b/src/experimental_clfs/Xaode.hpp @@ -0,0 +1,478 @@ +// *************************************************************** +// SPDX-FileCopyrightText: Copyright 2025 Ricardo Montañana Gómez +// SPDX-FileType: SOURCE +// SPDX-License-Identifier: MIT +// *************************************************************** +// Based on the Geoff. I. Webb A1DE java algorithm +// https://weka.sourceforge.io/packageMetaData/AnDE/Latest.html + +#ifndef XAODE_H +#define XAODE_H +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace platform { + class Xaode { + public: + // ------------------------------------------------------- + // The Xaode can be EMPTY (just created), in COUNTS mode (accumulating raw counts) + // or PROBS mode (storing conditional probabilities). + enum class MatrixState { + EMPTY, + COUNTS, + PROBS + }; + std::vector significance_models_; + Xaode() : nFeatures_{ 0 }, statesClass_{ 0 }, matrixState_{ MatrixState::EMPTY } {} + // ------------------------------------------------------- + // fit + // ------------------------------------------------------- + // + // Classifiers interface + // all parameter decide if the model is initialized with all the parents active or none of them + // + // states.size() = nFeatures + 1, + // where states.back() = number of class states. + // + // We'll store: + // 1) p(x_i=si | c) in classFeatureProbs_ + // 2) p(x_j=sj | c, x_i=si) in data_, with i i is "superparent," j is "child." + // + // Internally, in COUNTS mode, data_ accumulates raw counts, then + // computeProbabilities(...) normalizes them into conditionals. + void fit(std::vector>& X, std::vector& y, const std::vector& features, const std::string& className, std::map>& states, const torch::Tensor& weights, const bool all_parents, const bayesnet::Smoothing_t smoothing) + { + int num_instances = X[0].size(); + nFeatures_ = X.size(); + + significance_models_.resize(nFeatures_, (all_parents ? 1.0 : 0.0)); + for (int i = 0; i < nFeatures_; i++) { + if (all_parents) active_parents.push_back(i); + states_.push_back(*max_element(X[i].begin(), X[i].end()) + 1); + } + states_.push_back(*max_element(y.begin(), y.end()) + 1); + // + statesClass_ = states_.back(); + classCounts_.resize(statesClass_, 0.0); + classPriors_.resize(statesClass_, 0.0); + // + // Initialize data structures + // + active_parents.resize(nFeatures_); + int totalStates = std::accumulate(states_.begin(), states_.end(), 0) - statesClass_; + + // For p(x_i=si | c), we store them in a 1D array classFeatureProbs_ after we compute. + // We'll need the offsets for each feature i in featureClassOffset_. 
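// Editor's sketch (assumed reconstruction; part of the original setup appears
// to be missing from this rendering of the patch, and the member names are the
// ones used by addSample and computeProbabilities below): featureClassOffset_[i]
// is the running sum of the states of features 0..i-1, so
// featureClassOffset_[i] + value enumerates every (feature, value) pair, and
// pairOffset_ gives each such pair the start of its block of child cells:
//     int offset = 0;
//     for (int i = 0; i < nFeatures_; ++i) {
//         featureClassOffset_[i] = offset;
//         offset += states_[i];
//     }
//     pairOffset_.resize(offset);
//     int pair = 0;
//     for (int i = 0; i < nFeatures_; ++i) {
//         for (int v = 0; v < states_[i]; ++v) {
//             pairOffset_[featureClassOffset_[i] + v] = pair;
//             pair += featureClassOffset_[i];   // room for every value of every child j < i
//         }
//     }
//     data_.resize(pair * statesClass_);
//     dataOpp_.resize(pair * statesClass_);
//     classFeatureCounts_.resize(offset * statesClass_);
//     classFeatureProbs_.resize(offset * statesClass_);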
+ featureClassOffset_.resize(nFeatures_); + // We'll store p(x_child=sj | c, x_sp=si) for each pair (i instance(nFeatures_ + 1); + for (int n_instance = 0; n_instance < num_instances; n_instance++) { + for (int feature = 0; feature < nFeatures_; feature++) { + instance[feature] = X[feature][n_instance]; + } + instance[nFeatures_] = y[n_instance]; + addSample(instance, weights[n_instance].item()); + } + switch (smoothing) { + case bayesnet::Smoothing_t::ORIGINAL: + alpha_ = 1.0 / num_instances; + break; + case bayesnet::Smoothing_t::LAPLACE: + alpha_ = 1.0; + break; + default: + alpha_ = 0.0; // No smoothing + } + initializer_ = std::numeric_limits::max() / (nFeatures_ * nFeatures_); + computeProbabilities(); + } + std::string to_string() const + { + std::ostringstream ostream; + ostream << "-------- Xaode.status --------" << std::endl + << "- nFeatures = " << nFeatures_ << std::endl + << "- statesClass = " << statesClass_ << std::endl + << "- matrixState = " << (matrixState_ == MatrixState::COUNTS ? "COUNTS" : "PROBS") << std::endl; + ostream << "- states: size: " << states_.size() << std::endl; + for (int s : states_) ostream << s << " "; ostream << std::endl; + ostream << "- classCounts: size: " << classCounts_.size() << std::endl; + for (double cc : classCounts_) ostream << cc << " "; ostream << std::endl; + ostream << "- classPriors: size: " << classPriors_.size() << std::endl; + for (double cp : classPriors_) ostream << cp << " "; ostream << std::endl; + ostream << "- classFeatureCounts: size: " << classFeatureCounts_.size() << std::endl; + for (double cfc : classFeatureCounts_) ostream << cfc << " "; ostream << std::endl; + ostream << "- classFeatureProbs: size: " << classFeatureProbs_.size() << std::endl; + for (double cfp : classFeatureProbs_) ostream << cfp << " "; ostream << std::endl; + ostream << "- featureClassOffset: size: " << featureClassOffset_.size() << std::endl; + for (int f : featureClassOffset_) ostream << f << " "; ostream << std::endl; + ostream << "- pairOffset_: size: " << pairOffset_.size() << std::endl; + for (int p : pairOffset_) ostream << p << " "; ostream << std::endl; + ostream << "- data: size: " << data_.size() << std::endl; + for (double d : data_) ostream << d << " "; ostream << std::endl; + ostream << "- dataOpp: size: " << dataOpp_.size() << std::endl; + for (double d : dataOpp_) ostream << d << " "; ostream << std::endl; + ostream << "--------------------------------" << std::endl; + std::string output = ostream.str(); + return output; + } + // ------------------------------------------------------- + // addSample (only in COUNTS mode) + // ------------------------------------------------------- + // + // instance should have the class at the end. + // + void addSample(const std::vector& instance, double weight) + { + // + // (A) increment classCounts_ + // (B) increment feature–class counts => for p(x_i|c) + // (C) increment pair (superparent= i, child= j) counts => data_ + // + int c = instance.back(); + if (weight <= 0.0) { + return; + } + // (A) increment classCounts_ + classCounts_[c] += weight; + + // (B,C) + // We'll store raw counts now and turn them into p(child| c, superparent) later. 
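// Editor's worked example (made-up numbers): with two binary features x0, x1,
// two classes and the weighted samples (x0=1, x1=0, c=1, w=1.0) and
// (x0=1, x1=1, c=1, w=0.5), addSample leaves classCounts_[1] = 1.5, the
// feature/class cells (x0=1, c=1) = 1.5, (x1=0, c=1) = 1.0, (x1=1, c=1) = 0.5,
// and the pair cell (parent = x1=1, child = x0=1, c=1) = 0.5.
// computeProbabilities() below then yields, with Laplace smoothing (alpha_ = 1),
//     p(x0=1 | c=1, x1=1) = (0.5 + 1) / (0.5 + 1 * 2) = 0.6
// stored in data_, and the opposite conditional p(x1=1 | c=1, x0=1) in dataOpp_.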
+ int idx, fcIndex, sp, sc, i_offset; + for (int parent = 0; parent < nFeatures_; ++parent) { + sp = instance[parent]; + // (B) increment feature–class counts => for p(x_i|c) + fcIndex = (featureClassOffset_[parent] + sp) * statesClass_ + c; + classFeatureCounts_[fcIndex] += weight; + // (C) increment pair (superparent= i, child= j) counts => data_ + i_offset = pairOffset_[featureClassOffset_[parent] + sp]; + for (int child = 0; child < parent; ++child) { + sc = instance[child]; + idx = (i_offset + featureClassOffset_[child] + sc) * statesClass_ + c; + data_[idx] += weight; + } + } + } + // ------------------------------------------------------- + // computeProbabilities + // ------------------------------------------------------- + // + // Once all samples are added in COUNTS mode, call this to: + // 1) compute p(c) => classPriors_ + // 2) compute p(x_i=si | c) => classFeatureProbs_ + // 3) compute p(x_j=sj | c, x_i=si) => data_ (for ij) + // + void computeProbabilities() + { + if (matrixState_ != MatrixState::COUNTS) { + throw std::logic_error("computeProbabilities: must be in COUNTS mode."); + } + double totalCount = std::accumulate(classCounts_.begin(), classCounts_.end(), 0.0); + // (1) p(c) + if (totalCount <= 0.0) { + // fallback => uniform + double unif = 1.0 / statesClass_; + for (int c = 0; c < statesClass_; ++c) { + classPriors_[c] = unif; + } + } else { + for (int c = 0; c < statesClass_; ++c) { + classPriors_[c] = (classCounts_[c] + alpha_) / (totalCount + alpha_ * statesClass_); + } + } + // (2) p(x_i=si | c) => classFeatureProbs_ + int idx, sf; + double denom; + for (int feature = 0; feature < nFeatures_; ++feature) { + sf = states_[feature]; + for (int c = 0; c < statesClass_; ++c) { + denom = classCounts_[c] + alpha_ * sf; + for (int sf_value = 0; sf_value < sf; ++sf_value) { + idx = (featureClassOffset_[feature] + sf_value) * statesClass_ + c; + classFeatureProbs_[idx] = (classFeatureCounts_[idx] + alpha_) / denom; + } + } + } + // getCountFromTable(int classVal, int pIndex, int childIndex) + // (3) p(x_c=sc | c, x_p=sp) => data_(parent,sp,child,sc,c) + // (3) p(x_p=sp | c, x_c=sc) => dataOpp_(child,sc,parent,sp,c) + // C(x_c, x_p, c) + alpha_ + // P(x_p | x_c, c) = ----------------------------------- + // C(x_c, c) + alpha_ + double pcc_count, pc_count, cc_count; + double conditionalProb, oppositeCondProb; + int part1, part2, p1, part2_class, p1_class; + for (int parent = 1; parent < nFeatures_; ++parent) { + for (int sp = 0; sp < states_[parent]; ++sp) { + p1 = featureClassOffset_[parent] + sp; + part1 = pairOffset_[p1]; + p1_class = p1 * statesClass_; + for (int child = 0; child < parent; ++child) { + for (int sc = 0; sc < states_[child]; ++sc) { + part2 = featureClassOffset_[child] + sc; + part2_class = part2 * statesClass_; + for (int c = 0; c < statesClass_; c++) { + idx = (part1 + part2) * statesClass_ + c; + // Parent, Child, Class Count + pcc_count = data_[idx]; + // Parent, Class count + pc_count = classFeatureCounts_[p1_class + c]; + // Child, Class count + cc_count = classFeatureCounts_[part2_class + c]; + // p(x_c=sc | c, x_p=sp) + conditionalProb = (pcc_count + alpha_) / (pc_count + alpha_ * states_[child]); + data_[idx] = conditionalProb; + // p(x_p=sp | c, x_c=sc) + oppositeCondProb = (pcc_count + alpha_) / (cc_count + alpha_ * states_[parent]); + dataOpp_[idx] = oppositeCondProb; + } + } + } + } + } + matrixState_ = MatrixState::PROBS; + } + // ------------------------------------------------------- + // predict_proba_spode + // 
------------------------------------------------------- + // + // Single-superparent approach: + // P(c | x) ∝ p(c) * p(x_sp| c) * ∏_{i≠sp} p(x_i | c, x_sp) + // + // 'instance' should have size == nFeatures_ (no class). + // sp in [0..nFeatures_). + // We multiply p(c) * p(x_sp| c) * p(x_i| c, x_sp). + // Then normalize the distribution. + // + std::vector predict_proba_spode(const std::vector& instance, int parent) + { + // accumulates posterior probabilities for each class + auto probs = std::vector(statesClass_); + auto spodeProbs = std::vector(statesClass_, 0.0); + if (std::find(active_parents.begin(), active_parents.end(), parent) == active_parents.end()) { + return spodeProbs; + } + // Initialize the probabilities with the feature|class probabilities x class priors + int localOffset; + int sp = instance[parent]; + localOffset = (featureClassOffset_[parent] + sp) * statesClass_; + for (int c = 0; c < statesClass_; ++c) { + spodeProbs[c] = classFeatureProbs_[localOffset + c] * classPriors_[c] * initializer_; + } + int idx, base, sc, parent_offset; + for (int child = 0; child < nFeatures_; ++child) { + if (child == parent) { + continue; + } + sc = instance[child]; + if (child > parent) { + parent_offset = pairOffset_[featureClassOffset_[child] + sc]; + base = (parent_offset + featureClassOffset_[parent] + sp) * statesClass_; + } else { + parent_offset = pairOffset_[featureClassOffset_[parent] + sp]; + base = (parent_offset + featureClassOffset_[child] + sc) * statesClass_; + } + for (int c = 0; c < statesClass_; ++c) { + /* + * The probability P(xc|xp,c) is stored in dataOpp_, and + * the probability P(xp|xc,c) is stored in data_ + */ + idx = base + c; + double factor = child > parent ? dataOpp_[idx] : data_[idx]; + // double factor = data_[idx]; + spodeProbs[c] *= factor; + } + } + // Normalize the probabilities + normalize(spodeProbs); + return spodeProbs; + } + int predict_spode(const std::vector& instance, int parent) + { + auto probs = predict_proba_spode(instance, parent); + return (int)std::distance(probs.begin(), std::max_element(probs.begin(), probs.end())); + } + // ------------------------------------------------------- + // predict_proba + // ------------------------------------------------------- + // + // P(c | x) ∝ p(c) * ∏_{i} p(x_i | c) * ∏_{i predict_proba(const std::vector& instance) + { + // accumulates posterior probabilities for each class + auto probs = std::vector(statesClass_); + auto spodeProbs = std::vector>(nFeatures_, std::vector(statesClass_)); + // Initialize the probabilities with the feature|class probabilities + int localOffset; + for (int feature = 0; feature < nFeatures_; ++feature) { + // if feature is not in the active_parents, skip it + if (std::find(active_parents.begin(), active_parents.end(), feature) == active_parents.end()) { + continue; + } + localOffset = (featureClassOffset_[feature] + instance[feature]) * statesClass_; + for (int c = 0; c < statesClass_; ++c) { + spodeProbs[feature][c] = classFeatureProbs_[localOffset + c] * classPriors_[c] * initializer_; + } + } + int idx, base, sp, sc, parent_offset; + for (int parent = 1; parent < nFeatures_; ++parent) { + // if parent is not in the active_parents, skip it + if (std::find(active_parents.begin(), active_parents.end(), parent) == active_parents.end()) { + continue; + } + sp = instance[parent]; + parent_offset = pairOffset_[featureClassOffset_[parent] + sp]; + for (int child = 0; child < parent; ++child) { + sc = instance[child]; + if (child > parent) { + parent_offset = 
pairOffset_[featureClassOffset_[child] + sc]; + base = (parent_offset + featureClassOffset_[parent] + sp) * statesClass_; + } else { + parent_offset = pairOffset_[featureClassOffset_[parent] + sp]; + base = (parent_offset + featureClassOffset_[child] + sc) * statesClass_; + } + for (int c = 0; c < statesClass_; ++c) { + /* + * The probability P(xc|xp,c) is stored in dataOpp_, and + * the probability P(xp|xc,c) is stored in data_ + */ + idx = base + c; + double factor_child = child > parent ? data_[idx] : dataOpp_[idx]; + double factor_parent = child > parent ? dataOpp_[idx] : data_[idx]; + spodeProbs[child][c] *= factor_child; + spodeProbs[parent][c] *= factor_parent; + } + } + } + /* add all the probabilities for each class */ + for (int c = 0; c < statesClass_; ++c) { + for (int i = 0; i < nFeatures_; ++i) { + probs[c] += spodeProbs[i][c] * significance_models_[i]; + } + } + // Normalize the probabilities + normalize(probs); + return probs; + } + void normalize(std::vector& probs) const + { + double sum = std::accumulate(probs.begin(), probs.end(), 0.0); + if (std::isnan(sum)) { + throw std::runtime_error("Can't normalize array. Sum is NaN."); + } + if (sum == 0) { + return; + } + for (int i = 0; i < (int)probs.size(); i++) { + probs[i] /= sum; + } + } + // Returns current mode: INIT, COUNTS or PROBS + MatrixState state() const + { + return matrixState_; + } + int statesClass() const + { + return statesClass_; + } + int nFeatures() const + { + return nFeatures_; + } + int getNumberOfStates() const + { + return std::accumulate(states_.begin(), states_.end(), 0) * nFeatures_; + } + int getNumberOfEdges() const + { + return nFeatures_ * (2 * nFeatures_ - 1); + } + int getNumberOfNodes() const + { + return (nFeatures_ + 1) * nFeatures_; + } + void add_active_parent(int active_parent) + { + active_parents.push_back(active_parent); + } + void remove_last_parent() + { + active_parents.pop_back(); + } + + private: + // ----------- + // MEMBER DATA + // ----------- + std::vector states_; // [states_feat0, ..., states_feat(n-1), statesClass_] + int nFeatures_; + int statesClass_; + + // data_ means p(child=sj | c, superparent= si) after normalization. + // But in COUNTS mode, it accumulates raw counts. 
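predict_proba() above mixes one posterior per active superparent, weighted by its significance, and then normalizes. Below is a toy, self-contained version of that final aggregation step; the per-SPODE scores and weights are invented.

```cpp
// Aggregation sketch: sum significance-weighted SPODE scores per class,
// then normalize so the posterior sums to one.
#include <iostream>
#include <numeric>
#include <vector>

int main() {
    // Unnormalized per-class scores from two hypothetical SPODEs.
    std::vector<std::vector<double>> spodeProbs = { { 0.02, 0.06 },
                                                    { 0.01, 0.03 } };
    std::vector<double> significance = { 1.0, 1.0 };

    std::vector<double> probs(2, 0.0);
    for (size_t c = 0; c < probs.size(); ++c)
        for (size_t p = 0; p < spodeProbs.size(); ++p)
            probs[c] += spodeProbs[p][c] * significance[p];

    double sum = std::accumulate(probs.begin(), probs.end(), 0.0);
    if (sum > 0) {
        for (auto& v : probs) v /= sum;
    }
    std::cout << "p(c=0|x) = " << probs[0] << ", p(c=1|x) = " << probs[1] << "\n";
    // -> 0.25 and 0.75 for these made-up numbers
    return 0;
}
```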
+ std::vector pairOffset_; + // data_ stores p(child=sj | c, superparent=si) for each pair (i data_; + // dataOpp_ stores p(superparent=si | c, child=sj) for each pair (i dataOpp_; + + // classCounts_[c] + std::vector classCounts_; + std::vector classPriors_; // => p(c) + + // For p(x_i=si| c), we store counts in classFeatureCounts_ => offset by featureClassOffset_[i] + std::vector featureClassOffset_; + std::vector classFeatureCounts_; + std::vector classFeatureProbs_; // => p(x_i=si | c) after normalization + + MatrixState matrixState_; + + double alpha_ = 1.0; // Laplace smoothing + double initializer_ = 1.0; + std::vector active_parents; + }; +} +#endif // XAODE_H \ No newline at end of file diff --git a/src/grid/GridBase.cpp b/src/grid/GridBase.cpp index 3626db7..85bb906 100644 --- a/src/grid/GridBase.cpp +++ b/src/grid/GridBase.cpp @@ -2,9 +2,10 @@ #include #include "common/DotEnv.h" #include "common/Paths.h" -#include "common/DotEnv.h" +#include "common/Colors.h" #include "GridBase.h" + namespace platform { GridBase::GridBase(struct ConfigGrid& config) @@ -63,13 +64,11 @@ namespace platform { * This way a task consists in process all combinations of hyperparameters for a dataset, seed and fold */ auto tasks = json::array(); - auto grid = GridData(Paths::grid_input(config.model)); auto all_datasets = datasets.getNames(); auto datasets_names = filterDatasets(datasets); for (int idx_dataset = 0; idx_dataset < datasets_names.size(); ++idx_dataset) { auto dataset = datasets_names[idx_dataset]; for (const auto& seed : config.seeds) { - auto combinations = grid.getGrid(dataset); for (int n_fold = 0; n_fold < config.n_folds; n_fold++) { json task = { { "dataset", dataset }, @@ -312,4 +311,4 @@ namespace platform { } } -} \ No newline at end of file +} diff --git a/src/grid/GridBase.h b/src/grid/GridBase.h index e496bca..b59088a 100644 --- a/src/grid/GridBase.h +++ b/src/grid/GridBase.h @@ -1,16 +1,12 @@ #ifndef GRIDBASE_H #define GRIDBASE_H #include -#include #include #include #include "common/Datasets.h" -#include "common/Timer.h" -#include "common/Colors.h" +#include "common/Timer.hpp" #include "main/HyperParameters.h" -#include "GridData.h" #include "GridConfig.h" -#include "bayesnet/network/Network.h" namespace platform { @@ -40,4 +36,4 @@ namespace platform { bayesnet::Smoothing_t smooth_type{ bayesnet::Smoothing_t::NONE }; }; } /* namespace platform */ -#endif \ No newline at end of file +#endif diff --git a/src/grid/GridConfig.h b/src/grid/GridConfig.h index a9159f0..6a1acb5 100644 --- a/src/grid/GridConfig.h +++ b/src/grid/GridConfig.h @@ -5,7 +5,7 @@ #include #include #include "common/Datasets.h" -#include "common/Timer.h" +#include "common/Timer.hpp" #include "main/HyperParameters.h" #include "GridData.h" #include "GridConfig.h" diff --git a/src/grid/GridExperiment.h b/src/grid/GridExperiment.h index 81503e3..7329d8a 100644 --- a/src/grid/GridExperiment.h +++ b/src/grid/GridExperiment.h @@ -1,18 +1,14 @@ #ifndef GRIDEXPERIMENT_H #define GRIDEXPERIMENT_H #include -#include #include #include #include #include "common/Datasets.h" -#include "common/DotEnv.h" #include "main/Experiment.h" #include "main/HyperParameters.h" #include "main/ArgumentsExperiment.h" -#include "GridData.h" #include "GridBase.h" -#include "bayesnet/network/Network.h" namespace platform { @@ -39,4 +35,4 @@ namespace platform { void consumer_go(struct ConfigGrid& config, struct ConfigMPI& config_mpi, json& tasks, int n_task, Datasets& datasets, Task_Result* result); }; } /* namespace platform */ -#endif \ No 
newline at end of file +#endif diff --git a/src/grid/GridSearch.cpp b/src/grid/GridSearch.cpp index 6fd955d..0a8010e 100644 --- a/src/grid/GridSearch.cpp +++ b/src/grid/GridSearch.cpp @@ -1,10 +1,10 @@ #include -#include #include #include #include "main/Models.h" #include "common/Paths.h" #include "common/Utils.h" +#include "common/Colors.h" #include "GridSearch.h" namespace platform { @@ -256,4 +256,4 @@ namespace platform { // std::cout << get_color_rank(config_mpi.rank) << std::flush; } -} /* namespace platform */ \ No newline at end of file +} /* namespace platform */ diff --git a/src/grid/GridSearch.h b/src/grid/GridSearch.h index 6f8ab37..2797b78 100644 --- a/src/grid/GridSearch.h +++ b/src/grid/GridSearch.h @@ -6,7 +6,7 @@ #include #include #include "common/Datasets.h" -#include "common/Timer.h" +#include "common/Timer.hpp" #include "main/HyperParameters.h" #include "GridData.h" #include "GridBase.h" diff --git a/src/main/ArgumentsExperiment.cpp b/src/main/ArgumentsExperiment.cpp index d66a5d0..aa8199e 100644 --- a/src/main/ArgumentsExperiment.cpp +++ b/src/main/ArgumentsExperiment.cpp @@ -178,6 +178,11 @@ namespace platform { } } filesToTest = file_names; + sort(filesToTest.begin(), filesToTest.end(), [](const auto& lhs, const auto& rhs) { + const auto result = mismatch(lhs.cbegin(), lhs.cend(), rhs.cbegin(), rhs.cend(), [](const auto& lhs, const auto& rhs) {return tolower(lhs) == tolower(rhs);}); + + return result.second != rhs.cend() && (result.first == lhs.cend() || tolower(*result.first) < tolower(*result.second)); + }); saveResults = true; if (title == "") { title = "Test " + to_string(file_names.size()) + " datasets " + model_name + " " + to_string(n_folds) + " folds"; diff --git a/src/main/Experiment.cpp b/src/main/Experiment.cpp index 0f54baf..06cbc41 100644 --- a/src/main/Experiment.cpp +++ b/src/main/Experiment.cpp @@ -82,8 +82,6 @@ namespace platform { std::cout << Colors::RESET() << std::endl; } int num = 0; - // Sort files to test to have a consistent order even if --datasets is used - std::stable_sort(filesToTest.begin(), filesToTest.end()); for (auto fileName : filesToTest) { if (!quiet) std::cout << " " << setw(3) << right << num++ << " " << setw(max_name) << left << fileName << right << flush; diff --git a/src/main/Models.h b/src/main/Models.h index e8eb58f..565a96d 100644 --- a/src/main/Models.h +++ b/src/main/Models.h @@ -5,11 +5,15 @@ #include #include #include +#include +#include #include #include #include #include #include +#include +#include #include #include #include @@ -20,6 +24,8 @@ #include #include #include +#include "../experimental_clfs/XA1DE.h" + namespace platform { class Models { public: @@ -42,4 +48,4 @@ namespace platform { Registrar(const std::string& className, function classFactoryFunction); }; } -#endif \ No newline at end of file +#endif diff --git a/src/main/modelRegister.h b/src/main/modelRegister.h index 4578ef2..0dbd269 100644 --- a/src/main/modelRegister.h +++ b/src/main/modelRegister.h @@ -1,39 +1,49 @@ #ifndef MODELREGISTER_H #define MODELREGISTER_H - -static platform::Registrar registrarT("TAN", - [](void) -> bayesnet::BaseClassifier* { return new bayesnet::TAN();}); -static platform::Registrar registrarTLD("TANLd", - [](void) -> bayesnet::BaseClassifier* { return new bayesnet::TANLd();}); -static platform::Registrar registrarS("SPODE", - [](void) -> bayesnet::BaseClassifier* { return new bayesnet::SPODE(2);}); -static platform::Registrar registrarSn("SPnDE", - [](void) -> bayesnet::BaseClassifier* { return new bayesnet::SPnDE({ 0, 1 
});}); -static platform::Registrar registrarSLD("SPODELd", - [](void) -> bayesnet::BaseClassifier* { return new bayesnet::SPODELd(2);}); -static platform::Registrar registrarK("KDB", - [](void) -> bayesnet::BaseClassifier* { return new bayesnet::KDB(2);}); -static platform::Registrar registrarKLD("KDBLd", - [](void) -> bayesnet::BaseClassifier* { return new bayesnet::KDBLd(2);}); -static platform::Registrar registrarA("AODE", - [](void) -> bayesnet::BaseClassifier* { return new bayesnet::AODE();}); -static platform::Registrar registrarA2("A2DE", - [](void) -> bayesnet::BaseClassifier* { return new bayesnet::A2DE();}); -static platform::Registrar registrarALD("AODELd", - [](void) -> bayesnet::BaseClassifier* { return new bayesnet::AODELd();}); -static platform::Registrar registrarBA("BoostAODE", - [](void) -> bayesnet::BaseClassifier* { return new bayesnet::BoostAODE();}); -static platform::Registrar registrarBA2("BoostA2DE", - [](void) -> bayesnet::BaseClassifier* { return new bayesnet::BoostA2DE();}); -static platform::Registrar registrarSt("STree", - [](void) -> bayesnet::BaseClassifier* { return new pywrap::STree();}); -static platform::Registrar registrarOdte("Odte", - [](void) -> bayesnet::BaseClassifier* { return new pywrap::ODTE();}); -static platform::Registrar registrarSvc("SVC", - [](void) -> bayesnet::BaseClassifier* { return new pywrap::SVC();}); -static platform::Registrar registrarRaF("RandomForest", - [](void) -> bayesnet::BaseClassifier* { return new pywrap::RandomForest();}); -static platform::Registrar registrarXGB("XGBoost", - [](void) -> bayesnet::BaseClassifier* { return new pywrap::XGBoost();}); - -#endif \ No newline at end of file +namespace platform { + static Registrar registrarT("TAN", + [](void) -> bayesnet::BaseClassifier* { return new bayesnet::TAN();}); + static Registrar registrarTLD("TANLd", + [](void) -> bayesnet::BaseClassifier* { return new bayesnet::TANLd();}); + static Registrar registrarS("SPODE", + [](void) -> bayesnet::BaseClassifier* { return new bayesnet::SPODE(2);}); + static Registrar registrarSn("SPnDE", + [](void) -> bayesnet::BaseClassifier* { return new bayesnet::SPnDE({ 0, 1 });}); + static Registrar registrarSLD("SPODELd", + [](void) -> bayesnet::BaseClassifier* { return new bayesnet::SPODELd(2);}); + static Registrar registrarK("KDB", + [](void) -> bayesnet::BaseClassifier* { return new bayesnet::KDB(2);}); + static Registrar registrarKLD("KDBLd", + [](void) -> bayesnet::BaseClassifier* { return new bayesnet::KDBLd(2);}); + static Registrar registrarA("AODE", + [](void) -> bayesnet::BaseClassifier* { return new bayesnet::AODE();}); + static Registrar registrarA2("A2DE", + [](void) -> bayesnet::BaseClassifier* { return new bayesnet::A2DE();}); + static Registrar registrarALD("AODELd", + [](void) -> bayesnet::BaseClassifier* { return new bayesnet::AODELd();}); + static Registrar registrarBA("BoostAODE", + [](void) -> bayesnet::BaseClassifier* { return new bayesnet::BoostAODE();}); + static Registrar registrarBA2("BoostA2DE", + [](void) -> bayesnet::BaseClassifier* { return new bayesnet::BoostA2DE();}); + static Registrar registrarSt("STree", + [](void) -> bayesnet::BaseClassifier* { return new pywrap::STree();}); + static Registrar registrarOdte("Odte", + [](void) -> bayesnet::BaseClassifier* { return new pywrap::ODTE();}); + static Registrar registrarSvc("SVC", + [](void) -> bayesnet::BaseClassifier* { return new pywrap::SVC();}); + static Registrar registrarRaF("RandomForest", + [](void) -> bayesnet::BaseClassifier* { return new 
pywrap::RandomForest();}); + static Registrar registrarXGB("XGBoost", + [](void) -> bayesnet::BaseClassifier* { return new pywrap::XGBoost();}); + static Registrar registrarXSPODE("XSPODE", + [](void) -> bayesnet::BaseClassifier* { return new bayesnet::XSpode(0);}); + static Registrar registrarXSP2DE("XSP2DE", + [](void) -> bayesnet::BaseClassifier* { return new bayesnet::XSp2de(0, 1);}); + static Registrar registrarXBAODE("XBAODE", + [](void) -> bayesnet::BaseClassifier* { return new bayesnet::XBAODE();}); + static Registrar registrarXBA2DE("XBA2DE", + [](void) -> bayesnet::BaseClassifier* { return new bayesnet::XBA2DE();}); + static Registrar registrarXA1DE("XA1DE", + [](void) -> bayesnet::BaseClassifier* { return new XA1DE();}); +} +#endif diff --git a/src/manage/ManageScreen.cpp b/src/manage/ManageScreen.cpp index 5e30c15..6648d94 100644 --- a/src/manage/ManageScreen.cpp +++ b/src/manage/ManageScreen.cpp @@ -82,10 +82,12 @@ namespace platform { workbook_close(workbook); } if (didExcel) { - std::cout << Colors::MAGENTA() << "Excel file created: " << Paths::excel() + Paths::excelResults() << std::endl; + excelFileName = Paths::excel() + Paths::excelResults(); + std::cout << Colors::MAGENTA() << "Excel file created: " << excelFileName << std::endl; } std::cout << Colors::RESET() << "Done!" << std::endl; } + std::string ManageScreen::getVersions() { std::string kfold_version = folding::KFold(5, 100).version(); @@ -487,20 +489,19 @@ namespace platform { index_A = index; list("A set to " + std::to_string(index), Colors::GREEN()); break; - case 'B': // set_b or back to list - if (output_type == OutputType::EXPERIMENTS) { - if (index == index_A) { - list("A and B cannot be the same!", Colors::RED()); - break; - } - index_B = index; - list("B set to " + std::to_string(index), Colors::GREEN()); - } else { - // back to show the report - output_type = OutputType::RESULT; - paginator[static_cast(OutputType::DETAIL)].setPage(1); - list(STATUS_OK, STATUS_COLOR); + case 'B': // set_b + if (index == index_A) { + list("A and B cannot be the same!", Colors::RED()); + break; } + index_B = index; + list("B set to " + std::to_string(index), Colors::GREEN()); + break; + case 'b': // back to list + // back to show the report + output_type = OutputType::RESULT; + paginator[static_cast(OutputType::DETAIL)].setPage(1); + list(STATUS_OK, STATUS_COLOR); break; case 'c': if (index_A == -1 || index_B == -1) { diff --git a/src/manage/ManageScreen.h b/src/manage/ManageScreen.h index 40cedc6..7e41896 100644 --- a/src/manage/ManageScreen.h +++ b/src/manage/ManageScreen.h @@ -19,6 +19,7 @@ namespace platform { ~ManageScreen() = default; void doMenu(); void updateSize(int rows, int cols); + std::string getExcelFileName() const { return excelFileName; } private: void list(const std::string& status, const std::string& color); void list_experiments(const std::string& status, const std::string& color); @@ -58,6 +59,7 @@ namespace platform { std::vector paginator; ResultsManager results; lxw_workbook* workbook; + std::string excelFileName; }; } #endif \ No newline at end of file diff --git a/src/reports/ReportConsole.cpp b/src/reports/ReportConsole.cpp index ef0eb97..4cfbdc0 100644 --- a/src/reports/ReportConsole.cpp +++ b/src/reports/ReportConsole.cpp @@ -2,7 +2,7 @@ #include #include "best/BestScore.h" #include "common/CLocale.h" -#include "common/Timer.h" +#include "common/Timer.hpp" #include "ReportConsole.h" #include "main/Scores.h" @@ -84,7 +84,7 @@ namespace platform { } std::vector header_labels = { " #", "Dataset", 
"Sampl.", "Feat.", "Cls", nodes_label, leaves_label, depth_label, "Score", "Time", "Hyperparameters" }; sheader << Colors::GREEN(); - std::vector header_lengths = { 3, maxDataset, 6, 5, 3, 9, 9, 9, 15, 20, maxHyper }; + std::vector header_lengths = { 3, maxDataset, 6, 6, 3, 13, 13, 13, 15, 20, maxHyper }; for (int i = 0; i < header_labels.size(); i++) { sheader << std::setw(header_lengths[i]) << std::left << header_labels[i] << " "; } @@ -108,11 +108,11 @@ namespace platform { line << std::setw(3) << std::right << index++ << " "; line << std::setw(maxDataset) << std::left << r["dataset"].get() << " "; line << std::setw(6) << std::right << r["samples"].get() << " "; - line << std::setw(5) << std::right << r["features"].get() << " "; + line << std::setw(6) << std::right << r["features"].get() << " "; line << std::setw(3) << std::right << r["classes"].get() << " "; - line << std::setw(9) << std::setprecision(2) << std::fixed << r["nodes"].get() << " "; - line << std::setw(9) << std::setprecision(2) << std::fixed << r["leaves"].get() << " "; - line << std::setw(9) << std::setprecision(2) << std::fixed << r["depth"].get() << " "; + line << std::setw(13) << std::setprecision(2) << std::fixed << r["nodes"].get() << " "; + line << std::setw(13) << std::setprecision(2) << std::fixed << r["leaves"].get() << " "; + line << std::setw(13) << std::setprecision(2) << std::fixed << r["depth"].get() << " "; line << std::setw(8) << std::right << std::setprecision(6) << std::fixed << r["score"].get() << "±" << std::setw(6) << std::setprecision(4) << std::fixed << r["score_std"].get(); const std::string status = compareResult(r["dataset"].get(), r["score"].get()); line << status; @@ -251,7 +251,7 @@ namespace platform { if (train_data) { oss << color_line << std::left << std::setw(maxLine) << output_train[i] << suffix << Colors::BLUE() << " | " << color_line << std::left << std::setw(maxLine) - << output_test[i] << std::endl; + << output_test[i] << std::endl; } else { oss << color_line << output_test[i] << std::endl; } diff --git a/src/results/Result.h b/src/results/Result.h index 78bfd62..3f45c70 100644 --- a/src/results/Result.h +++ b/src/results/Result.h @@ -4,7 +4,7 @@ #include #include #include -#include "common/Timer.h" +#include "common/Timer.hpp" #include "main/HyperParameters.h" #include "main/PartialResult.h"