From 234342f2de535ae9dbaa3ad4a9c060ab5c22ad3b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20Monta=C3=B1ana=20G=C3=B3mez?=
Date: Sun, 10 Dec 2023 22:33:17 +0100
Subject: [PATCH 01/12] Add mpi parameter to b_grid

---
 src/Platform/GridSearch.h |  4 ++++
 src/Platform/b_grid.cc    | 32 ++++++++++++++++++++++++++++++--
 2 files changed, 34 insertions(+), 2 deletions(-)

diff --git a/src/Platform/GridSearch.h b/src/Platform/GridSearch.h
index e325ca5..70bbf47 100644
--- a/src/Platform/GridSearch.h
+++ b/src/Platform/GridSearch.h
@@ -24,6 +24,10 @@ namespace platform {
         json excluded;
         std::vector seeds;
     };
+    struct ConfigMPI {
+        int rank;
+        int nprocs;
+    }
     class GridSearch {
     public:
         explicit GridSearch(struct ConfigGrid& config);
diff --git a/src/Platform/b_grid.cc b/src/Platform/b_grid.cc
index a5af2a6..947a305 100644
--- a/src/Platform/b_grid.cc
+++ b/src/Platform/b_grid.cc
@@ -31,6 +31,7 @@ void manageArguments(argparse::ArgumentParser& program)
     group.add_argument("--report").help("Report the computed hyperparameters").default_value(false).implicit_value(true);
     group.add_argument("--compute").help("Perform computation of the grid output hyperparameters").default_value(false).implicit_value(true);
     program.add_argument("--discretize").help("Discretize input datasets").default_value((bool)stoi(env.get("discretize"))).implicit_value(true);
+    program.add_argument("--mpi").help("Use MPI computing grid").default_value(false).implicit_value(true);
     program.add_argument("--stratified").help("If Stratified KFold is to be done").default_value((bool)stoi(env.get("stratified"))).implicit_value(true);
     program.add_argument("--quiet").help("Don't display detailed progress").default_value(false).implicit_value(true);
     program.add_argument("--continue").help("Continue computing from that dataset").default_value(platform::GridSearch::NO_CONTINUE());
@@ -138,6 +139,22 @@ void list_results(json& results, std::string& model)
     std::cout << Colors::RESET() << std::endl;
 }
 
+void initialize_mpi(struct platform::ConfigMPI& config)
+{
+    int provided;
+    // MPI_Init_thread(nullptr, nullptr, MPI_THREAD_MULTIPLE, &provided);
+    // if (provided != MPI_THREAD_MULTIPLE) {
+    //     std::cerr << "MPI_Init_thread returned " << provided << " instead of " << MPI_THREAD_MULTIPLE << std::endl;
+    //     exit(1);
+    // }
+    MPI_Init(nullptr, nullptr);
+    int rank, size;
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &size);
+    config.mpi_rank = rank;
+    config.mpi_size = size;
+}
+
 /*
  * Main
  */
@@ -147,6 +164,7 @@ int main(int argc, char** argv)
     argparse::ArgumentParser program("b_grid");
     manageArguments(program);
     struct platform::ConfigGrid config;
+    struct platform::ConfigMPI mpi_config;
     bool dump, compute;
     try {
         program.parse_args(argc, argv);
@@ -170,6 +188,11 @@ int main(int argc, char** argv)
         }
         auto excluded = program.get("exclude");
         config.excluded = json::parse(excluded);
+        if (program.get<bool>("mpi")) {
+            if (!compute) {
+                throw std::runtime_error("Cannot use --mpi without --compute");
+            }
+        }
     }
     catch (const exception& err) {
         cerr << err.what() << std::endl;
@@ -189,8 +212,13 @@ int main(int argc, char** argv)
         list_dump(config.model);
     } else {
         if (compute) {
-            grid_search.go();
-            std::cout << "Process took " << timer.getDurationString() << std::endl;
+            if (program.get<bool>("mpi")) {
+                initialize_mpi(mpi_config);
+                grid_search.setMPIConfig(mpi_config);
+            } else {
+                grid_search.go();
+                std::cout << "Process took " << timer.getDurationString() << std::endl;
+            }
         } else {
             // List results
             auto results = grid_search.getResults();
From 40ae4ad7f9d1a746f1c93f4bdd267dbe64fd1db7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20Montan=CC=83ana?=
Date: Mon, 11 Dec 2023 09:06:05 +0100
Subject: [PATCH 02/12] Include mpi in CMakeLists

---
 CMakeLists.txt         |  8 +++++++-
 README.md              | 10 ++++++++++
 src/Platform/b_grid.cc | 21 ++++++++++++---------
 3 files changed, 29 insertions(+), 10 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4d1bc2a..e33b67c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -25,12 +25,18 @@ set(CMAKE_CXX_EXTENSIONS OFF)
 set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread")
-
 # Options
 # -------
 option(ENABLE_CLANG_TIDY "Enable to add clang tidy." OFF)
 option(ENABLE_TESTING "Unit testing build" OFF)
 option(CODE_COVERAGE "Collect coverage from test library" OFF)
+option(MPI_ENABLED "Enable MPI options" ON)
+
+if (MPI_ENABLED)
+    find_package(MPI REQUIRED)
+    message("MPI_CXX_LIBRARIES=${MPI_CXX_LIBRARIES}")
+    message("MPI_CXX_INCLUDE_DIRS=${MPI_CXX_INCLUDE_DIRS}")
+endif (MPI_ENABLED)
 
 # Boost Library
 set(Boost_USE_STATIC_LIBS OFF)
diff --git a/README.md b/README.md
index 2acf581..ad0dd4a 100644
--- a/README.md
+++ b/README.md
@@ -8,6 +8,16 @@ Bayesian Network Classifier with libtorch from scratch
 
 Before compiling BayesNet.
 
+### MPI
+
+In Linux just install openmpi & openmpi-devel packages.
+
+In Mac OS X, install mpich with brew; if cmake doesn't find it, edit the mpicxx wrapper to remove ",-commons,use_dylibs" from final_ldflags
+
+```bash
+vi /opt/homebrew/bin/mpicxx
+```
+
 ### boost library
 
 [Getting Started]()
diff --git a/src/Platform/b_grid.cc b/src/Platform/b_grid.cc
index 947a305..069f8a2 100644
--- a/src/Platform/b_grid.cc
+++ b/src/Platform/b_grid.cc
@@ -141,18 +141,18 @@ void list_results(json& results, std::string& model)
 
 void initialize_mpi(struct platform::ConfigMPI& config)
 {
-    int provided;
+    // int provided;
     // MPI_Init_thread(nullptr, nullptr, MPI_THREAD_MULTIPLE, &provided);
     // if (provided != MPI_THREAD_MULTIPLE) {
     //     std::cerr << "MPI_Init_thread returned " << provided << " instead of " << MPI_THREAD_MULTIPLE << std::endl;
     //     exit(1);
     // }
-    MPI_Init(nullptr, nullptr);
-    int rank, size;
-    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-    MPI_Comm_size(MPI_COMM_WORLD, &size);
-    config.mpi_rank = rank;
-    config.mpi_size = size;
+    // MPI_Init(nullptr, nullptr);
+    // int rank, size;
+    // MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    // MPI_Comm_size(MPI_COMM_WORLD, &size);
+    // config.mpi_rank = rank;
+    // config.mpi_size = size;
 }
 
 
@@ -213,8 +213,11 @@ int main(int argc, char** argv)
     } else {
         if (compute) {
             if (program.get<bool>("mpi")) {
-                initialize_mpi(mpi_config);
-                grid_search.setMPIConfig(mpi_config);
+                MPI_Init(nullptr, nullptr);
+                MPI_Comm_rank(MPI_COMM_WORLD, &config.rank);
+                MPI_Comm_size(MPI_COMM_WORLD, &config.size);
+                grid_search.go_mpi();
+                MPI_Finzalize();
             } else {
                 grid_search.go();
                 std::cout << "Process took " << timer.getDurationString() << std::endl;

From db9e80a70e9065fdfad6b9e628a0f90de9055ade Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20Monta=C3=B1ana=20G=C3=B3mez?=
Date: Tue, 12 Dec 2023 12:15:22 +0100
Subject: [PATCH 03/12] Create build tasks

---
 src/Platform/CMakeLists.txt |   3 +-
 src/Platform/GridSearch.cc  | 120 ++++++++++++++++++++++++++++++++++++
 src/Platform/GridSearch.h   |   8 ++-
 src/Platform/b_grid.cc      |  14 +++--
 4 files changed, 136 insertions(+), 9 deletions(-)

diff --git a/src/Platform/CMakeLists.txt b/src/Platform/CMakeLists.txt
index d35989f..e475b16 100644
--- a/src/Platform/CMakeLists.txt
+++ b/src/Platform/CMakeLists.txt
@@ -7,6 +7,7 @@ include_directories(${BayesNet_SOURCE_DIR}/lib/argparse/include)
 include_directories(${BayesNet_SOURCE_DIR}/lib/json/include)
 include_directories(${BayesNet_SOURCE_DIR}/lib/libxlsxwriter/include)
 include_directories(${Python3_INCLUDE_DIRS})
+include_directories(${MPI_CXX_INCLUDE_DIRS})
 
 add_executable(b_best b_best.cc BestResults.cc Result.cc Statistics.cc BestResultsExcel.cc ReportExcel.cc ReportBase.cc Datasets.cc Dataset.cc ExcelFile.cc)
 add_executable(b_grid b_grid.cc GridSearch.cc GridData.cc HyperParameters.cc Folding.cc Datasets.cc Dataset.cc)
@@ -15,7 +16,7 @@ add_executable(b_main b_main.cc Folding.cc Experiment.cc Datasets.cc Dataset.cc
 add_executable(b_manage b_manage.cc Results.cc ManageResults.cc CommandParser.cc Result.cc ReportConsole.cc ReportExcel.cc ReportBase.cc Datasets.cc Dataset.cc ExcelFile.cc)
 
 target_link_libraries(b_best Boost::boost "${XLSXWRITER_LIB}" "${TORCH_LIBRARIES}" ArffFiles mdlp)
-target_link_libraries(b_grid BayesNet PyWrap)
+target_link_libraries(b_grid BayesNet PyWrap ${MPI_CXX_LIBRARIES})
 target_link_libraries(b_list ArffFiles mdlp "${TORCH_LIBRARIES}")
 target_link_libraries(b_main BayesNet ArffFiles mdlp "${TORCH_LIBRARIES}" PyWrap)
 target_link_libraries(b_manage "${TORCH_LIBRARIES}" "${XLSXWRITER_LIB}" ArffFiles mdlp)
\ No newline at end of file
diff --git a/src/Platform/GridSearch.cc b/src/Platform/GridSearch.cc
index 9f91c6a..7b629ba 100644
--- a/src/Platform/GridSearch.cc
+++ b/src/Platform/GridSearch.cc
@@ -63,6 +63,126 @@ namespace platform {
             return Colors::RESET();
         }
     }
+    json GridSearch::buildTasks()
+    {
+        auto result = json::array();
+        auto datasets = Datasets(false, Paths::datasets());
+        auto datasets_names = datasets.getNames();
+        auto grid = GridData(Paths::grid_input(config.model));
+        for (const auto& dataset : datasets_names) {
+            for (const auto& seed : config.seeds) {
+                auto combinations = grid.getGrid(dataset);
+                for (const auto& hyperparam_line : combinations) {
+                    auto hyperparameters = platform::HyperParameters(datasets.getNames(), hyperparam_line);
+                    json task = {
+                        { "dataset", dataset },
+                        { "seed", seed },
+                        { "hyperparameters", hyperparameters.get(dataset) }
+                    };
+                    result.push_back(task);
+                }
+            }
+        }
+        return result;
+    }
+    std::pair<int, int> GridSearch::partRange(int n_tasks, int nprocs, int rank)
+    {
+        int assigned = 0;
+        int remainder = n_tasks % nprocs;
+        int start = 0;
+        if (rank < remainder) {
+            assigned = n_tasks / nprocs + 1;
+        } else {
+            assigned = n_tasks / nprocs;
+            start = remainder;
+        }
+        start += rank * assigned;
+        int end = start + assigned;
+        if (rank == nprocs - 1) {
+            end = n_tasks;
+        }
+        return { start, end };
+    }
+    void GridSearch::go_MPI(struct ConfigMPI& config_mpi)
+    {
+        /*
+         * Manager will do the loops dataset, seed, fold (primary) and hyperparameter
+         * Workers will do the loop fold (nested)
+         *
+         * The overall process consists of these steps:
+         * 1. Manager will broadcast the tasks to all the processes
+         * 1.1 Broadcast the number of tasks
+         * 1.2 Broadcast the length of the following string
+         * 1.3 Broadcast the tasks as a char* string
+         * 2. Workers will receive the tasks and start the process
+         * 2.1 A method will tell each worker the range of combinations to process
+         * 2.2 Each worker will process the combinations and return the best score obtained
+         * 3. Manager gathers the scores from all the workers and gets the best hyperparameters
+         * 3.1 Manager finds out which worker has the best score
+         * 3.2 Manager broadcasts the winner worker
+         * 3.3 The winner worker sends the best hyperparameters to the manager
+         *
+         */
+        char* msg;
+        int tasks_size;
+        if (config_mpi.rank == config_mpi.manager) {
+            auto tasks = buildTasks();
+            auto tasks_str = tasks.dump();
+            tasks_size = tasks_str.size();
+            msg = new char[tasks_size + 1];
+            strcpy(msg, tasks_str.c_str());
+        }
+        //
+        // 1. Manager will broadcast the tasks to all the processes
+        //
+        MPI_Bcast(&tasks_size, 1, MPI_INT, config_mpi.manager, MPI_COMM_WORLD);
+        if (config_mpi.rank != config_mpi.manager) {
+            msg = new char[tasks_size + 1];
+        }
+        MPI_Bcast(msg, tasks_size + 1, MPI_CHAR, config_mpi.manager, MPI_COMM_WORLD);
+        json tasks = json::parse(msg);
+        delete[] msg;
+        //
+        // 2. All Workers will receive the tasks and start the process
+        //
+        int num_tasks = tasks.size();
+        auto [start, end] = partRange(num_tasks, config_mpi.n_procs, config_mpi.rank);
+        // 2.2 Each worker will process the combinations and return the best score obtained
+        for (int i = start; i < end; ++i) {
+            auto task = tasks[i];
+            auto dataset = task["dataset"].get<std::string>();
+            auto seed = task["seed"].get<int>();
+            auto hyperparam_line = task["hyperparameters"];
+            auto datasets = Datasets(config.discretize, Paths::datasets());
+            auto [X, y] = datasets.getTensors(dataset);
+            auto states = datasets.getStates(dataset);
+            auto features = datasets.getFeatures(dataset);
+            auto className = datasets.getClassName(dataset);
+            double bestScore = 0.0;
+            json bestHyperparameters;
+            // First level fold
+            Fold* fold;
+            if (config.stratified)
+                fold = new StratifiedKFold(config.n_folds, y, seed);
+            else
+                fold = new KFold(config.n_folds, y.size(0), seed);
+            for (int nfold = 0; nfold < config.n_folds; nfold++) {
+
+                auto clf = Models::instance()->create(config.model);
+                auto valid = clf->getValidHyperparameters();
+                hyperparameters.check(valid, dataset);
+                clf->setHyperparameters(hyperparameters.get(dataset));
+                auto [train, test] = fold->getFold(nfold);
+                auto train_t = torch::tensor(train);
+                auto test_t = torch::tensor(test);
+                auto X_train = X.index({ "...", train_t });
+                auto y_train = y.index({ train_t });
+                auto X_test = X.index({ "...", test
+            }
+
+        }
+    }
+    }
     void GridSearch::go()
     {
         timer.start();
diff --git a/src/Platform/GridSearch.h b/src/Platform/GridSearch.h
index 70bbf47..330696d 100644
--- a/src/Platform/GridSearch.h
+++ b/src/Platform/GridSearch.h
@@ -26,12 +26,14 @@ namespace platform {
     };
     struct ConfigMPI {
         int rank;
-        int nprocs;
-    }
+        int n_procs;
+        int manager;
+    };
     class GridSearch {
     public:
         explicit GridSearch(struct ConfigGrid& config);
         void go();
+        void go_MPI(struct ConfigMPI& config_mpi);
         ~GridSearch() = default;
         json getResults();
         static inline std::string NO_CONTINUE() { return "NO_CONTINUE"; }
@@ -42,6 +44,8 @@ namespace platform {
         pair processFileSingle(std::string fileName, Datasets& datasets, std::vector& combinations);
         pair processFileNested(std::string fileName, Datasets& datasets, std::vector& combinations);
         struct ConfigGrid config;
+        pair<int, int> partRange(int n_tasks, int nprocs, int rank);
+        json buildTasks();
         Timer timer; // used to measure the time of the whole process
     };
 } /* namespace platform */
diff --git a/src/Platform/b_grid.cc b/src/Platform/b_grid.cc
index 069f8a2..dde5d14 100644
--- a/src/Platform/b_grid.cc
+++ b/src/Platform/b_grid.cc
@@ -2,6 +2,7 @@
 #include
 #include
 #include
+#include <mpi.h>
 #include "DotEnv.h"
 #include "Models.h"
 #include "modelRegister.h"
@@ -164,7 +165,6 @@ int main(int argc, char** argv)
     argparse::ArgumentParser program("b_grid");
     manageArguments(program);
     struct platform::ConfigGrid config;
-    struct platform::ConfigMPI mpi_config;
     bool dump, compute;
     try {
         program.parse_args(argc, argv);
@@ -213,11 +213,13 @@ int main(int argc, char** argv)
     } else {
         if (compute) {
             if (program.get<bool>("mpi")) {
-                MPI_Init(nullptr, nullptr);
-                MPI_Comm_rank(MPI_COMM_WORLD, &config.rank);
-                MPI_Comm_size(MPI_COMM_WORLD, &config.size);
-                grid_search.go_mpi();
-                MPI_Finzalize();
+                struct platform::ConfigMPI mpi_config;
+                mpi_config.manager = 0; // which process is the manager
+                MPI_Init(&argc, &argv);
+                MPI_Comm_rank(MPI_COMM_WORLD, &mpi_config.rank);
+                MPI_Comm_size(MPI_COMM_WORLD, &mpi_config.n_procs);
+                grid_search.go_MPI(mpi_config);
+                MPI_Finalize();
             } else {
                 grid_search.go();
                 std::cout << "Process took " << timer.getDurationString() << std::endl;
From dbf2f355029b395eb961a265df145f27a37a7fe5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20Montan=CC=83ana?=
Date: Tue, 12 Dec 2023 18:57:57 +0100
Subject: [PATCH 04/12] First compiling version

---
 src/Platform/GridSearch.cc | 124 ++++++++++++++++++++++++------------
 src/Platform/GridSearch.h  |   1 +
 src/Platform/b_grid.cc     |   4 +--
 3 files changed, 84 insertions(+), 45 deletions(-)

diff --git a/src/Platform/GridSearch.cc b/src/Platform/GridSearch.cc
index 7b629ba..4f1f6e7 100644
--- a/src/Platform/GridSearch.cc
+++ b/src/Platform/GridSearch.cc
@@ -38,6 +38,39 @@ namespace platform {
         }
         return json();
     }
+    vector<string> GridSearch::processDatasets(Datasets& datasets)
+    {
+        // Load datasets
+        auto datasets_names = datasets.getNames();
+        if (config.continue_from != NO_CONTINUE()) {
+            // Continue previous execution:
+            if (std::find(datasets_names.begin(), datasets_names.end(), config.continue_from) == datasets_names.end()) {
+                throw std::invalid_argument("Dataset " + config.continue_from + " not found");
+            }
+            // Remove datasets already processed
+            vector< string >::iterator it = datasets_names.begin();
+            while (it != datasets_names.end()) {
+                if (*it != config.continue_from) {
+                    it = datasets_names.erase(it);
+                } else {
+                    if (config.only)
+                        ++it;
+                    else
+                        break;
+                }
+            }
+        }
+        // Exclude datasets
+        for (const auto& name : config.excluded) {
+            auto dataset = name.get<std::string>();
+            auto it = std::find(datasets_names.begin(), datasets_names.end(), dataset);
+            if (it == datasets_names.end()) {
+                throw std::invalid_argument("Dataset " + dataset + " already excluded or doesn't exist!");
+            }
+            datasets_names.erase(it);
+        }
+        return datasets_names;
+    }
     void showProgressComb(const int num, const int n_folds, const int total, const std::string& color)
     {
         int spaces = int(log(total) / log(10)) + 1;
@@ -96,7 +129,7 @@ namespace platform {
     json GridSearch::buildTasks()
     {
         auto result = json::array();
         auto datasets = Datasets(false, Paths::datasets());
-        auto datasets_names = datasets.getNames();
+        auto datasets_names = processDatasets(datasets);
         auto grid = GridData(Paths::grid_input(config.model));
@@ -103,6 +136,10 @@ namespace platform {
         }
         return { start, end };
     }
+    void status(struct ConfigMPI& config_mpi, std::string status)
+    {
+        std::cout << "* (" << config_mpi.rank << "): " << status << std::endl;
+    }
     void GridSearch::go_MPI(struct ConfigMPI& config_mpi)
     {
         /*
@@ -148,12 +185,13 @@ namespace platform {
        int num_tasks = tasks.size();
        auto [start, end] = partRange(num_tasks, config_mpi.n_procs, config_mpi.rank);
        // 2.2 Each worker will process the combinations and return the best score obtained
+       auto datasets = Datasets(config.discretize, Paths::datasets());
        for (int i = start; i < end; ++i) {
            auto task = tasks[i];
            auto dataset = task["dataset"].get<std::string>();
            auto seed = task["seed"].get<int>();
            auto hyperparam_line = task["hyperparameters"];
-           auto datasets = Datasets(config.discretize, Paths::datasets());
+           status(config_mpi, "Processing dataset " + dataset + " with seed " + std::to_string(seed) + " and hyperparameters " + hyperparam_line.dump());
            auto [X, y] = datasets.getTensors(dataset);
            auto states = datasets.getStates(dataset);
            auto features = datasets.getFeatures(dataset);
            auto className = datasets.getClassName(dataset);
@@ -167,20 +205,53 @@ namespace platform {
            else
                fold = new KFold(config.n_folds, y.size(0), seed);
            for (int nfold = 0; nfold < config.n_folds; nfold++) {
-
-               auto clf = Models::instance()->create(config.model);
-               auto valid = clf->getValidHyperparameters();
-               hyperparameters.check(valid, dataset);
-               clf->setHyperparameters(hyperparameters.get(dataset));
+               status(config_mpi, "Processing fold " + std::to_string(nfold + 1));
                auto [train, test] = fold->getFold(nfold);
                auto train_t = torch::tensor(train);
                auto test_t = torch::tensor(test);
                auto X_train = X.index({ "...", train_t });
                auto y_train = y.index({ train_t });
-               auto X_test = X.index({ "...", test
-           }
-
+               auto X_test = X.index({ "...", test_t });
+               auto y_test = y.index({ test_t });
+               auto num = 0;
+               json result_fold;
+               double hypScore = 0.0;
+               double bestHypScore = 0.0;
+               json bestHypHyperparameters;
+               Fold* nested_fold;
+               if (config.stratified)
+                   nested_fold = new StratifiedKFold(config.nested, y_train, seed);
+               else
+                   nested_fold = new KFold(config.nested, y_train.size(0), seed);
+               for (int n_nested_fold = 0; n_nested_fold < config.nested; n_nested_fold++) {
+                   // Nested level fold
+                   status(config_mpi, "Processing nested fold " + std::to_string(n_nested_fold + 1));
+                   auto [train_nested, test_nested] = nested_fold->getFold(n_nested_fold);
+                   auto train_nested_t = torch::tensor(train_nested);
+                   auto test_nested_t = torch::tensor(test_nested);
+                   auto X_nexted_train = X_train.index({ "...", train_nested_t });
+                   auto y_nested_train = y_train.index({ train_nested_t });
+                   auto X_nested_test = X_train.index({ "...", test_nested_t });
+                   auto y_nested_test = y_train.index({ test_nested_t });
+                   // Build Classifier with selected hyperparameters
+                   auto hyperparameters = platform::HyperParameters(datasets.getNames(), hyperparam_line);
+                   auto clf = Models::instance()->create(config.model);
+                   auto valid = clf->getValidHyperparameters();
+                   hyperparameters.check(valid, dataset);
+                   clf->setHyperparameters(hyperparameters.get(dataset));
+                   // Train model
+                   clf->fit(X_nexted_train, y_nested_train, features, className, states);
+                   // Test model
+                   hypScore += clf->score(X_nested_test, y_nested_test);
+               }
+               delete nested_fold;
+               hypScore /= config.nested;
+               if (hypScore > bestHypScore) {
+                   bestHypScore = hypScore;
+                   bestHypHyperparameters = hyperparam_line;
+               }
            }
+           delete fold;
        }
    }
@@ -391,39 +462,6 @@ namespace platform {
         }
         return { goatScore, goatHyperparameters };
     }
-    vector<string> GridSearch::processDatasets(Datasets& datasets)
-    {
-        // Load datasets
-        auto datasets_names = datasets.getNames();
-        if (config.continue_from != NO_CONTINUE()) {
-            // Continue previous execution:
-            if (std::find(datasets_names.begin(), datasets_names.end(), config.continue_from) == datasets_names.end()) {
-                throw std::invalid_argument("Dataset " + config.continue_from + " not found");
-            }
-            // Remove datasets already processed
-            vector< string >::iterator it = datasets_names.begin();
-            while (it != datasets_names.end()) {
-                if (*it != config.continue_from) {
-                    it = datasets_names.erase(it);
-                } else {
-                    if (config.only)
-                        ++it;
-                    else
-                        break;
-                }
-            }
-        }
-        // Exclude datasets
-        for (const auto& name : config.excluded) {
-            auto dataset = name.get<std::string>();
-            auto it = std::find(datasets_names.begin(), datasets_names.end(), dataset);
-            if (it == datasets_names.end()) {
-                throw std::invalid_argument("Dataset " + dataset + " already excluded or doesn't exist!");
-            }
-            datasets_names.erase(it);
-        }
-        return datasets_names;
-    }
     json GridSearch::initializeResults()
     {
         // Load previous results
diff --git a/src/Platform/GridSearch.h b/src/Platform/GridSearch.h
index 330696d..4c757fa 100644
--- a/src/Platform/GridSearch.h
+++ b/src/Platform/GridSearch.h
@@ -2,6 +2,7 @@
 #define GRIDSEARCH_H
 #include
 #include
+#include <mpi.h>
 #include
 #include "Datasets.h"
diff --git a/src/Platform/b_grid.cc b/src/Platform/b_grid.cc
index dde5d14..e5bcd03 100644
--- a/src/Platform/b_grid.cc
+++ b/src/Platform/b_grid.cc
@@ -189,8 +189,8 @@ int main(int argc, char** argv)
     }
     auto excluded = program.get("exclude");
     config.excluded = json::parse(excluded);
     if (program.get<bool>("mpi")) {
-        if (!compute) {
-            throw std::runtime_error("Cannot use --mpi without --compute");
+        if (!compute || config.nested == 0) {
+            throw std::runtime_error("Cannot use --mpi without --compute and --nested");
         }
     }

From b73f4be1463943cfe83ec8d88831109675081e62 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20Monta=C3=B1ana=20G=C3=B3mez?=
Date: Thu, 14 Dec 2023 15:55:08 +0100
Subject: [PATCH 05/12] First try with complete algorithm

---
 src/Platform/GridSearch.cc | 251 ++++++++++++++++++++++++-------------
 src/Platform/GridSearch.h  |   7 +-
 src/Platform/b_grid.cc     |   2 +-
 3 files changed, 171 insertions(+), 89 deletions(-)

diff --git a/src/Platform/GridSearch.cc b/src/Platform/GridSearch.cc
index 4f1f6e7..e196f7b 100644
--- a/src/Platform/GridSearch.cc
+++ b/src/Platform/GridSearch.cc
@@ -96,29 +96,32 @@ namespace platform {
             return Colors::RESET();
         }
     }
-    json GridSearch::buildTasks()
+    json GridSearch::build_tasks_mpi()
     {
-        auto result = json::array();
+        auto tasks = json::array();
+        auto grid = GridData(Paths::grid_input(config.model));
         auto datasets = Datasets(false, Paths::datasets());
         auto datasets_names = processDatasets(datasets);
-        auto grid = GridData(Paths::grid_input(config.model));
         for (const auto& dataset : datasets_names) {
             for (const auto& seed : config.seeds) {
                 auto combinations = grid.getGrid(dataset);
-                for (const auto& hyperparam_line : combinations) {
-                    auto hyperparameters = platform::HyperParameters(datasets.getNames(), hyperparam_line);
+                for (int n_fold = 0; n_fold < config.n_folds; n_fold++) {
                     json task = {
                         { "dataset", dataset },
                         { "seed", seed },
-                        { "hyperparameters", hyperparameters.get(dataset) }
+                        { "fold", n_fold}
                     };
-                    result.push_back(task);
+                    tasks.push_back(task);
                 }
             }
         }
-        return result;
+        // It's important to shuffle the array so heavy datasets are spread across the Workers
+        std::random_device rd;
+        std::mt19937 g(rd());
+        std::shuffle(tasks.begin(), tasks.end(), g);
+        return tasks;
     }
-    std::pair<int, int> GridSearch::partRange(int n_tasks, int nprocs, int rank)
+    std::pair<int, int> GridSearch::part_range_mpi(int n_tasks, int nprocs, int rank)
     {
         int assigned = 0;
         int remainder = n_tasks % nprocs;
@@ -139,11 +142,186 @@ namespace platform {
     void status(struct ConfigMPI& config_mpi, std::string status)
     {
         std::cout << "* (" << config_mpi.rank << "): " << status << std::endl;
     }
-    void GridSearch::go_MPI(struct ConfigMPI& config_mpi)
+    void GridSearch::process_task_mpi(struct ConfigMPI& config_mpi, json& task, Datasets& datasets, json& results)
+    {
+        // Process the task and store the result in the results json
+        auto grid = GridData(Paths::grid_input(config.model));
+        auto dataset = task["dataset"].get<std::string>();
+        auto seed = task["seed"].get<int>();
+        auto n_fold = task["fold"].get<int>();
+        // Generate the hyperparameters combinations
+        auto combinations = grid.getGrid(dataset);
+        status(config_mpi, "Processing dataset " + dataset + " with seed " + std::to_string(seed) + " and fold " + std::to_string(n_fold));
+        auto [X, y] = datasets.getTensors(dataset);
+        auto states = datasets.getStates(dataset);
+        auto features = datasets.getFeatures(dataset);
+        auto className = datasets.getClassName(dataset);
+        //
+        // Start working on task
+        //
+        Fold* fold;
+        if (config.stratified)
+            fold = new StratifiedKFold(config.n_folds, y, seed);
+        else
+            fold = new KFold(config.n_folds, y.size(0), seed);
+        auto [train, test] = fold->getFold(n_fold);
+        auto train_t = torch::tensor(train);
+        auto test_t = torch::tensor(test);
+        auto X_train = X.index({ "...", train_t });
+        auto y_train = y.index({ train_t });
+        auto X_test = X.index({ "...", test_t });
+        auto y_test = y.index({ test_t });
+        auto num = 0;
+        double best_fold_score = 0.0;
+        json best_fold_hyper;
+        for (const auto& hyperparam_line : combinations) {
+            //status(config_mpi, "* Dataset: " + dataset + " Fold: " + std::to_string(n_fold) + " Processing hyperparameters: " + std::to_string(++num) + "/" + std::to_string(combinations.size()));
+            auto hyperparameters = platform::HyperParameters(datasets.getNames(), hyperparam_line);
+            Fold* nested_fold;
+            if (config.stratified)
+                nested_fold = new StratifiedKFold(config.nested, y_train, seed);
+            else
+                nested_fold = new KFold(config.nested, y_train.size(0), seed);
+            double score = 0.0;
+            for (int n_nested_fold = 0; n_nested_fold < config.nested; n_nested_fold++) {
+                // Nested level fold
+                auto [train_nested, test_nested] = nested_fold->getFold(n_nested_fold);
+                auto train_nested_t = torch::tensor(train_nested);
+                auto test_nested_t = torch::tensor(test_nested);
+                auto X_nested_train = X_train.index({ "...", train_nested_t });
+                auto y_nested_train = y_train.index({ train_nested_t });
+                auto X_nested_test = X_train.index({ "...", test_nested_t });
+                auto y_nested_test = y_train.index({ test_nested_t });
+                // Build Classifier with selected hyperparameters
+                auto clf = Models::instance()->create(config.model);
+                auto valid = clf->getValidHyperparameters();
+                hyperparameters.check(valid, dataset);
+                clf->setHyperparameters(hyperparameters.get(dataset));
+                // Train model
+                clf->fit(X_nested_train, y_nested_train, features, className, states);
+                // Test model
+                score += clf->score(X_nested_test, y_nested_test);
+            }
+            delete nested_fold;
+            score /= config.nested;
+            if (score > best_fold_score) {
+                best_fold_score = score;
+                best_fold_hyper = hyperparam_line;
+            }
+        }
+        delete fold;
+        // Build Classifier with the best hyperparameters to obtain the best score
+        auto hyperparameters = platform::HyperParameters(datasets.getNames(), best_fold_hyper);
+        auto clf = Models::instance()->create(config.model);
+        auto valid = clf->getValidHyperparameters();
+        hyperparameters.check(valid, dataset);
+        clf->setHyperparameters(best_fold_hyper);
+        clf->fit(X_train, y_train, features, className, states);
+        best_fold_score = clf->score(X_test, y_test);
+        // Save results
+        results[dataset][std::to_string(n_fold)]["score"] = best_fold_score;
+        results[dataset][std::to_string(n_fold)]["hyperparameters"] = best_fold_hyper;
+        results[dataset][std::to_string(n_fold)]["hyperparameters"] = seed;
+        status(config_mpi, "Finished dataset " + dataset + " with seed " + std::to_string(seed) + " and fold " + std::to_string(n_fold) + " score " + std::to_string(best_fold_score));
+    }
+    void GridSearch::go_mpi(struct ConfigMPI& config_mpi)
     {
         /*
-         * Manager will do the loops dataset, seed, fold (primary) and hyperparameter
-         * Workers will do the loop fold (nested)
+         * Each task is a json object with the following structure:
+         * {
+         *      "dataset": "dataset_name",
+         *      "seed": # of seed to use,
+         *      "model": "model_name",
+         *      "Fold": # of fold to process
+         * }
          *
          * The overall process consists of these steps:
         * 1. Manager will broadcast the tasks to all the processes
         * 1.1 Broadcast the number of tasks
         * 1.2 Broadcast the length of the following string
         * 1.3 Broadcast the tasks as a char* string
         * 2. Workers will receive the tasks and start the process
-         * 2.1 A method will tell each worker the range of combinations to process
-         * 2.2 Each worker will process the combinations and return the best score obtained
-         * 3. Manager gathers the scores from all the workers and gets the best hyperparameters
-         * 3.1 Manager finds out which worker has the best score
-         * 3.2 Manager broadcasts the winner worker
-         * 3.3 The winner worker sends the best hyperparameters to the manager
-         *
+         * 2.1 A method will tell each worker the range of tasks to process
+         * 2.2 Each worker will process the tasks and generate the best score for each task
+         * 3. Manager gathers the scores from all the workers and finds out the best hyperparameters for each dataset
+         * 3.1 Obtain the maximum size of the results message of all the workers
+         * 3.2 Gather all the results from the workers into the manager
+         * 3.3 Compile the results from all the workers
+         * 3.4 Filter the best hyperparameters for each dataset
         */
        char* msg;
        int tasks_size;
        if (config_mpi.rank == config_mpi.manager) {
-           auto tasks = buildTasks();
+           auto tasks = build_tasks_mpi();
            auto tasks_str = tasks.dump();
            tasks_size = tasks_str.size();
            msg = new char[tasks_size + 1];
            strcpy(msg, tasks_str.c_str());
        }
        //
        // 1. Manager will broadcast the tasks to all the processes
        //
        MPI_Bcast(&tasks_size, 1, MPI_INT, config_mpi.manager, MPI_COMM_WORLD);
        if (config_mpi.rank != config_mpi.manager) {
            msg = new char[tasks_size + 1];
        }
        MPI_Bcast(msg, tasks_size + 1, MPI_CHAR, config_mpi.manager, MPI_COMM_WORLD);
        json tasks = json::parse(msg);
        delete[] msg;
        //
        // 2. All Workers will receive the tasks and start the process
        //
        int num_tasks = tasks.size();
-       auto [start, end] = partRange(num_tasks, config_mpi.n_procs, config_mpi.rank);
-       // 2.2 Each worker will process the combinations and return the best score obtained
+       // 2.1 A method will tell each worker the range of tasks to process
+       auto [start, end] = part_range_mpi(num_tasks, config_mpi.n_procs, config_mpi.rank);
+       // 2.2 Each worker will process the tasks and return the best scores obtained
        auto datasets = Datasets(config.discretize, Paths::datasets());
+       json results;
        for (int i = start; i < end; ++i) {
-           auto task = tasks[i];
-           auto dataset = task["dataset"].get<std::string>();
-           auto seed = task["seed"].get<int>();
-           auto hyperparam_line = task["hyperparameters"];
-           status(config_mpi, "Processing dataset " + dataset + " with seed " + std::to_string(seed) + " and hyperparameters " + hyperparam_line.dump());
-           auto [X, y] = datasets.getTensors(dataset);
-           auto states = datasets.getStates(dataset);
-           auto features = datasets.getFeatures(dataset);
-           auto className = datasets.getClassName(dataset);
-           double bestScore = 0.0;
-           json bestHyperparameters;
-           // First level fold
-           Fold* fold;
-           if (config.stratified)
-               fold = new StratifiedKFold(config.n_folds, y, seed);
-           else
-               fold = new KFold(config.n_folds, y.size(0), seed);
-           for (int nfold = 0; nfold < config.n_folds; nfold++) {
-               status(config_mpi, "Processing fold " + std::to_string(nfold + 1));
-               auto [train, test] = fold->getFold(nfold);
-               auto train_t = torch::tensor(train);
-               auto test_t = torch::tensor(test);
-               auto X_train = X.index({ "...", train_t });
-               auto y_train = y.index({ train_t });
-               auto X_test = X.index({ "...", test_t });
-               auto y_test = y.index({ test_t });
-               auto num = 0;
-               json result_fold;
-               double hypScore = 0.0;
-               double bestHypScore = 0.0;
-               json bestHypHyperparameters;
-               Fold* nested_fold;
-               if (config.stratified)
-                   nested_fold = new StratifiedKFold(config.nested, y_train, seed);
-               else
-                   nested_fold = new KFold(config.nested, y_train.size(0), seed);
-               for (int n_nested_fold = 0; n_nested_fold < config.nested; n_nested_fold++) {
-                   // Nested level fold
-                   status(config_mpi, "Processing nested fold " + std::to_string(n_nested_fold + 1));
-                   auto [train_nested, test_nested] = nested_fold->getFold(n_nested_fold);
-                   auto train_nested_t = torch::tensor(train_nested);
-                   auto test_nested_t = torch::tensor(test_nested);
-                   auto X_nexted_train = X_train.index({ "...", train_nested_t });
-                   auto y_nested_train = y_train.index({ train_nested_t });
-                   auto X_nested_test = X_train.index({ "...", test_nested_t });
-                   auto y_nested_test = y_train.index({ test_nested_t });
-                   // Build Classifier with selected hyperparameters
-                   auto hyperparameters = platform::HyperParameters(datasets.getNames(), hyperparam_line);
-                   auto clf = Models::instance()->create(config.model);
-                   auto valid = clf->getValidHyperparameters();
-                   hyperparameters.check(valid, dataset);
-                   clf->setHyperparameters(hyperparameters.get(dataset));
-                   // Train model
-                   clf->fit(X_nexted_train, y_nested_train, features, className, states);
-                   // Test model
-                   hypScore += clf->score(X_nested_test, y_nested_test);
-               }
-               delete nested_fold;
-               hypScore /= config.nested;
-               if (hypScore > bestHypScore) {
-                   bestHypScore = hypScore;
-                   bestHypHyperparameters = hyperparam_line;
-               }
+           // Process task
+           process_task_mpi(config_mpi, tasks[i], datasets, results);
        }
-           delete fold;
+       int size = results.dump().size() + 1;
+       int max_size = 0;
+       //
+       // 3. Manager gathers the scores from all the workers and finds out the best hyperparameters for each dataset
+       //
+       //3.1 Obtain the maximum size of the results message of all the workers
+       MPI_Reduce(&size, &max_size, 1, MPI_INT, MPI_MAX, config_mpi.manager, MPI_COMM_WORLD);
+       // Assign the memory to the message and initialize it to 0s
+       char* total;
+       msg = new char[max_size] {};
+       strncpy(msg, results.dump().c_str(), size);
+       if (config_mpi.rank == config_mpi.manager) {
+           total = new char[max_size * config_mpi.n_procs] {};
+       }
+       // 3.2 Gather all the results from the workers into the manager
+       MPI_Gather(msg, max_size, MPI_CHAR, total, max_size * config_mpi.n_procs, MPI_CHAR, config_mpi.manager, MPI_COMM_WORLD);
+       delete[] msg;
+       if (config_mpi.rank == config_mpi.manager) {
+           json total_results;
+           json best_results;
+           // 3.3 Compile the results from all the workers
+           for (int i = 0; i < config_mpi.n_procs; ++i) {
+               json partial_results = json::parse(total + i * max_size);
+               for (auto& [dataset, folds] : partial_results.items()) {
+                   for (auto& [fold, result] : folds.items()) {
+                       total_results[dataset][fold] = result;
+                   }
+               }
+           }
+           delete[] total;
+           // 3.4 Filter the best hyperparameters for each dataset
+           auto grid = GridData(Paths::grid_input(config.model));
+           for (auto& [dataset, folds] : total_results.items()) {
+               double best_score = 0.0;
+               json best_hyper;
+               for (auto& [fold, result] : folds.items()) {
+                   if (result["score"] > best_score) {
+                       best_score = result["score"];
+                       best_hyper = result["hyperparameters"];
+                   }
+               }
+               json result = {
+                   { "score", best_score },
+                   { "hyperparameters", best_hyper },
+                   { "date", get_date() + " " + get_time() },
+                   { "grid", grid.getInputGrid(dataset) },
+                   { "duration", 0 }
+               };
+               best_results[dataset] = result;
+           }
+           save(total_results);
        }
    }
     void GridSearch::go()
diff --git a/src/Platform/GridSearch.h b/src/Platform/GridSearch.h
index 4c757fa..c00b2ee 100644
--- a/src/Platform/GridSearch.h
+++ b/src/Platform/GridSearch.h
@@ -34,7 +34,7 @@ namespace platform {
     public:
         explicit GridSearch(struct ConfigGrid& config);
         void go();
-        void go_MPI(struct ConfigMPI& config_mpi);
+        void go_mpi(struct ConfigMPI& config_mpi);
         ~GridSearch() = default;
         json getResults();
         static inline std::string NO_CONTINUE() { return "NO_CONTINUE"; }
@@ -45,8 +45,9 @@ namespace platform {
         pair processFileSingle(std::string fileName, Datasets& datasets, std::vector& combinations);
         pair processFileNested(std::string fileName, Datasets& datasets, std::vector& combinations);
         struct ConfigGrid config;
-        pair<int, int> partRange(int n_tasks, int nprocs, int rank);
-        json buildTasks();
+        pair<int, int> part_range_mpi(int n_tasks, int nprocs, int rank);
+        json build_tasks_mpi();
+        void process_task_mpi(struct ConfigMPI& config_mpi, json& task, Datasets& datasets, json& results);
         Timer timer; // used to measure the time of the whole process
     };
 } /* namespace platform */
diff --git a/src/Platform/b_grid.cc b/src/Platform/b_grid.cc
index e5bcd03..4439192 100644
--- a/src/Platform/b_grid.cc
+++ b/src/Platform/b_grid.cc
@@ -218,7 +218,7 @@ int main(int argc, char** argv)
                 MPI_Init(&argc, &argv);
                 MPI_Comm_rank(MPI_COMM_WORLD, &mpi_config.rank);
                 MPI_Comm_size(MPI_COMM_WORLD, &mpi_config.n_procs);
-                grid_search.go_MPI(mpi_config);
+                grid_search.go_mpi(mpi_config);
                 MPI_Finalize();
             } else {
                 grid_search.go();
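For reference, the task objects broadcast in step 1 and the per-fold entries each worker accumulates in step 2.2 have roughly the shapes sketched below. This is an illustration built with nlohmann::json (the library the project already uses); the dataset name, score, and hyperparameter values here are made up:

```cpp
#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main()
{
    // One entry of the task array the manager broadcasts (see build_tasks_mpi).
    json task = {
        { "dataset", "iris" }, // hypothetical dataset name
        { "seed", 271 },
        { "fold", 0 }
    };
    // One per-fold entry a worker stores in its results json (see process_task_mpi).
    json results;
    results["iris"]["0"] = {
        { "score", 0.95 },                           // made-up score
        { "hyperparameters", { { "alpha", 0.1 } } }, // made-up hyperparameter
        { "seed", 271 }
    };
    std::cout << task.dump(2) << "\n" << results.dump(2) << std::endl;
    return 0;
}
```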
From ffe6d37436e8192f00e3abc2e044ffa7bedf3a5f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20Montan=CC=83ana?=
Date: Thu, 14 Dec 2023 21:06:43 +0100
Subject: [PATCH 06/12] Add messages to control trace

---
 src/Platform/GridSearch.cc | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/src/Platform/GridSearch.cc b/src/Platform/GridSearch.cc
index e196f7b..d03807d 100644
--- a/src/Platform/GridSearch.cc
+++ b/src/Platform/GridSearch.cc
@@ -297,9 +297,11 @@ namespace platform {
             total = new char[max_size * config_mpi.n_procs] {};
         }
         // 3.2 Gather all the results from the workers into the manager
+        std::cout << "(" << config_mpi.rank << ")" << msg << std::endl;
         MPI_Gather(msg, max_size, MPI_CHAR, total, max_size * config_mpi.n_procs, MPI_CHAR, config_mpi.manager, MPI_COMM_WORLD);
         delete[] msg;
         if (config_mpi.rank == config_mpi.manager) {
+            std::cout << "Manager taking final control!" << std::endl;
             json total_results;
             json best_results;
             // 3.3 Compile the results from all the workers
@@ -312,6 +314,7 @@ namespace platform {
                 }
             }
             delete[] total;
+            std::cout << "Total results: " << total_results.dump() << std::endl;
             // 3.4 Filter the best hyperparameters for each dataset
             auto grid = GridData(Paths::grid_input(config.model));
             for (auto& [dataset, folds] : total_results.items()) {
@@ -332,8 +335,10 @@ namespace platform {
                 };
                 best_results[dataset] = result;
             }
+            std::cout << "Best results: " << best_results.dump() << std::endl;
             save(total_results);
         }
+        std::cout << "Process " << config_mpi.rank << " finished!" << std::endl;
     }
     void GridSearch::go()
     {

From 19586a3a5abaf489537cbbc2db408f5932cf247b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20Monta=C3=B1ana=20G=C3=B3mez?=
Date: Fri, 15 Dec 2023 01:54:13 +0100
Subject: [PATCH 07/12] Fix pesky error allocating memory in workers

---
 src/Platform/GridSearch.cc | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/src/Platform/GridSearch.cc b/src/Platform/GridSearch.cc
index d03807d..6afb141 100644
--- a/src/Platform/GridSearch.cc
+++ b/src/Platform/GridSearch.cc
@@ -222,7 +222,7 @@ namespace platform {
         // Save results
         results[dataset][std::to_string(n_fold)]["score"] = best_fold_score;
         results[dataset][std::to_string(n_fold)]["hyperparameters"] = best_fold_hyper;
-        results[dataset][std::to_string(n_fold)]["hyperparameters"] = seed;
+        results[dataset][std::to_string(n_fold)]["seed"] = seed;
         status(config_mpi, "Finished dataset " + dataset + " with seed " + std::to_string(seed) + " and fold " + std::to_string(n_fold) + " score " + std::to_string(best_fold_score));
     }
     void GridSearch::go_mpi(struct ConfigMPI& config_mpi)
@@ -288,18 +288,19 @@ namespace platform {
         //
         // 3. Manager gathers the scores from all the workers and finds out the best hyperparameters for each dataset
         //
         //3.1 Obtain the maximum size of the results message of all the workers
-        MPI_Reduce(&size, &max_size, 1, MPI_INT, MPI_MAX, config_mpi.manager, MPI_COMM_WORLD);
+        MPI_Allreduce(&size, &max_size, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
         // Assign the memory to the message and initialize it to 0s
+        status(config_mpi, "Max size of the results message: " + std::to_string(max_size));
+        status(config_mpi, "size of my message " + std::to_string(size));
-        char* total;
-        msg = new char[max_size] {};
+        char* total = NULL;
+        msg = new char[max_size];
         strncpy(msg, results.dump().c_str(), size);
         if (config_mpi.rank == config_mpi.manager) {
-            total = new char[max_size * config_mpi.n_procs] {};
+            total = new char[max_size * config_mpi.n_procs];
         }
         // 3.2 Gather all the results from the workers into the manager
         std::cout << "(" << config_mpi.rank << ")" << msg << std::endl;
-        MPI_Gather(msg, max_size, MPI_CHAR, total, max_size * config_mpi.n_procs, MPI_CHAR, config_mpi.manager, MPI_COMM_WORLD);
-        delete[] msg;
+        MPI_Gather(msg, max_size, MPI_CHAR, total, max_size, MPI_CHAR, config_mpi.manager, MPI_COMM_WORLD);
         if (config_mpi.rank == config_mpi.manager) {
             std::cout << "Manager taking final control!" << std::endl;
             json total_results;
             json best_results;
@@ -338,6 +339,7 @@ namespace platform {
             std::cout << "Best results: " << best_results.dump() << std::endl;
             save(total_results);
         }
+        delete[] msg;
         std::cout << "Process " << config_mpi.rank << " finished!" << std::endl;
     }
     void GridSearch::go()
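The allocation fix above hinges on MPI_Gather semantics: recvcount is the count received from each rank, not the total, and every rank must send the same count, which is why MPI_Allreduce (rather than MPI_Reduce) is used so that all ranks, not just the manager, agree on the padded buffer size. A minimal sketch of the same pattern with plain strings, assuming an MPI environment (this is an illustration, not code from the patch):

```cpp
#include <cstring>
#include <iostream>
#include <string>
#include <mpi.h>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int rank, n_procs;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &n_procs);
    const int manager = 0;
    // Variable-length payload per rank (stands in for the results json dump).
    std::string payload = "rank " + std::to_string(rank) + " results";
    int size = payload.size() + 1; // include the terminating '\0'
    int max_size = 0;
    // Every rank must learn the common buffer size, hence Allreduce, not Reduce.
    MPI_Allreduce(&size, &max_size, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
    char* msg = new char[max_size](); // zero-initialized padding
    std::strncpy(msg, payload.c_str(), size);
    char* total = nullptr;
    if (rank == manager) {
        total = new char[max_size * n_procs]();
    }
    // recvcount is per sender: the manager receives max_size chars from each rank.
    MPI_Gather(msg, max_size, MPI_CHAR, total, max_size, MPI_CHAR, manager, MPI_COMM_WORLD);
    if (rank == manager) {
        for (int i = 0; i < n_procs; ++i) {
            std::cout << (total + i * max_size) << std::endl;
        }
        delete[] total;
    }
    delete[] msg;
    MPI_Finalize();
    return 0;
}
```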
From b5b5b48864d67d02c7f954c072ea756d3feba3a8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20Monta=C3=B1ana=20G=C3=B3mez?=
Date: Fri, 15 Dec 2023 18:09:17 +0100
Subject: [PATCH 08/12] Update grid progress bar output

---
 src/Platform/GridSearch.cc | 27 +++++++++++++--------------
 src/Platform/b_grid.cc     |  5 +++++
 2 files changed, 18 insertions(+), 14 deletions(-)

diff --git a/src/Platform/GridSearch.cc b/src/Platform/GridSearch.cc
index 6afb141..de7e060 100644
--- a/src/Platform/GridSearch.cc
+++ b/src/Platform/GridSearch.cc
@@ -119,6 +119,12 @@ namespace platform {
         std::random_device rd;
         std::mt19937 g(rd());
         std::shuffle(tasks.begin(), tasks.end(), g);
+        std::cout << "Tasks size: " << tasks.size() << std::endl;
+        std::cout << "|";
+        for (int i = 0; i < tasks.size(); ++i) {
+            std::cout << (i + 1) % 10;
+        }
+        std::cout << "|" << std::endl << "|" << std::flush;
         return tasks;
     }
     std::pair<int, int> GridSearch::part_range_mpi(int n_tasks, int nprocs, int rank)
@@ -139,9 +145,10 @@ namespace platform {
         }
         return { start, end };
     }
-    void status(struct ConfigMPI& config_mpi, std::string status)
-    {
-        std::cout << "* (" << config_mpi.rank << "): " << status << std::endl;
+    std::string get_color_rank(int rank)
+    {
+        auto colors = { Colors::RED(), Colors::GREEN(), Colors::BLUE(), Colors::MAGENTA(), Colors::CYAN() };
+        return *(colors.begin() + rank % colors.size());
     }
     void GridSearch::process_task_mpi(struct ConfigMPI& config_mpi, json& task, Datasets& datasets, json& results)
     {
@@ -155,7 +161,6 @@ namespace platform {
         // Generate the hyperparameters combinations
         auto combinations = grid.getGrid(dataset);
-        status(config_mpi, "Processing dataset " + dataset + " with seed " + std::to_string(seed) + " and fold " + std::to_string(n_fold));
         auto [X, y] = datasets.getTensors(dataset);
         auto states = datasets.getStates(dataset);
         auto features = datasets.getFeatures(dataset);
         auto className = datasets.getClassName(dataset);
@@ -176,7 +182,6 @@ namespace platform {
         double best_fold_score = 0.0;
         json best_fold_hyper;
         for (const auto& hyperparam_line : combinations) {
-            //status(config_mpi, "* Dataset: " + dataset + " Fold: " + std::to_string(n_fold) + " Processing hyperparameters: " + std::to_string(++num) + "/" + std::to_string(combinations.size()));
             auto hyperparameters = platform::HyperParameters(datasets.getNames(), hyperparam_line);
             Fold* nested_fold;
@@ -223,7 +228,7 @@ namespace platform {
         results[dataset][std::to_string(n_fold)]["score"] = best_fold_score;
         results[dataset][std::to_string(n_fold)]["hyperparameters"] = best_fold_hyper;
         results[dataset][std::to_string(n_fold)]["seed"] = seed;
-        status(config_mpi, "Finished dataset " + dataset + " with seed " + std::to_string(seed) + " and fold " + std::to_string(n_fold) + " score " + std::to_string(best_fold_score));
+        std::cout << get_color_rank(config_mpi.rank) << "*" << std::flush;
     }
     void GridSearch::go_mpi(struct ConfigMPI& config_mpi)
@@ -290,8 +295,6 @@ namespace platform {
         //3.1 Obtain the maximum size of the results message of all the workers
         MPI_Allreduce(&size, &max_size, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
         // Assign the memory to the message and initialize it to 0s
-        status(config_mpi, "Max size of the results message: " + std::to_string(max_size));
-        status(config_mpi, "size of my message " + std::to_string(size));
         char* total = NULL;
         msg = new char[max_size];
         strncpy(msg, results.dump().c_str(), size);
         if (config_mpi.rank == config_mpi.manager) {
             total = new char[max_size * config_mpi.n_procs];
         }
         // 3.2 Gather all the results from the workers into the manager
-        std::cout << "(" << config_mpi.rank << ")" << msg << std::endl;
         MPI_Gather(msg, max_size, MPI_CHAR, total, max_size, MPI_CHAR, config_mpi.manager, MPI_COMM_WORLD);
+        delete[] msg;
         if (config_mpi.rank == config_mpi.manager) {
-            std::cout << "Manager taking final control!" << std::endl;
+            std::cout << "|" << std::endl;
             json total_results;
             json best_results;
             // 3.3 Compile the results from all the workers
@@ -315,7 +318,6 @@ namespace platform {
                 }
             }
             delete[] total;
-            std::cout << "Total results: " << total_results.dump() << std::endl;
             // 3.4 Filter the best hyperparameters for each dataset
             auto grid = GridData(Paths::grid_input(config.model));
             for (auto& [dataset, folds] : total_results.items()) {
@@ -336,11 +338,8 @@ namespace platform {
                 };
                 best_results[dataset] = result;
             }
-            std::cout << "Best results: " << best_results.dump() << std::endl;
-            save(total_results);
+            save(best_results);
         }
-        delete[] msg;
-        std::cout << "Process " << config_mpi.rank << " finished!" << std::endl;
     }
     void GridSearch::go()
     {
diff --git a/src/Platform/b_grid.cc b/src/Platform/b_grid.cc
index 4439192..d9285e5 100644
--- a/src/Platform/b_grid.cc
+++ b/src/Platform/b_grid.cc
@@ -219,6 +219,11 @@ int main(int argc, char** argv)
                 MPI_Comm_rank(MPI_COMM_WORLD, &mpi_config.rank);
                 MPI_Comm_size(MPI_COMM_WORLD, &mpi_config.n_procs);
                 grid_search.go_mpi(mpi_config);
+                if (mpi_config.rank == mpi_config.manager) {
+                    auto results = grid_search.getResults();
+                    list_results(results, config.model);
+                    std::cout << "Process took " << timer.getDurationString() << std::endl;
+                }
                 MPI_Finalize();
             } else {
                 grid_search.go();

From 49b26bd04bffe742284c753af6fc1997162cd5f8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20Monta=C3=B1ana=20G=C3=B3mez?=
Date: Sat, 16 Dec 2023 12:53:25 +0100
Subject: [PATCH 09/12] fix duration output

---
 src/Platform/GridSearch.cc | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/Platform/GridSearch.cc b/src/Platform/GridSearch.cc
index de7e060..d7d18b2 100644
--- a/src/Platform/GridSearch.cc
+++ b/src/Platform/GridSearch.cc
@@ -116,8 +116,7 @@ namespace platform {
             }
         }
         // It's important to shuffle the array so heavy datasets are spread across the Workers
-        std::random_device rd;
-        std::mt19937 g(rd());
+        std::mt19937 g{ 271 }; // Use fixed seed to obtain the same shuffle
         std::shuffle(tasks.begin(), tasks.end(), g);
         std::cout << "Tasks size: " << tasks.size() << std::endl;
         std::cout << "|";
@@ -258,6 +257,7 @@ namespace platform {
         char* msg;
         int tasks_size;
         if (config_mpi.rank == config_mpi.manager) {
+            timer.start();
             auto tasks = build_tasks_mpi();
             auto tasks_str = tasks.dump();
             tasks_size = tasks_str.size();
             msg = new char[tasks_size + 1];
             strcpy(msg, tasks_str.c_str());
         }
@@ -305,7 +305,7 @@ namespace platform {
         MPI_Gather(msg, max_size, MPI_CHAR, total, max_size, MPI_CHAR, config_mpi.manager, MPI_COMM_WORLD);
         delete[] msg;
         if (config_mpi.rank == config_mpi.manager) {
-            std::cout << "|" << std::endl;
+            std::cout << Colors::RESET() << "|" << std::endl;
             json total_results;
             json best_results;
             // 3.3 Compile the results from all the workers

From 9b8db37a4bf7edf9dac40229d437c48e1c090d8e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20Monta=C3=B1ana=20G=C3=B3mez?=
Date: Sat, 16 Dec 2023 19:31:45 +0100
Subject: [PATCH 10/12] Fix duration of task not set

---
 src/Platform/GridSearch.cc |  8 +++++++-
 src/Platform/Timer.h       |  4 ++++
 src/Platform/b_grid.cc     | 19 +------------------
 3 files changed, 12 insertions(+), 19 deletions(-)

diff --git a/src/Platform/GridSearch.cc b/src/Platform/GridSearch.cc
index d7d18b2..76c4b4c 100644
--- a/src/Platform/GridSearch.cc
+++ b/src/Platform/GridSearch.cc
@@ -152,6 +152,8 @@ namespace platform {
     void GridSearch::process_task_mpi(struct ConfigMPI& config_mpi, json& task, Datasets& datasets, json& results)
     {
         // Process the task and store the result in the results json
+        Timer timer;
+        timer.start();
         auto grid = GridData(Paths::grid_input(config.model));
         auto dataset = task["dataset"].get<std::string>();
         auto seed = task["seed"].get<int>();
         auto n_fold = task["fold"].get<int>();
@@ -227,6 +229,7 @@ namespace platform {
         results[dataset][std::to_string(n_fold)]["score"] = best_fold_score;
         results[dataset][std::to_string(n_fold)]["hyperparameters"] = best_fold_hyper;
         results[dataset][std::to_string(n_fold)]["seed"] = seed;
+        results[dataset][std::to_string(n_fold)]["duration"] = timer.getDuration();
         std::cout << get_color_rank(config_mpi.rank) << "*" << std::flush;
     }
@@ -322,19 +325,22 @@ namespace platform {
             // 3.4 Filter the best hyperparameters for each dataset
             auto grid = GridData(Paths::grid_input(config.model));
             for (auto& [dataset, folds] : total_results.items()) {
                 double best_score = 0.0;
+                double duration = 0.0;
                 json best_hyper;
                 for (auto& [fold, result] : folds.items()) {
+                    duration += result["duration"].get<double>();
                     if (result["score"] > best_score) {
                         best_score = result["score"];
                         best_hyper = result["hyperparameters"];
                     }
                 }
+                auto timer = Timer();
                 json result = {
                     { "score", best_score },
                     { "hyperparameters", best_hyper },
                     { "date", get_date() + " " + get_time() },
                     { "grid", grid.getInputGrid(dataset) },
-                    { "duration", 0 }
+                    { "duration", timer.translate2String(duration) }
                 };
                 best_results[dataset] = result;
             }
diff --git a/src/Platform/Timer.h b/src/Platform/Timer.h
index b44d629..277fb6d 100644
--- a/src/Platform/Timer.h
+++ b/src/Platform/Timer.h
@@ -28,6 +28,10 @@ namespace platform {
         std::string getDurationString(bool lapse = false)
         {
             double duration = lapse ? getLapse() : getDuration();
+            return translate2String(duration);
+        }
+        std::string translate2String(double duration)
+        {
             double durationShow = duration > 3600 ? duration / 3600 : duration > 60 ? duration / 60 : duration;
             std::string durationUnit = duration > 3600 ? "h" : duration > 60 ? "m" : "s";
             std::stringstream ss;
diff --git a/src/Platform/b_grid.cc b/src/Platform/b_grid.cc
index d9285e5..5bb6c9c 100644
--- a/src/Platform/b_grid.cc
+++ b/src/Platform/b_grid.cc
@@ -133,30 +133,13 @@ void list_results(json& results, std::string& model)
         std::cout << color;
         std::cout << std::setw(3) << std::right << index++ << " ";
         std::cout << left << setw(spaces) << key << " " << value["date"].get<std::string>()
-            << " " << setw(8) << value["duration"] << " " << setw(8) << setprecision(6)
+            << " " << setw(8) << value["duration"].get<std::string>() << " " << setw(8) << setprecision(6)
             << fixed << right << value["score"].get<double>() << " " << value["hyperparameters"].dump() << std::endl;
         odd = !odd;
     }
     std::cout << Colors::RESET() << std::endl;
 }
 
-void initialize_mpi(struct platform::ConfigMPI& config)
-{
-    // int provided;
-    // MPI_Init_thread(nullptr, nullptr, MPI_THREAD_MULTIPLE, &provided);
-    // if (provided != MPI_THREAD_MULTIPLE) {
-    //     std::cerr << "MPI_Init_thread returned " << provided << " instead of " << MPI_THREAD_MULTIPLE << std::endl;
-    //     exit(1);
-    // }
-    // MPI_Init(nullptr, nullptr);
-    // int rank, size;
-    // MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-    // MPI_Comm_size(MPI_COMM_WORLD, &size);
-    // config.mpi_rank = rank;
-    // config.mpi_size = size;
-}
-
-
 /*
  * Main
  */

From 7de11b0e6d73eef4a9e6c609400c883b6c969afe Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20Monta=C3=B1ana=20G=C3=B3mez?=
Date: Sun, 17 Dec 2023 01:45:04 +0100
Subject: [PATCH 11/12] Fix format of duration

---
 src/Platform/Timer.h   | 2 +-
 src/Platform/b_grid.cc | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/Platform/Timer.h b/src/Platform/Timer.h
index 277fb6d..dd10d94 100644
--- a/src/Platform/Timer.h
+++ b/src/Platform/Timer.h
@@ -35,7 +35,7 @@ namespace platform {
             double durationShow = duration > 3600 ? duration / 3600 : duration > 60 ? duration / 60 : duration;
             std::string durationUnit = duration > 3600 ? "h" : duration > 60 ? "m" : "s";
             std::stringstream ss;
-            ss << std::setw(7) << std::setprecision(2) << std::fixed << durationShow << " " << durationUnit << " ";
+            ss << std::setprecision(2) << std::fixed << durationShow << " " << durationUnit;
             return ss.str();
         }
     };
diff --git a/src/Platform/b_grid.cc b/src/Platform/b_grid.cc
index 5bb6c9c..d870353 100644
--- a/src/Platform/b_grid.cc
+++ b/src/Platform/b_grid.cc
@@ -133,7 +133,7 @@ void list_results(json& results, std::string& model)
         std::cout << color;
         std::cout << std::setw(3) << std::right << index++ << " ";
         std::cout << left << setw(spaces) << key << " " << value["date"].get<std::string>()
-            << " " << setw(8) << value["duration"].get<std::string>() << " " << setw(8) << setprecision(6)
+            << " " << setw(8) << right << value["duration"].get<std::string>() << " " << setw(8) << setprecision(6)
             << fixed << right << value["score"].get<double>() << " " << value["hyperparameters"].dump() << std::endl;
         odd = !odd;
     }

From 18e8e84284b2c9ebbc99e878769646f326a11dc7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20Monta=C3=B1ana?=
Date: Sun, 17 Dec 2023 12:19:50 +0100
Subject: [PATCH 12/12] Add openmpi instructions for Oracle Linux

---
 README.md | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index ad0dd4a..6ddd7c1 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,11 @@ Before compiling BayesNet.
 
 ### MPI
 
-In Linux just install openmpi & openmpi-devel packages.
+In Linux just install openmpi & openmpi-devel packages. If cmake can't find the openmpi installation (as in Oracle Linux), set the following variable:
+
+```bash
+export MPI_HOME="/usr/lib64/openmpi"
+```
 
 In Mac OS X, install mpich with brew; if cmake doesn't find it, edit the mpicxx wrapper to remove ",-commons,use_dylibs" from final_ldflags