From 386faf960eadb2d3d720d5f893d1537f2673205e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ricardo=20Monta=C3=B1ana=20G=C3=B3mez?= Date: Tue, 14 Jan 2025 18:53:11 +0100 Subject: [PATCH] Refactor grid classes and add summary of tasks at the end --- cmake/modules/CodeCoverage.cmake | 8 +- lib/Files | 2 +- lib/catch2 | 2 +- lib/json | 2 +- lib/libxlsxwriter | 2 +- src/commands/b_grid.cpp | 4 +- src/grid/GridBase.cpp | 232 +++++++++++++++++++ src/grid/GridBase.h | 22 +- src/grid/GridConfig.h | 9 +- src/grid/GridExperiment.cpp | 306 +++++++++++++++++++++---- src/grid/GridExperiment.h | 232 +------------------ src/grid/GridSearch.cpp | 368 +++++++++++++++++++------------ src/grid/GridSearch.h | 230 +------------------ 13 files changed, 761 insertions(+), 658 deletions(-) diff --git a/cmake/modules/CodeCoverage.cmake b/cmake/modules/CodeCoverage.cmake index d4a039f..670dea8 100644 --- a/cmake/modules/CodeCoverage.cmake +++ b/cmake/modules/CodeCoverage.cmake @@ -137,7 +137,7 @@ include(CMakeParseArguments) -option(CODE_COVERAGE_VERBOSE "Verbose information" FALSE) +option(CODE_COVERAGE_VERBOSE "Verbose information" TRUE) # Check prereqs find_program( GCOV_PATH gcov ) @@ -160,7 +160,11 @@ foreach(LANG ${LANGUAGES}) endif() elseif(NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "GNU" AND NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "(LLVM)?[Ff]lang") - message(FATAL_ERROR "Compiler is not GNU or Flang! Aborting...") + if ("${LANG}" MATCHES "CUDA") + message(STATUS "Ignoring CUDA") + else() + message(FATAL_ERROR "Compiler is not GNU or Flang! Aborting...") + endif() endif() endforeach() diff --git a/lib/Files b/lib/Files index a4329f5..18c79f6 160000 --- a/lib/Files +++ b/lib/Files @@ -1 +1 @@ -Subproject commit a4329f5f9dfdb18ee3faa63bd5b665f2f253b8d2 +Subproject commit 18c79f6d4894d6b7a6cbfad0239bf9bfd68d3bb4 diff --git a/lib/catch2 b/lib/catch2 index 0321d2f..914aeec 160000 --- a/lib/catch2 +++ b/lib/catch2 @@ -1 +1 @@ -Subproject commit 0321d2fce328b5e2ad106a8230ff20e0d5bf5501 +Subproject commit 914aeecfe23b1e16af6ea675a4fb5dbd5a5b8d0a diff --git a/lib/json b/lib/json index 620034e..48e7b4c 160000 --- a/lib/json +++ b/lib/json @@ -1 +1 @@ -Subproject commit 620034ececc93991c5c1183b73c3768d81ca84b3 +Subproject commit 48e7b4c23b089c088c11e51c824d78d0f0949b40 diff --git a/lib/libxlsxwriter b/lib/libxlsxwriter index 8206bda..14f1351 160000 --- a/lib/libxlsxwriter +++ b/lib/libxlsxwriter @@ -1 +1 @@ -Subproject commit 8206bda64ab66db608a920b09d35d163464aad1b +Subproject commit 14f13513cb140092a913a91fce719ff7dc36e332 diff --git a/src/commands/b_grid.cpp b/src/commands/b_grid.cpp index 168d00a..b6fe208 100644 --- a/src/commands/b_grid.cpp +++ b/src/commands/b_grid.cpp @@ -318,7 +318,7 @@ void experiment(argparse::ArgumentParser& program) auto env = platform::DotEnv(); config.platform = env.get("platform"); platform::Paths::createPath(platform::Paths::grid()); - // auto grid_experiment = platform::GridExperiment(config); + auto grid_experiment = platform::GridExperiment(config); platform::Timer timer; timer.start(); struct platform::ConfigMPI mpi_config; @@ -329,7 +329,7 @@ void experiment(argparse::ArgumentParser& program) if (mpi_config.n_procs < 2) { throw std::runtime_error("Cannot use --compute with less than 2 mpi processes, try mpirun -np 2 ..."); } - // grid_experiment.go(mpi_config); + grid_experiment.go(mpi_config); if (mpi_config.rank == mpi_config.manager) { // auto results = grid_experiment.loadResults(); // list_results(results, config.model); diff --git a/src/grid/GridBase.cpp 
b/src/grid/GridBase.cpp index f64b45c..51d839e 100644 --- a/src/grid/GridBase.cpp +++ b/src/grid/GridBase.cpp @@ -1,3 +1,5 @@ +#include <random> +#include <algorithm> #include "common/DotEnv.h" #include "common/Paths.h" #include "GridBase.h" @@ -18,5 +20,235 @@ namespace platform { exit(1); } } + std::string GridBase::get_color_rank(int rank) + { + auto colors = { Colors::WHITE(), Colors::RED(), Colors::GREEN(), Colors::BLUE(), Colors::MAGENTA(), Colors::CYAN(), Colors::YELLOW(), Colors::BLACK() }; + std::string id = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; + auto idx = rank % id.size(); + return *(colors.begin() + rank % colors.size()) + id[idx]; + } + json GridBase::build_tasks() + { + /* + * Each task is a json object with the following structure: + * { + * "dataset": "dataset_name", + * "idx_dataset": idx_dataset, // used to identify the dataset in the results + * // this index is relative to the list of datasets used in the actual run, not to the whole datasets list + * "seed": # of seed to use, + * "fold": # of fold to process + * } + */ + auto tasks = json::array(); + auto grid = GridData(Paths::grid_input(config.model)); + auto datasets = Datasets(false, Paths::datasets()); + auto all_datasets = datasets.getNames(); + auto datasets_names = filterDatasets(datasets); + for (int idx_dataset = 0; idx_dataset < datasets_names.size(); ++idx_dataset) { + auto dataset = datasets_names[idx_dataset]; + for (const auto& seed : config.seeds) { + auto combinations = grid.getGrid(dataset); + for (int n_fold = 0; n_fold < config.n_folds; n_fold++) { + json task = { + { "dataset", dataset }, + { "idx_dataset", idx_dataset }, + { "seed", seed }, + { "fold", n_fold }, + }; + tasks.push_back(task); + } + } + } + // Shuffle the array so heavy datasets are more easily spread across the workers + std::mt19937 g{ 271 }; // Use a fixed seed to obtain the same shuffle every run + std::shuffle(tasks.begin(), tasks.end(), g); + std::cout << "* Number of tasks: " << tasks.size() << std::endl; + std::cout << separator << std::flush; + for (int i = 0; i < tasks.size(); ++i) { + if ((i + 1) % 10 == 0) + std::cout << separator; + else + std::cout << (i + 1) % 10; + } + std::cout << separator << std::endl << separator << std::flush; + return tasks; + }
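// For reference (illustrative values only, not part of the patch), one element
// of the array returned by build_tasks() looks like this:
//
//   { "dataset": "iris", "idx_dataset": 0, "seed": 17, "fold": 3 }
//
// idx_dataset refers to the filtered dataset list of this run, which is why the
// producer later needs the names vector to map results back to dataset names.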
+ void GridBase::summary(json& all_results, json& tasks, struct ConfigMPI& config_mpi) + { + // Report the tasks done by each worker, showing dataset name, seed, fold and time spent + // The format I want to show is: + // worker, dataset, seed, fold, time + // with headers + std::cout << Colors::RESET() << "* Summary of tasks done by each worker" << std::endl; + json worker_tasks = json::array(); + for (int i = 0; i < config_mpi.n_procs; ++i) { + worker_tasks.push_back(json::array()); + } + int max_dataset = 7; + for (const auto& [key, results] : all_results.items()) { + auto dataset = key; + if (dataset.size() > max_dataset) + max_dataset = dataset.size(); + for (const auto& result : results) { + int n_task = result["task"].get<int>(); + json task = tasks[n_task]; + auto seed = task["seed"].get<int>(); + auto fold = task["fold"].get<int>(); + auto time = result["time"].get<double>(); + auto worker = result["process"].get<int>(); + json line = { + { "dataset", dataset }, + { "seed", seed }, + { "fold", fold }, + { "time", time } + }; + worker_tasks[worker].push_back(line); + } + } + std::cout << Colors::MAGENTA() << " W " << setw(max_dataset) << std::left << "Dataset"; + std::cout << " Seed Fold Time" << std::endl; + std::cout << "=== " << std::string(max_dataset, '=') << " ==== ==== " << std::string(15, '=') << std::endl; + for (int worker = 0; worker < config_mpi.n_procs; ++worker) { + auto color = (worker % 2) ? Colors::CYAN() : Colors::BLUE(); + std::cout << color << std::right << setw(3) << worker << " "; + if (worker == config_mpi.manager) { + std::cout << "Manager" << std::endl; + continue; + } + if (worker_tasks[worker].empty()) { + std::cout << "No tasks" << std::endl; + continue; + } + bool first = true; + double total = 0.0; + int num_tasks = 0; + for (const auto& task : worker_tasks[worker]) { + num_tasks++; + if (!first) + std::cout << std::string(4, ' '); + else + first = false; + std::cout << std::left << setw(max_dataset) << task["dataset"].get<std::string>(); + std::cout << " " << setw(4) << std::right << task["seed"].get<int>(); + std::cout << " " << setw(4) << task["fold"].get<int>(); + std::cout << " " << setw(15) << std::setprecision(7) << std::fixed << task["time"].get<double>() << std::endl; + total += task["time"].get<double>(); + } + if (num_tasks > 1) { + std::cout << Colors::MAGENTA() << setw(3) << std::right << num_tasks; + std::cout << setw(max_dataset) << " Total..." << std::string(10, '.'); + std::cout << setw(15) << std::setprecision(7) << std::fixed << total << std::endl; + } + } + }
+ void GridBase::go(struct ConfigMPI& config_mpi) + { + /* + * Each task is a json object with the following structure: + * { + * "dataset": "dataset_name", + * "idx_dataset": idx_dataset, // used to identify the dataset in the results + * // this index is relative to the list of datasets used in the actual run, not to the whole datasets list + * "seed": # of seed to use, + * "fold": # of fold to process + * } + * + * This way a task consists of processing all combinations of hyperparameters for a dataset, seed and fold + * + * The overall process consists of these steps: + * 0. Create the MPI result type & tasks + * 0.1 Create the MPI result type + * 0.2 Manager creates the tasks + * 1. Manager will broadcast the tasks to all the processes + * 1.1 Broadcast the number of tasks + * 1.2 Broadcast the length of the following string + * 1.3 Broadcast the tasks as a char* string + * 2a. Producer delivers the tasks to the consumers + * 2a.1 Producer will loop to send all the tasks to the consumers and receive the results + * 2a.2 Producer will send the end message to all the consumers + * 2b. Consumers process the tasks and send the results to the producer + * 2b.1 Consumers announce to the producer that they are ready to receive a task + * 2b.2 Consumers receive the task from the producer and process it + * 2b.3 Consumers send the result to the producer + * 3. Manager selects the best scores for each dataset + * 3.1 Loop through all the results obtained from each outer fold (task) and select the best + * 3.2 Save the results + * 3.3 Summary of jobs done + */ + // + // 0.1 Create the MPI result type + // + Task_Result result; + int tasks_size; + MPI_Datatype MPI_Result; + MPI_Datatype type[10] = { MPI_UNSIGNED, MPI_UNSIGNED, MPI_INT, MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE, MPI_INT, MPI_INT }; + int blocklen[10] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; + MPI_Aint disp[10]; + disp[0] = offsetof(Task_Result, idx_dataset); + disp[1] = offsetof(Task_Result, idx_combination); + disp[2] = offsetof(Task_Result, n_fold); + disp[3] = offsetof(Task_Result, score); + disp[4] = offsetof(Task_Result, time); + disp[5] = offsetof(Task_Result, nodes); + disp[6] = offsetof(Task_Result, leaves); + disp[7] = offsetof(Task_Result, depth); + disp[8] = offsetof(Task_Result, process); + disp[9] = offsetof(Task_Result, task); + MPI_Type_create_struct(10, blocklen, disp, type, &MPI_Result); + MPI_Type_commit(&MPI_Result); + // + // 0.2 Manager creates the tasks + // + char* msg; + json tasks; + if (config_mpi.rank == config_mpi.manager) { + timer.start(); + tasks = build_tasks(); + auto tasks_str = tasks.dump(); + tasks_size = tasks_str.size(); + msg = new char[tasks_size + 1]; + strcpy(msg, tasks_str.c_str()); + } + // + // 1. Manager will broadcast the tasks to all the processes + // + MPI_Bcast(&tasks_size, 1, MPI_INT, config_mpi.manager, MPI_COMM_WORLD); + if (config_mpi.rank != config_mpi.manager) { + msg = new char[tasks_size + 1]; + } + MPI_Bcast(msg, tasks_size + 1, MPI_CHAR, config_mpi.manager, MPI_COMM_WORLD); + tasks = json::parse(msg); + delete[] msg; + auto env = platform::DotEnv(); + auto datasets = Datasets(config.discretize, Paths::datasets(), env.get("discretize_algo")); + + if (config_mpi.rank == config_mpi.manager) { + // + // 2a. Producer delivers the tasks to the consumers + // + auto datasets_names = filterDatasets(datasets); + json all_results = producer(datasets_names, tasks, config_mpi, MPI_Result); + std::cout << separator << std::endl; + // + // 3. Manager selects the best scores for each dataset + // + auto results = initializeResults(); + select_best_results_folds(results, all_results, config.model); + // + // 3.2 Save the results + // + save(results); + // + // 3.3 Summary of jobs done + // + if (!config.quiet) + summary(all_results, tasks, config_mpi); + } else { + // + // 2b. Consumers process the tasks and send the results to the producer + // + consumer(datasets, tasks, config, config_mpi, MPI_Result); + } + } } \ No newline at end of file
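The producer/consumer handshake that go() delegates to is easier to see stripped of the grid logic. A minimal, self-contained sketch (illustrative only, reusing the TAG_* values from GridConfig.h; the plain integer payload stands in for Task_Result):

#include <mpi.h>
#include <cstdio>

const int TAG_QUERY = 1;   // worker -> manager: ready for work
const int TAG_RESULT = 2;  // worker -> manager: result attached
const int TAG_TASK = 3;    // manager -> worker: task index attached
const int TAG_END = 4;     // manager -> worker: no more tasks

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int rank, n_procs;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &n_procs);
    const int manager = 0;
    const int num_tasks = 20;  // stands in for tasks.size()
    if (rank == manager) {
        int payload;
        MPI_Status status;
        // 2a.1 hand each task to whichever worker asks first
        for (int i = 0; i < num_tasks; ++i) {
            MPI_Recv(&payload, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
            MPI_Send(&i, 1, MPI_INT, status.MPI_SOURCE, TAG_TASK, MPI_COMM_WORLD);
        }
        // 2a.2 collect the last results and dismiss every worker
        for (int i = 0; i < n_procs - 1; ++i) {
            MPI_Recv(&payload, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
            MPI_Send(&i, 1, MPI_INT, status.MPI_SOURCE, TAG_END, MPI_COMM_WORLD);
        }
    } else {
        int task = -1;
        // 2b.1 announce readiness, then loop until TAG_END arrives
        MPI_Send(&task, 1, MPI_INT, manager, TAG_QUERY, MPI_COMM_WORLD);
        while (true) {
            MPI_Status status;
            MPI_Recv(&task, 1, MPI_INT, manager, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
            if (status.MPI_TAG == TAG_END)
                break;
            std::printf("worker %d got task %d\n", rank, task);  // consumer_go() would run here
            MPI_Send(&task, 1, MPI_INT, manager, TAG_RESULT, MPI_COMM_WORLD);
        }
    }
    MPI_Finalize();
    return 0;
}

Because the manager answers whichever request arrives first, a slow fold only holds one worker while the rest keep draining the shuffled task list.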
diff --git a/src/grid/GridBase.h b/src/grid/GridBase.h index 70f519a..9b29196 100644 --- a/src/grid/GridBase.h +++ b/src/grid/GridBase.h @@ -19,23 +19,23 @@ namespace platform { public: explicit GridBase(struct ConfigGrid& config); ~GridBase() = default; + void go(struct ConfigMPI& config_mpi); protected: - virtual json build_tasks() = 0; virtual void save(json& results) = 0; + virtual std::vector<std::string> filterDatasets(Datasets& datasets) const = 0; + virtual json initializeResults() = 0; + virtual json producer(std::vector<std::string>& names, json& tasks, struct ConfigMPI& config_mpi, MPI_Datatype& MPI_Result) = 0; + virtual void consumer(Datasets& datasets, json& tasks, struct ConfigGrid& config, struct ConfigMPI& config_mpi, MPI_Datatype& MPI_Result) = 0; + virtual void select_best_results_folds(json& results, json& all_results, std::string& model) = 0; + virtual json store_result(std::vector<std::string>& names, Task_Result& result, json& results) = 0; + virtual void consumer_go(struct ConfigGrid& config, struct ConfigMPI& config_mpi, json& tasks, int n_task, Datasets& datasets, Task_Result* result) = 0; + std::string get_color_rank(int rank); + json build_tasks(); + void summary(json& all_results, json& tasks, struct ConfigMPI& config_mpi); struct ConfigGrid config; Timer timer; // used to measure the time of the whole process const std::string separator = "|"; bayesnet::Smoothing_t smooth_type{ bayesnet::Smoothing_t::NONE }; }; - class MPI_Base { - public: - static std::string get_color_rank(int rank) - { - auto colors = { Colors::WHITE(), Colors::RED(), Colors::GREEN(), Colors::BLUE(), Colors::MAGENTA(), Colors::CYAN(), Colors::YELLOW(), Colors::BLACK() }; - std::string id = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; - auto idx = rank % id.size(); - return *(colors.begin() + rank % colors.size()) + id[idx]; - } - }; } /* namespace platform */ #endif \ No newline at end of file diff --git a/src/grid/GridConfig.h b/src/grid/GridConfig.h index ee9065e..dbd8675 100644 --- a/src/grid/GridConfig.h +++ b/src/grid/GridConfig.h @@ -38,8 +38,13 @@ namespace platform { uint idx_dataset; uint idx_combination; int n_fold; - double score; - double time; + double score; // Experiment: test score; there is no train score in this case + double time; // Experiment: train+test time; train and test are not timed separately in this case + double nodes; // Experiment specific + double leaves; // Experiment specific + double depth; // Experiment specific + int process; + int task; } Task_Result; const int TAG_QUERY = 1; const int TAG_RESULT = 2;
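Task_Result crosses process boundaries as raw bytes, so every field added to the struct above must also be appended to the MPI_Type_create_struct() call in GridBase::go(), with matching count, types, and offsets. A reduced sketch of the pattern (illustrative, hypothetical Probe struct, not part of the patch):

#include <mpi.h>
#include <cstddef>  // offsetof

struct Probe {
    unsigned idx;
    double score;
    int task;
};

// Build an MPI datatype whose layout mirrors Probe exactly; a missing,
// reordered, or mistyped entry silently corrupts the received fields.
MPI_Datatype make_probe_type()
{
    MPI_Datatype probe_type;
    MPI_Datatype types[3] = { MPI_UNSIGNED, MPI_DOUBLE, MPI_INT };
    int blocklen[3] = { 1, 1, 1 };
    MPI_Aint disp[3] = { offsetof(Probe, idx), offsetof(Probe, score), offsetof(Probe, task) };
    MPI_Type_create_struct(3, blocklen, disp, types, &probe_type);
    MPI_Type_commit(&probe_type);
    return probe_type;
}

offsetof() accounts for any padding the compiler inserts, which is why the displacements are taken from the struct rather than computed by hand.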
diff --git a/src/grid/GridExperiment.cpp b/src/grid/GridExperiment.cpp index 180ae71..7d1df34 100644 --- a/src/grid/GridExperiment.cpp +++ b/src/grid/GridExperiment.cpp @@ -4,12 +4,10 @@ #include #include "main/Models.h" #include "common/Paths.h" -#include "common/Colors.h" #include "common/Utils.h" #include "GridExperiment.h" namespace platform { - GridExperiment::GridExperiment(struct ConfigGrid& config) : GridBase(config) { } @@ -21,56 +19,59 @@ } return json(); } - json GridExperiment::build_tasks() + std::vector<std::string> GridExperiment::filterDatasets(Datasets& datasets) const { - /* - * Each task is a json object with the following structure: - * { - * "dataset": "dataset_name", - * "idx_dataset": idx_dataset, // used to identify the dataset in the results - * // this index is relative to the list of used datasets in the actual run not to the whole datasets list - * "seed": # of seed to use, - * "fold": # of fold to process - * } - */ - auto tasks = json::array(); - auto grid = GridData(Paths::grid_input(config.model)); - auto datasets = Datasets(false, Paths::datasets()); - auto all_datasets = datasets.getNames(); - auto datasets_names = all_datasets; - for (int idx_dataset = 0; idx_dataset < datasets_names.size(); ++idx_dataset) { - auto dataset = datasets_names[idx_dataset]; - for (const auto& seed : config.seeds) { - auto combinations = grid.getGrid(dataset); - for (int n_fold = 0; n_fold < config.n_folds; n_fold++) { - json task = { - { "dataset", dataset }, - { "idx_dataset", idx_dataset}, - { "seed", seed }, - { "fold", n_fold}, - }; - tasks.push_back(task); + // Load datasets + auto datasets_names = datasets.getNames(); + if (config.continue_from != NO_CONTINUE()) { + // Continue previous execution: + if (std::find(datasets_names.begin(), datasets_names.end(), config.continue_from) == datasets_names.end()) { + throw std::invalid_argument("Dataset " + config.continue_from + " not found"); + } + // Remove datasets already processed + std::vector<std::string>::iterator it = datasets_names.begin(); + while (it != datasets_names.end()) { + if (*it != config.continue_from) { + it = datasets_names.erase(it); + } else { + if (config.only) + ++it; + else + break; } } } - // Shuffle the array so heavy datasets are eas ier spread across the workers - std::mt19937 g{ 271 }; // Use fixed seed to obtain the same shuffle - std::shuffle(tasks.begin(), tasks.end(), g); - std::cout << "* Number of tasks: " << tasks.size() << std::endl; - std::cout << separator << std::flush; - for (int i = 0; i < tasks.size(); ++i) { - if ((i + 1) % 10 == 0) - std::cout << separator; - else - std::cout << (i + 1) % 10; + // Exclude datasets + for (const auto& name : config.excluded) { + auto dataset = name.get<std::string>(); + auto it = std::find(datasets_names.begin(), datasets_names.end(), dataset); + if (it == datasets_names.end()) { + throw std::invalid_argument("Dataset " + dataset + " already excluded or doesn't exist!"); + } + datasets_names.erase(it); } - std::cout << separator << std::endl << separator << std::flush; - return tasks; + return datasets_names; } json GridExperiment::initializeResults() { // Load previous results if continue is set json results; + if (config.continue_from != NO_CONTINUE()) { + if (!config.quiet) + std::cout << Colors::RESET() << "* Loading previous results" << std::endl; + try { + std::ifstream file(Paths::grid_output(config.model)); + if (file.is_open()) { + results = json::parse(file); + results = results["results"]; + } + } + catch (const std::exception& e) { + std::cerr << "* There were no previous results" << std::endl; + std::cerr << "* Initializing new results" << std::endl; + results = json(); + } + } return results; } void GridExperiment::save(json& results) @@ -92,5 +93,226 @@ }; file << output.dump(4); } + // + // + // + json GridExperiment::producer(std::vector<std::string>& names, json& tasks, struct ConfigMPI& config_mpi, MPI_Datatype& MPI_Result) + { + Task_Result result; + json results; + int num_tasks = tasks.size(); + // + // 2a.1 Producer will loop to send all the tasks to the consumers and receive the results + // + for (int i = 0; i < num_tasks; ++i) { + MPI_Status status; + MPI_Recv(&result, 1, MPI_Result, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); + if (status.MPI_TAG == TAG_RESULT) { + //Store result + store_result(names, result, results); + } + MPI_Send(&i, 1, MPI_INT, status.MPI_SOURCE, TAG_TASK, MPI_COMM_WORLD); + }
+ // + // 2a.2 Producer will send the end message to all the consumers + // + for (int i = 0; i < config_mpi.n_procs - 1; ++i) { + MPI_Status status; + MPI_Recv(&result, 1, MPI_Result, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); + if (status.MPI_TAG == TAG_RESULT) { + //Store result + store_result(names, result, results); + } + MPI_Send(&i, 1, MPI_INT, status.MPI_SOURCE, TAG_END, MPI_COMM_WORLD); + } + return results; + } + void GridExperiment::consumer(Datasets& datasets, json& tasks, struct ConfigGrid& config, struct ConfigMPI& config_mpi, MPI_Datatype& MPI_Result) + { + Task_Result result; + // + // 2b.1 Consumers announce to the producer that they are ready to receive a task + // + MPI_Send(&result, 1, MPI_Result, config_mpi.manager, TAG_QUERY, MPI_COMM_WORLD); + int task; + while (true) { + MPI_Status status; + // + // 2b.2 Consumers receive the task from the producer and process it + // + MPI_Recv(&task, 1, MPI_INT, config_mpi.manager, MPI_ANY_TAG, MPI_COMM_WORLD, &status); + if (status.MPI_TAG == TAG_END) { + break; + } + consumer_go(config, config_mpi, tasks, task, datasets, &result); + // + // 2b.3 Consumers send the result to the producer + // + MPI_Send(&result, 1, MPI_Result, config_mpi.manager, TAG_RESULT, MPI_COMM_WORLD); + } + } + void GridExperiment::select_best_results_folds(json& results, json& all_results, std::string& model) + { + Timer timer; + auto grid = GridData(Paths::grid_input(model)); + // + // Select the best result of the computed outer folds + // + for (const auto& result : all_results.items()) { + // each result holds the results of all the outer folds, as each one was a different task + double best_score = 0.0; + json best; + for (const auto& result_fold : result.value()) { + double score = result_fold["score"].get<double>(); + if (score > best_score) { + best_score = score; + best = result_fold; + } + } + auto dataset = result.key(); + auto combinations = grid.getGrid(dataset); + json json_best = { + { "score", best_score }, + { "hyperparameters", combinations[best["combination"].get<int>()] }, + { "date", get_date() + " " + get_time() }, + { "grid", grid.getInputGrid(dataset) }, + { "duration", timer.translate2String(best["time"].get<double>()) } + }; + results[dataset] = json_best; + } + } + json GridExperiment::store_result(std::vector<std::string>& names, Task_Result& result, json& results) + { + json json_result = { + { "score", result.score }, + { "combination", result.idx_combination }, + { "fold", result.n_fold }, + { "time", result.time }, + { "dataset", result.idx_dataset }, + { "process", result.process }, + { "task", result.task } + }; + auto name = names[result.idx_dataset]; + if (!results.contains(name)) { + results[name] = json::array(); + } + results[name].push_back(json_result); + return results; + }
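// Illustrative shape (hypothetical values, not produced by the patch itself) of
// the JSON that store_result() accumulates per dataset and that
// select_best_results_folds() later reduces to a single best entry:
//
// "glass": [
//   { "score": 0.74, "combination": 3, "fold": 0, "time": 2.18, "dataset": 1, "process": 2, "task": 11 },
//   { "score": 0.71, "combination": 5, "fold": 1, "time": 2.03, "dataset": 1, "process": 1, "task": 4 }
// ]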
+ void GridExperiment::consumer_go(struct ConfigGrid& config, struct ConfigMPI& config_mpi, json& tasks, int n_task, Datasets& datasets, Task_Result* result) + { + // + // initialize + // + Timer timer; + timer.start(); + json task = tasks[n_task]; + auto model = config.model; + auto grid = GridData(Paths::grid_input(model)); + auto dataset_name = task["dataset"].get<std::string>(); + auto idx_dataset = task["idx_dataset"].get<int>(); + auto seed = task["seed"].get<int>(); + auto n_fold = task["fold"].get<int>(); + bool stratified = config.stratified; + bayesnet::Smoothing_t smooth = bayesnet::Smoothing_t::NONE; // default when no strategy matches + if (config.smooth_strategy == "ORIGINAL") + smooth = bayesnet::Smoothing_t::ORIGINAL; + else if (config.smooth_strategy == "LAPLACE") + smooth = bayesnet::Smoothing_t::LAPLACE; + else if (config.smooth_strategy == "CESTNIK") + smooth = bayesnet::Smoothing_t::CESTNIK; + // + // Generate the hyperparameters combinations + // + auto& dataset = datasets.getDataset(dataset_name); + auto combinations = grid.getGrid(dataset_name); + dataset.load(); + auto [X, y] = dataset.getTensors(); + auto features = dataset.getFeatures(); + auto className = dataset.getClassName(); + // + // Start working on task + // + folding::Fold* fold; + if (stratified) + fold = new folding::StratifiedKFold(config.n_folds, y, seed); + else + fold = new folding::KFold(config.n_folds, y.size(0), seed); + auto [train, test] = fold->getFold(n_fold); + auto [X_train, X_test, y_train, y_test] = dataset.getTrainTestTensors(train, test); + auto states = dataset.getStates(); // Get the states of the features once they are discretized + float best_fold_score = 0.0; + int best_idx_combination = -1; + json best_fold_hyper; + for (int idx_combination = 0; idx_combination < combinations.size(); ++idx_combination) { + auto hyperparam_line = combinations[idx_combination]; + auto hyperparameters = platform::HyperParameters(datasets.getNames(), hyperparam_line); + folding::Fold* nested_fold; + if (config.stratified) + nested_fold = new folding::StratifiedKFold(config.nested, y_train, seed); + else + nested_fold = new folding::KFold(config.nested, y_train.size(0), seed); + double score = 0.0; + for (int n_nested_fold = 0; n_nested_fold < config.nested; n_nested_fold++) { + // + // Nested level fold + // + auto [train_nested, test_nested] = nested_fold->getFold(n_nested_fold); + auto train_nested_t = torch::tensor(train_nested); + auto test_nested_t = torch::tensor(test_nested); + auto X_nested_train = X_train.index({ "...", train_nested_t }); + auto y_nested_train = y_train.index({ train_nested_t }); + auto X_nested_test = X_train.index({ "...", test_nested_t }); + auto y_nested_test = y_train.index({ test_nested_t }); + // + // Build Classifier with selected hyperparameters + // + auto clf = Models::instance()->create(config.model); + auto valid = clf->getValidHyperparameters(); + hyperparameters.check(valid, dataset_name); + clf->setHyperparameters(hyperparameters.get(dataset_name)); + // + // Train model + // + clf->fit(X_nested_train, y_nested_train, features, className, states, smooth); + // + // Test model + // + score += clf->score(X_nested_test, y_nested_test); + } + delete nested_fold; + score /= config.nested; + if (score > best_fold_score) { + best_fold_score = score; + best_idx_combination = idx_combination; + best_fold_hyper = hyperparam_line; + } + } + delete fold; + // + // Build Classifier with the best hyperparameters to obtain the best score + // + auto hyperparameters = platform::HyperParameters(datasets.getNames(), best_fold_hyper); + auto clf = Models::instance()->create(config.model); + auto valid = clf->getValidHyperparameters(); + hyperparameters.check(valid, dataset_name); + clf->setHyperparameters(best_fold_hyper); + clf->fit(X_train, y_train, features, className, states, smooth); + best_fold_score = clf->score(X_test, y_test); + // + // Return the result + // + result->idx_dataset = task["idx_dataset"].get<int>(); + result->idx_combination = best_idx_combination; + result->score = best_fold_score; + result->n_fold = n_fold; + result->time = timer.getDuration(); + result->process = config_mpi.rank; + result->task = n_task; + // + // Update progress bar + // + std::cout << get_color_rank(config_mpi.rank) << std::flush; + } } /* namespace platform */ \ No newline
at end of file diff --git a/src/grid/GridExperiment.h b/src/grid/GridExperiment.h index 556152b..61efe10 100644 --- a/src/grid/GridExperiment.h +++ b/src/grid/GridExperiment.h @@ -18,234 +18,16 @@ namespace platform { explicit GridExperiment(struct ConfigGrid& config); ~GridExperiment() = default; json loadResults(); - void go(struct ConfigMPI& config_mpi); + static inline std::string NO_CONTINUE() { return "NO_CONTINUE"; } private: void save(json& results); json initializeResults(); - json build_tasks(); - }; - /* ************************************************************************************************************* - // - // MPI Experiment Functions - // - ************************************************************************************************************* */ - class MPI_EXPERIMENT :public MPI_Base { - public: - static json producer(std::vector& names, json& tasks, struct ConfigMPI& config_mpi, MPI_Datatype& MPI_Result) - { - Task_Result result; - json results; - int num_tasks = tasks.size(); - - // - // 2a.1 Producer will loop to send all the tasks to the consumers and receive the results - // - for (int i = 0; i < num_tasks; ++i) { - MPI_Status status; - MPI_Recv(&result, 1, MPI_Result, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); - if (status.MPI_TAG == TAG_RESULT) { - //Store result - store_result(names, result, results); - } - MPI_Send(&i, 1, MPI_INT, status.MPI_SOURCE, TAG_TASK, MPI_COMM_WORLD); - } - // - // 2a.2 Producer will send the end message to all the consumers - // - for (int i = 0; i < config_mpi.n_procs - 1; ++i) { - MPI_Status status; - MPI_Recv(&result, 1, MPI_Result, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); - if (status.MPI_TAG == TAG_RESULT) { - //Store result - store_result(names, result, results); - } - MPI_Send(&i, 1, MPI_INT, status.MPI_SOURCE, TAG_END, MPI_COMM_WORLD); - } - return results; - } - static void consumer(Datasets& datasets, json& tasks, struct ConfigGrid& config, struct ConfigMPI& config_mpi, MPI_Datatype& MPI_Result) - { - Task_Result result; - // - // 2b.1 Consumers announce to the producer that they are ready to receive a task - // - MPI_Send(&result, 1, MPI_Result, config_mpi.manager, TAG_QUERY, MPI_COMM_WORLD); - int task; - while (true) { - MPI_Status status; - // - // 2b.2 Consumers receive the task from the producer and process it - // - MPI_Recv(&task, 1, MPI_INT, config_mpi.manager, MPI_ANY_TAG, MPI_COMM_WORLD, &status); - if (status.MPI_TAG == TAG_END) { - break; - } - consumer_go(config, config_mpi, tasks, task, datasets, &result); - // - // 2b.3 Consumers send the result to the producer - // - MPI_Send(&result, 1, MPI_Result, config_mpi.manager, TAG_RESULT, MPI_COMM_WORLD); - } - } - static void select_best_results_folds(json& results, json& all_results, std::string& model) - { - Timer timer; - auto grid = GridData(Paths::grid_input(model)); - // - // Select the best result of the computed outer folds - // - for (const auto& result : all_results.items()) { - // each result has the results of all the outer folds as each one were a different task - double best_score = 0.0; - json best; - for (const auto& result_fold : result.value()) { - double score = result_fold["score"].get(); - if (score > best_score) { - best_score = score; - best = result_fold; - } - } - auto dataset = result.key(); - auto combinations = grid.getGrid(dataset); - json json_best = { - { "score", best_score }, - { "hyperparameters", combinations[best["combination"].get()] }, - { "date", get_date() + " " + get_time() }, - { "grid", 
grid.getInputGrid(dataset) }, - { "duration", timer.translate2String(best["time"].get()) } - }; - results[dataset] = json_best; - } - } - static json store_result(std::vector& names, Task_Result& result, json& results) - { - json json_result = { - { "score", result.score }, - { "combination", result.idx_combination }, - { "fold", result.n_fold }, - { "time", result.time }, - { "dataset", result.idx_dataset } - }; - auto name = names[result.idx_dataset]; - if (!results.contains(name)) { - results[name] = json::array(); - } - results[name].push_back(json_result); - return results; - } - static void consumer_go(struct ConfigGrid& config, struct ConfigMPI& config_mpi, json& tasks, int n_task, Datasets& datasets, Task_Result* result) - { - // - // initialize - // - Timer timer; - timer.start(); - json task = tasks[n_task]; - auto model = config.model; - auto grid = GridData(Paths::grid_input(model)); - auto dataset_name = task["dataset"].get(); - auto idx_dataset = task["idx_dataset"].get(); - auto seed = task["seed"].get(); - auto n_fold = task["fold"].get(); - bool stratified = config.stratified; - bayesnet::Smoothing_t smooth; - if (config.smooth_strategy == "ORIGINAL") - smooth = bayesnet::Smoothing_t::ORIGINAL; - else if (config.smooth_strategy == "LAPLACE") - smooth = bayesnet::Smoothing_t::LAPLACE; - else if (config.smooth_strategy == "CESTNIK") - smooth = bayesnet::Smoothing_t::CESTNIK; - // - // Generate the hyperparameters combinations - // - auto& dataset = datasets.getDataset(dataset_name); - auto combinations = grid.getGrid(dataset_name); - dataset.load(); - auto [X, y] = dataset.getTensors(); - auto features = dataset.getFeatures(); - auto className = dataset.getClassName(); - // - // Start working on task - // - folding::Fold* fold; - if (stratified) - fold = new folding::StratifiedKFold(config.n_folds, y, seed); - else - fold = new folding::KFold(config.n_folds, y.size(0), seed); - auto [train, test] = fold->getFold(n_fold); - auto [X_train, X_test, y_train, y_test] = dataset.getTrainTestTensors(train, test); - auto states = dataset.getStates(); // Get the states of the features Once they are discretized - float best_fold_score = 0.0; - int best_idx_combination = -1; - json best_fold_hyper; - for (int idx_combination = 0; idx_combination < combinations.size(); ++idx_combination) { - auto hyperparam_line = combinations[idx_combination]; - auto hyperparameters = platform::HyperParameters(datasets.getNames(), hyperparam_line); - folding::Fold* nested_fold; - if (config.stratified) - nested_fold = new folding::StratifiedKFold(config.nested, y_train, seed); - else - nested_fold = new folding::KFold(config.nested, y_train.size(0), seed); - double score = 0.0; - for (int n_nested_fold = 0; n_nested_fold < config.nested; n_nested_fold++) { - // - // Nested level fold - // - auto [train_nested, test_nested] = nested_fold->getFold(n_nested_fold); - auto train_nested_t = torch::tensor(train_nested); - auto test_nested_t = torch::tensor(test_nested); - auto X_nested_train = X_train.index({ "...", train_nested_t }); - auto y_nested_train = y_train.index({ train_nested_t }); - auto X_nested_test = X_train.index({ "...", test_nested_t }); - auto y_nested_test = y_train.index({ test_nested_t }); - // - // Build Classifier with selected hyperparameters - // - auto clf = Models::instance()->create(config.model); - auto valid = clf->getValidHyperparameters(); - hyperparameters.check(valid, dataset_name); - clf->setHyperparameters(hyperparameters.get(dataset_name)); - // - // Train model - // - 
clf->fit(X_nested_train, y_nested_train, features, className, states, smooth); - // - // Test model - // - score += clf->score(X_nested_test, y_nested_test); - } - delete nested_fold; - score /= config.nested; - if (score > best_fold_score) { - best_fold_score = score; - best_idx_combination = idx_combination; - best_fold_hyper = hyperparam_line; - } - } - delete fold; - // - // Build Classifier with the best hyperparameters to obtain the best score - // - auto hyperparameters = platform::HyperParameters(datasets.getNames(), best_fold_hyper); - auto clf = Models::instance()->create(config.model); - auto valid = clf->getValidHyperparameters(); - hyperparameters.check(valid, dataset_name); - clf->setHyperparameters(best_fold_hyper); - clf->fit(X_train, y_train, features, className, states, smooth); - best_fold_score = clf->score(X_test, y_test); - // - // Return the result - // - result->idx_dataset = task["idx_dataset"].get(); - result->idx_combination = best_idx_combination; - result->score = best_fold_score; - result->n_fold = n_fold; - result->time = timer.getDuration(); - // - // Update progress bar - // - std::cout << get_color_rank(config_mpi.rank) << std::flush; - } + std::vector filterDatasets(Datasets& datasets) const; + json producer(std::vector& names, json& tasks, struct ConfigMPI& config_mpi, MPI_Datatype& MPI_Result); + void consumer(Datasets& datasets, json& tasks, struct ConfigGrid& config, struct ConfigMPI& config_mpi, MPI_Datatype& MPI_Result); + void select_best_results_folds(json& results, json& all_results, std::string& model); + json store_result(std::vector& names, Task_Result& result, json& results); + void consumer_go(struct ConfigGrid& config, struct ConfigMPI& config_mpi, json& tasks, int n_task, Datasets& datasets, Task_Result* result); }; } /* namespace platform */ #endif \ No newline at end of file diff --git a/src/grid/GridSearch.cpp b/src/grid/GridSearch.cpp index e6b7b74..cb71154 100644 --- a/src/grid/GridSearch.cpp +++ b/src/grid/GridSearch.cpp @@ -52,157 +52,13 @@ namespace platform { } return datasets_names; } - json GridSearch::build_tasks() - { - /* - * Each task is a json object with the following structure: - * { - * "dataset": "dataset_name", - * "idx_dataset": idx_dataset, // used to identify the dataset in the results - * // this index is relative to the list of used datasets in the actual run not to the whole datasets list - * "seed": # of seed to use, - * "fold": # of fold to process - * } - */ - auto tasks = json::array(); - auto grid = GridData(Paths::grid_input(config.model)); - auto datasets = Datasets(false, Paths::datasets()); - auto all_datasets = datasets.getNames(); - auto datasets_names = filterDatasets(datasets); - for (int idx_dataset = 0; idx_dataset < datasets_names.size(); ++idx_dataset) { - auto dataset = datasets_names[idx_dataset]; - for (const auto& seed : config.seeds) { - auto combinations = grid.getGrid(dataset); - for (int n_fold = 0; n_fold < config.n_folds; n_fold++) { - json task = { - { "dataset", dataset }, - { "idx_dataset", idx_dataset}, - { "seed", seed }, - { "fold", n_fold}, - }; - tasks.push_back(task); - } - } - } - // Shuffle the array so heavy datasets are eas ier spread across the workers - std::mt19937 g{ 271 }; // Use fixed seed to obtain the same shuffle - std::shuffle(tasks.begin(), tasks.end(), g); - std::cout << "* Number of tasks: " << tasks.size() << std::endl; - std::cout << separator << std::flush; - for (int i = 0; i < tasks.size(); ++i) { - if ((i + 1) % 10 == 0) - std::cout << separator; - else - 
std::cout << (i + 1) % 10; - } - std::cout << separator << std::endl << separator << std::flush; - return tasks; - } - void GridSearch::go(struct ConfigMPI& config_mpi) - { - /* - * Each task is a json object with the following structure: - * { - * "dataset": "dataset_name", - * "idx_dataset": idx_dataset, // used to identify the dataset in the results - * // this index is relative to the list of used datasets in the actual run not to the whole datasets list - * "seed": # of seed to use, - * "fold": # of fold to process - * } - * - * This way a task consists in process all combinations of hyperparameters for a dataset, seed and fold - * - * The overall process consists in these steps: - * 0. Create the MPI result type & tasks - * 0.1 Create the MPI result type - * 0.2 Manager creates the tasks - * 1. Manager will broadcast the tasks to all the processes - * 1.1 Broadcast the number of tasks - * 1.2 Broadcast the length of the following string - * 1.2 Broadcast the tasks as a char* string - * 2a. Producer delivers the tasks to the consumers - * 2a.1 Producer will loop to send all the tasks to the consumers and receive the results - * 2a.2 Producer will send the end message to all the consumers - * 2b. Consumers process the tasks and send the results to the producer - * 2b.1 Consumers announce to the producer that they are ready to receive a task - * 2b.2 Consumers receive the task from the producer and process it - * 2b.3 Consumers send the result to the producer - * 3. Manager select the bests scores for each dataset - * 3.1 Loop thru all the results obtained from each outer fold (task) and select the best - * 3.2 Save the results - */ - // - // 0.1 Create the MPI result type - // - Task_Result result; - int tasks_size; - MPI_Datatype MPI_Result; - MPI_Datatype type[5] = { MPI_UNSIGNED, MPI_UNSIGNED, MPI_INT, MPI_DOUBLE, MPI_DOUBLE }; - int blocklen[5] = { 1, 1, 1, 1, 1 }; - MPI_Aint disp[5]; - disp[0] = offsetof(Task_Result, idx_dataset); - disp[1] = offsetof(Task_Result, idx_combination); - disp[2] = offsetof(Task_Result, n_fold); - disp[3] = offsetof(Task_Result, score); - disp[4] = offsetof(Task_Result, time); - MPI_Type_create_struct(5, blocklen, disp, type, &MPI_Result); - MPI_Type_commit(&MPI_Result); - // - // 0.2 Manager creates the tasks - // - char* msg; - json tasks; - if (config_mpi.rank == config_mpi.manager) { - timer.start(); - tasks = build_tasks(); - auto tasks_str = tasks.dump(); - tasks_size = tasks_str.size(); - msg = new char[tasks_size + 1]; - strcpy(msg, tasks_str.c_str()); - } - // - // 1. Manager will broadcast the tasks to all the processes - // - MPI_Bcast(&tasks_size, 1, MPI_INT, config_mpi.manager, MPI_COMM_WORLD); - if (config_mpi.rank != config_mpi.manager) { - msg = new char[tasks_size + 1]; - } - MPI_Bcast(msg, tasks_size + 1, MPI_CHAR, config_mpi.manager, MPI_COMM_WORLD); - tasks = json::parse(msg); - delete[] msg; - auto env = platform::DotEnv(); - auto datasets = Datasets(config.discretize, Paths::datasets(), env.get("discretize_algo")); - - if (config_mpi.rank == config_mpi.manager) { - // - // 2a. Producer delivers the tasks to the consumers - // - auto datasets_names = filterDatasets(datasets); - json all_results = MPI_SEARCH::producer(datasets_names, tasks, config_mpi, MPI_Result); - std::cout << separator << std::endl; - // - // 3. 
Manager select the bests sccores for each dataset - // - auto results = initializeResults(); - MPI_SEARCH::select_best_results_folds(results, all_results, config.model); - // - // 3.2 Save the results - // - save(results); - } else { - // - // 2b. Consumers process the tasks and send the results to the producer - // - MPI_SEARCH::consumer(datasets, tasks, config, config_mpi, MPI_Result); - } - } json GridSearch::initializeResults() { // Load previous results if continue is set json results; if (config.continue_from != NO_CONTINUE()) { if (!config.quiet) - std::cout << "* Loading previous results" << std::endl; + std::cout << Colors::RESET() << "* Loading previous results" << std::endl; try { std::ifstream file(Paths::grid_output(config.model)); if (file.is_open()) { @@ -237,4 +93,226 @@ }; file << output.dump(4); } + // + // + // + json GridSearch::producer(std::vector<std::string>& names, json& tasks, struct ConfigMPI& config_mpi, MPI_Datatype& MPI_Result) + { + Task_Result result; + json results; + int num_tasks = tasks.size(); + // + // 2a.1 Producer will loop to send all the tasks to the consumers and receive the results + // + for (int i = 0; i < num_tasks; ++i) { + MPI_Status status; + MPI_Recv(&result, 1, MPI_Result, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); + if (status.MPI_TAG == TAG_RESULT) { + //Store result + store_result(names, result, results); + } + MPI_Send(&i, 1, MPI_INT, status.MPI_SOURCE, TAG_TASK, MPI_COMM_WORLD); + } + // + // 2a.2 Producer will send the end message to all the consumers + // + for (int i = 0; i < config_mpi.n_procs - 1; ++i) { + MPI_Status status; + MPI_Recv(&result, 1, MPI_Result, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); + if (status.MPI_TAG == TAG_RESULT) { + //Store result + store_result(names, result, results); + } + MPI_Send(&i, 1, MPI_INT, status.MPI_SOURCE, TAG_END, MPI_COMM_WORLD); + } + return results; + } + void GridSearch::consumer(Datasets& datasets, json& tasks, struct ConfigGrid& config, struct ConfigMPI& config_mpi, MPI_Datatype& MPI_Result) + { + Task_Result result; + // + // 2b.1 Consumers announce to the producer that they are ready to receive a task + // + MPI_Send(&result, 1, MPI_Result, config_mpi.manager, TAG_QUERY, MPI_COMM_WORLD); + int task; + while (true) { + MPI_Status status; + // + // 2b.2 Consumers receive the task from the producer and process it + // + MPI_Recv(&task, 1, MPI_INT, config_mpi.manager, MPI_ANY_TAG, MPI_COMM_WORLD, &status); + if (status.MPI_TAG == TAG_END) { + break; + } + consumer_go(config, config_mpi, tasks, task, datasets, &result); + // + // 2b.3 Consumers send the result to the producer + // + MPI_Send(&result, 1, MPI_Result, config_mpi.manager, TAG_RESULT, MPI_COMM_WORLD); + } + } + void GridSearch::select_best_results_folds(json& results, json& all_results, std::string& model) + { + Timer timer; + auto grid = GridData(Paths::grid_input(model)); + // + // Select the best result of the computed outer folds + // + for (const auto& result : all_results.items()) { + // each result holds the results of all the outer folds, as each one was a different task + double best_score = 0.0; + json best; + for (const auto& result_fold : result.value()) { + double score = result_fold["score"].get<double>(); + if (score > best_score) { + best_score = score; + best = result_fold; + } + } + auto dataset = result.key(); + auto combinations = grid.getGrid(dataset); + json json_best = { + { "score", best_score }, + { "hyperparameters", combinations[best["combination"].get<int>()] }, + { "date", 
get_date() + " " + get_time() }, + { "grid", grid.getInputGrid(dataset) }, + { "duration", timer.translate2String(best["time"].get()) } + }; + results[dataset] = json_best; + } + } + json GridSearch::store_result(std::vector& names, Task_Result& result, json& results) + { + json json_result = { + { "score", result.score }, + { "combination", result.idx_combination }, + { "fold", result.n_fold }, + { "time", result.time }, + { "dataset", result.idx_dataset }, + { "process", result.process }, + { "task", result.task } + }; + auto name = names[result.idx_dataset]; + if (!results.contains(name)) { + results[name] = json::array(); + } + results[name].push_back(json_result); + return results; + } + void GridSearch::consumer_go(struct ConfigGrid& config, struct ConfigMPI& config_mpi, json& tasks, int n_task, Datasets& datasets, Task_Result* result) + { + // + // initialize + // + Timer timer; + timer.start(); + json task = tasks[n_task]; + auto model = config.model; + auto grid = GridData(Paths::grid_input(model)); + auto dataset_name = task["dataset"].get(); + auto idx_dataset = task["idx_dataset"].get(); + auto seed = task["seed"].get(); + auto n_fold = task["fold"].get(); + bool stratified = config.stratified; + bayesnet::Smoothing_t smooth; + if (config.smooth_strategy == "ORIGINAL") + smooth = bayesnet::Smoothing_t::ORIGINAL; + else if (config.smooth_strategy == "LAPLACE") + smooth = bayesnet::Smoothing_t::LAPLACE; + else if (config.smooth_strategy == "CESTNIK") + smooth = bayesnet::Smoothing_t::CESTNIK; + // + // Generate the hyperparameters combinations + // + auto& dataset = datasets.getDataset(dataset_name); + auto combinations = grid.getGrid(dataset_name); + dataset.load(); + auto [X, y] = dataset.getTensors(); + auto features = dataset.getFeatures(); + auto className = dataset.getClassName(); + // + // Start working on task + // + folding::Fold* fold; + if (stratified) + fold = new folding::StratifiedKFold(config.n_folds, y, seed); + else + fold = new folding::KFold(config.n_folds, y.size(0), seed); + auto [train, test] = fold->getFold(n_fold); + auto [X_train, X_test, y_train, y_test] = dataset.getTrainTestTensors(train, test); + auto states = dataset.getStates(); // Get the states of the features Once they are discretized + float best_fold_score = 0.0; + int best_idx_combination = -1; + json best_fold_hyper; + for (int idx_combination = 0; idx_combination < combinations.size(); ++idx_combination) { + auto hyperparam_line = combinations[idx_combination]; + auto hyperparameters = platform::HyperParameters(datasets.getNames(), hyperparam_line); + folding::Fold* nested_fold; + if (config.stratified) + nested_fold = new folding::StratifiedKFold(config.nested, y_train, seed); + else + nested_fold = new folding::KFold(config.nested, y_train.size(0), seed); + double score = 0.0; + for (int n_nested_fold = 0; n_nested_fold < config.nested; n_nested_fold++) { + // + // Nested level fold + // + auto [train_nested, test_nested] = nested_fold->getFold(n_nested_fold); + auto train_nested_t = torch::tensor(train_nested); + auto test_nested_t = torch::tensor(test_nested); + auto X_nested_train = X_train.index({ "...", train_nested_t }); + auto y_nested_train = y_train.index({ train_nested_t }); + auto X_nested_test = X_train.index({ "...", test_nested_t }); + auto y_nested_test = y_train.index({ test_nested_t }); + // + // Build Classifier with selected hyperparameters + // + auto clf = Models::instance()->create(config.model); + auto valid = clf->getValidHyperparameters(); + 
hyperparameters.check(valid, dataset_name); + clf->setHyperparameters(hyperparameters.get(dataset_name)); + // + // Train model + // + clf->fit(X_nested_train, y_nested_train, features, className, states, smooth); + // + // Test model + // + score += clf->score(X_nested_test, y_nested_test); + } + delete nested_fold; + score /= config.nested; + if (score > best_fold_score) { + best_fold_score = score; + best_idx_combination = idx_combination; + best_fold_hyper = hyperparam_line; + } + } + delete fold; + // + // Build Classifier with the best hyperparameters to obtain the best score + // + auto hyperparameters = platform::HyperParameters(datasets.getNames(), best_fold_hyper); + auto clf = Models::instance()->create(config.model); + auto valid = clf->getValidHyperparameters(); + hyperparameters.check(valid, dataset_name); + clf->setHyperparameters(best_fold_hyper); + clf->fit(X_train, y_train, features, className, states, smooth); + best_fold_score = clf->score(X_test, y_test); + // + // Return the result + // + result->idx_dataset = task["idx_dataset"].get<int>(); + result->idx_combination = best_idx_combination; + result->score = best_fold_score; + result->n_fold = n_fold; + result->time = timer.getDuration(); + result->process = config_mpi.rank; + result->task = n_task; + // + // Update progress bar + // + std::cout << get_color_rank(config_mpi.rank) << std::flush; + } } /* namespace platform */ \ No newline at end of file diff --git a/src/grid/GridSearch.h b/src/grid/GridSearch.h index 6593ef9..f6ca4bc 100644 --- a/src/grid/GridSearch.h +++ b/src/grid/GridSearch.h @@ -21,235 +21,15 @@ ~GridSearch() = default; json loadResults(); static inline std::string NO_CONTINUE() { return "NO_CONTINUE"; } - void go(struct ConfigMPI& config_mpi); private: void save(json& results); json initializeResults(); std::vector<std::string> filterDatasets(Datasets& datasets) const; - json build_tasks(); - }; - /* ************************************************************************************************************* - // - // MPI Search Functions - // - ************************************************************************************************************* */ - class MPI_SEARCH :public MPI_Base { - public: - static json producer(std::vector& names, json& tasks, struct ConfigMPI& config_mpi, MPI_Datatype& MPI_Result) - { - Task_Result result; - json results; - int num_tasks = tasks.size(); - - // - // 2a.1 Producer will loop to send all the tasks to the consumers and receive the results - // - for (int i = 0; i < num_tasks; ++i) { - MPI_Status status; - MPI_Recv(&result, 1, MPI_Result, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); - if (status.MPI_TAG == TAG_RESULT) { - //Store result - store_result(names, result, results); - } - MPI_Send(&i, 1, MPI_INT, status.MPI_SOURCE, TAG_TASK, MPI_COMM_WORLD); - } - // - // 2a.2 Producer will send the end message to all the consumers - // - for (int i = 0; i < config_mpi.n_procs - 1; ++i) { - MPI_Status status; - MPI_Recv(&result, 1, MPI_Result, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); - if (status.MPI_TAG == TAG_RESULT) { - //Store result - store_result(names, result, results); - } - MPI_Send(&i, 1, MPI_INT, status.MPI_SOURCE, TAG_END, MPI_COMM_WORLD); - } - return results; - } - static void consumer(Datasets& datasets, json& tasks, struct ConfigGrid& config, struct ConfigMPI& config_mpi, MPI_Datatype& MPI_Result) - { - Task_Result result; - // - // 2b.1 Consumers announce to the producer that they are ready to receive a task - // - 
MPI_Send(&result, 1, MPI_Result, config_mpi.manager, TAG_QUERY, MPI_COMM_WORLD); - int task; - while (true) { - MPI_Status status; - // - // 2b.2 Consumers receive the task from the producer and process it - // - MPI_Recv(&task, 1, MPI_INT, config_mpi.manager, MPI_ANY_TAG, MPI_COMM_WORLD, &status); - if (status.MPI_TAG == TAG_END) { - break; - } - consumer_go(config, config_mpi, tasks, task, datasets, &result); - // - // 2b.3 Consumers send the result to the producer - // - MPI_Send(&result, 1, MPI_Result, config_mpi.manager, TAG_RESULT, MPI_COMM_WORLD); - } - } - static void select_best_results_folds(json& results, json& all_results, std::string& model) - { - Timer timer; - auto grid = GridData(Paths::grid_input(model)); - // - // Select the best result of the computed outer folds - // - for (const auto& result : all_results.items()) { - // each result has the results of all the outer folds as each one were a different task - double best_score = 0.0; - json best; - for (const auto& result_fold : result.value()) { - double score = result_fold["score"].get(); - if (score > best_score) { - best_score = score; - best = result_fold; - } - } - auto dataset = result.key(); - auto combinations = grid.getGrid(dataset); - json json_best = { - { "score", best_score }, - { "hyperparameters", combinations[best["combination"].get()] }, - { "date", get_date() + " " + get_time() }, - { "grid", grid.getInputGrid(dataset) }, - { "duration", timer.translate2String(best["time"].get()) } - }; - results[dataset] = json_best; - } - } - static json store_result(std::vector& names, Task_Result& result, json& results) - { - json json_result = { - { "score", result.score }, - { "combination", result.idx_combination }, - { "fold", result.n_fold }, - { "time", result.time }, - { "dataset", result.idx_dataset } - }; - auto name = names[result.idx_dataset]; - if (!results.contains(name)) { - results[name] = json::array(); - } - results[name].push_back(json_result); - return results; - } - static void consumer_go(struct ConfigGrid& config, struct ConfigMPI& config_mpi, json& tasks, int n_task, Datasets& datasets, Task_Result* result) - { - // - // initialize - // - Timer timer; - timer.start(); - json task = tasks[n_task]; - auto model = config.model; - auto grid = GridData(Paths::grid_input(model)); - auto dataset_name = task["dataset"].get(); - auto idx_dataset = task["idx_dataset"].get(); - auto seed = task["seed"].get(); - auto n_fold = task["fold"].get(); - bool stratified = config.stratified; - bayesnet::Smoothing_t smooth; - if (config.smooth_strategy == "ORIGINAL") - smooth = bayesnet::Smoothing_t::ORIGINAL; - else if (config.smooth_strategy == "LAPLACE") - smooth = bayesnet::Smoothing_t::LAPLACE; - else if (config.smooth_strategy == "CESTNIK") - smooth = bayesnet::Smoothing_t::CESTNIK; - // - // Generate the hyperparameters combinations - // - auto& dataset = datasets.getDataset(dataset_name); - auto combinations = grid.getGrid(dataset_name); - dataset.load(); - auto [X, y] = dataset.getTensors(); - auto features = dataset.getFeatures(); - auto className = dataset.getClassName(); - // - // Start working on task - // - folding::Fold* fold; - if (stratified) - fold = new folding::StratifiedKFold(config.n_folds, y, seed); - else - fold = new folding::KFold(config.n_folds, y.size(0), seed); - auto [train, test] = fold->getFold(n_fold); - auto [X_train, X_test, y_train, y_test] = dataset.getTrainTestTensors(train, test); - auto states = dataset.getStates(); // Get the states of the features Once they are discretized 
- float best_fold_score = 0.0; - int best_idx_combination = -1; - json best_fold_hyper; - for (int idx_combination = 0; idx_combination < combinations.size(); ++idx_combination) { - auto hyperparam_line = combinations[idx_combination]; - auto hyperparameters = platform::HyperParameters(datasets.getNames(), hyperparam_line); - folding::Fold* nested_fold; - if (config.stratified) - nested_fold = new folding::StratifiedKFold(config.nested, y_train, seed); - else - nested_fold = new folding::KFold(config.nested, y_train.size(0), seed); - double score = 0.0; - for (int n_nested_fold = 0; n_nested_fold < config.nested; n_nested_fold++) { - // - // Nested level fold - // - auto [train_nested, test_nested] = nested_fold->getFold(n_nested_fold); - auto train_nested_t = torch::tensor(train_nested); - auto test_nested_t = torch::tensor(test_nested); - auto X_nested_train = X_train.index({ "...", train_nested_t }); - auto y_nested_train = y_train.index({ train_nested_t }); - auto X_nested_test = X_train.index({ "...", test_nested_t }); - auto y_nested_test = y_train.index({ test_nested_t }); - // - // Build Classifier with selected hyperparameters - // - auto clf = Models::instance()->create(config.model); - auto valid = clf->getValidHyperparameters(); - hyperparameters.check(valid, dataset_name); - clf->setHyperparameters(hyperparameters.get(dataset_name)); - // - // Train model - // - clf->fit(X_nested_train, y_nested_train, features, className, states, smooth); - // - // Test model - // - score += clf->score(X_nested_test, y_nested_test); - } - delete nested_fold; - score /= config.nested; - if (score > best_fold_score) { - best_fold_score = score; - best_idx_combination = idx_combination; - best_fold_hyper = hyperparam_line; - } - } - delete fold; - // - // Build Classifier with the best hyperparameters to obtain the best score - // - auto hyperparameters = platform::HyperParameters(datasets.getNames(), best_fold_hyper); - auto clf = Models::instance()->create(config.model); - auto valid = clf->getValidHyperparameters(); - hyperparameters.check(valid, dataset_name); - clf->setHyperparameters(best_fold_hyper); - clf->fit(X_train, y_train, features, className, states, smooth); - best_fold_score = clf->score(X_test, y_test); - // - // Return the result - // - result->idx_dataset = task["idx_dataset"].get(); - result->idx_combination = best_idx_combination; - result->score = best_fold_score; - result->n_fold = n_fold; - result->time = timer.getDuration(); - // - // Update progress bar - // - std::cout << get_color_rank(config_mpi.rank) << std::flush; - } + json producer(std::vector& names, json& tasks, struct ConfigMPI& config_mpi, MPI_Datatype& MPI_Result); + void consumer(Datasets& datasets, json& tasks, struct ConfigGrid& config, struct ConfigMPI& config_mpi, MPI_Datatype& MPI_Result); + void select_best_results_folds(json& results, json& all_results, std::string& model); + json store_result(std::vector& names, Task_Result& result, json& results); + void consumer_go(struct ConfigGrid& config, struct ConfigMPI& config_mpi, json& tasks, int n_task, Datasets& datasets, Task_Result* result); }; } /* namespace platform */ #endif \ No newline at end of file