Merge pull request 'Add Friedman Test & post hoc tests to BestResults' (#10) from boost into main
Reviewed-on: #10
This commit is contained in: commit 623ceed396
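For context, a hedged example of how the new option is driven from the command line, based on the `best.cc` changes further down; the model flag spelling (`-m`) is an assumption, since its definition is outside this excerpt:

```bash
# Sketch only: --friedman requires the model to be "any" and at least one of
# --build/--report to be selected (see best.cc below); the -m flag name is assumed.
./best -m any -s accuracy --build --report --friedman
```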
@ -30,6 +30,17 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")
option(ENABLE_CLANG_TIDY "Enable to add clang tidy." OFF)
option(ENABLE_TESTING "Unit testing build" OFF)
option(CODE_COVERAGE "Collect coverage from test library" OFF)

# Boost Library
set(Boost_USE_STATIC_LIBS OFF)
set(Boost_USE_MULTITHREADED ON)
set(Boost_USE_STATIC_RUNTIME OFF)
find_package(Boost 1.78.0 REQUIRED)
if(Boost_FOUND)
message("Boost_INCLUDE_DIRS=${Boost_INCLUDE_DIRS}")
include_directories(${Boost_INCLUDE_DIRS})
endif()

SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread")
# CMakes modules
# --------------
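For reviewers who want to try the new Boost dependency locally, a hedged configure-and-build sketch; the options and the `best` target come from the CMake changes in this PR, the build directory and job count are assumptions:

```bash
# Sketch only: assumes Boost >= 1.78 is installed where find_package can see it.
cmake -S . -B build -D ENABLE_TESTING=OFF
cmake --build build --target best -j 4
```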
@ -4,10 +4,14 @@ Bayesian Network Classifier with libtorch from scratch
## 0. Setup

### libxlswriter

Before compiling BayesNet.

### boost library

[Getting Started](<https://www.boost.org/doc/libs/1_83_0/more/getting_started/index.html>)
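As a pointer only, one way to get the required Boost headers on a Debian/Ubuntu system; the package name is an assumption, and any install that satisfies `find_package(Boost 1.78.0 REQUIRED)` works:

```bash
# Assumed package-manager route; building from source per the guide above also works.
sudo apt-get install libboost-dev
```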
### libxlswriter

```bash
cd lib/libxlsxwriter
make
```
@ -2,10 +2,10 @@
#include <fstream>
#include <iostream>
#include <sstream>
#include <set>
#include "BestResults.h"
#include "Result.h"
#include "Colors.h"
#include "Statistics.h"
@ -24,7 +24,6 @@ std::string ftime_to_string(TP tp)
buffer << std::put_time(gmt, "%Y-%m-%d %H:%M");
return buffer.str();
}

namespace platform {

string BestResults::build()
@ -106,9 +105,10 @@ namespace platform {
}
throw invalid_argument("Unable to open result file. [" + fileName + "]");
}
set<string> BestResults::getModels()
vector<string> BestResults::getModels()
{
set<string> models;
vector<string> result;
auto files = loadResultFiles();
if (files.size() == 0) {
cerr << Colors::MAGENTA() << "No result files were found!" << Colors::RESET() << endl;
@ -121,7 +121,8 @@ namespace platform {
// add the model to the vector of models
models.insert(fileModel);
}
return models;
result = vector<string>(models.begin(), models.end());
return result;
}

void BestResults::buildAll()
@ -163,7 +164,7 @@ namespace platform {
odd = !odd;
}
}
json BestResults::buildTableResults(set<string> models)
json BestResults::buildTableResults(vector<string> models)
{
int numberOfDatasets = 0;
bool first = true;
@ -200,35 +201,8 @@ namespace platform {
table["dateTable"] = ftime_to_string(maxDate);
return table;
}
map<string, float> assignRanks(vector<pair<string, double>>& ranksOrder)
{
// sort the ranksOrder vector by value
sort(ranksOrder.begin(), ranksOrder.end(), [](const pair<string, double>& a, const pair<string, double>& b) {
return a.second > b.second;
});
//Assign ranks to values and if they are the same they share the same averaged rank
map<string, float> ranks;
for (int i = 0; i < ranksOrder.size(); i++) {
ranks[ranksOrder[i].first] = i + 1.0;
}
int i = 0;
while (i < static_cast<int>(ranksOrder.size())) {
int j = i + 1;
int sumRanks = ranks[ranksOrder[i].first];
while (j < static_cast<int>(ranksOrder.size()) && ranksOrder[i].second == ranksOrder[j].second) {
sumRanks += ranks[ranksOrder[j++].first];
}
if (j > i + 1) {
float averageRank = (float)sumRanks / (j - i);
for (int k = i; k < j; k++) {
ranks[ranksOrder[k].first] = averageRank;
}
}
i = j;
}
return ranks;
}
void BestResults::printTableResults(set<string> models, json table)
void BestResults::printTableResults(vector<string> models, json table)
{
cout << Colors::GREEN() << "Best results for " << score << " as of " << table.at("dateTable").get<string>() << endl;
cout << "------------------------------------------------" << endl;
@ -245,6 +219,7 @@ namespace platform {
auto i = 0;
bool odd = true;
map<string, double> totals;
int nDatasets = table.begin().value().size();
for (const auto& model : models) {
totals[model] = 0.0;
}
@ -254,17 +229,13 @@ namespace platform {
cout << color << setw(3) << fixed << right << i++ << " ";
cout << setw(25) << left << item.key() << " ";
double maxValue = 0;
vector<pair<string, double>> ranksOrder;
// Find out the max value for this dataset
for (const auto& model : models) {
double value = table[model].at(item.key()).at(0).get<double>();
if (value > maxValue) {
maxValue = value;
}
ranksOrder.push_back({ model, value });
}
// Assign the ranks
auto ranks = assignRanks(ranksOrder);
// Print the row with red colors on max values
for (const auto& model : models) {
string efectiveColor = color;
@ -297,22 +268,6 @@ namespace platform {
}
cout << efectiveColor << setw(12) << setprecision(9) << fixed << totals[model] << " ";
}
// Output the averaged ranks
cout << endl;
int min = 1;
for (const auto& rank : ranks) {
if (rank.second < min) {
min = rank.second;
}
}
cout << Colors::GREEN() << setw(30) << " Averaged ranks...........";
for (const auto& model : models) {
string efectiveColor = Colors::GREEN();
if (ranks[model] == min) {
efectiveColor = Colors::RED();
}
cout << efectiveColor << setw(12) << setprecision(10) << fixed << (double)ranks[model] / (double)origin.size() << " ";
}
cout << endl;
}
void BestResults::reportAll()
@ -322,5 +277,16 @@ namespace platform {
json table = buildTableResults(models);
// Print the table of results
printTableResults(models, table);
// Compute the Friedman test
if (friedman) {
vector<string> datasets;
for (const auto& dataset : table.begin().value().items()) {
datasets.push_back(dataset.key());
}
double significance = 0.05;
Statistics stats(models, datasets, table, significance);
auto result = stats.friedmanTest();
stats.postHocHolmTest(result);
}
}
}
@ -8,21 +8,22 @@ using json = nlohmann::json;
namespace platform {
class BestResults {
public:
explicit BestResults(const string& path, const string& score, const string& model) : path(path), score(score), model(model) {}
explicit BestResults(const string& path, const string& score, const string& model, bool friedman) : path(path), score(score), model(model), friedman(friedman) {}
string build();
void reportSingle();
void reportAll();
void buildAll();
private:
set<string> getModels();
vector<string> getModels();
vector<string> loadResultFiles();
json buildTableResults(set<string> models);
void printTableResults(set<string> models, json table);
json buildTableResults(vector<string> models);
void printTableResults(vector<string> models, json table);
string bestResultFile();
json loadFile(const string& fileName);
string path;
string score;
string model;
bool friedman;
};
}
#endif //BESTRESULTS_H
@ -8,12 +8,13 @@ include_directories(${BayesNet_SOURCE_DIR}/lib/libxlsxwriter/include)
add_executable(main main.cc Folding.cc platformUtils.cc Experiment.cc Datasets.cc Models.cc ReportConsole.cc ReportBase.cc)
add_executable(manage manage.cc Results.cc Result.cc ReportConsole.cc ReportExcel.cc ReportBase.cc Datasets.cc platformUtils.cc)
add_executable(list list.cc platformUtils Datasets.cc)
add_executable(best best.cc BestResults.cc Result.cc)
add_executable(best best.cc BestResults.cc Result.cc Statistics.cc)
target_link_libraries(main BayesNet ArffFiles mdlp "${TORCH_LIBRARIES}")
if (${CMAKE_HOST_SYSTEM_NAME} MATCHES "Linux")
target_link_libraries(manage "${TORCH_LIBRARIES}" libxlsxwriter.so ArffFiles mdlp stdc++fs)
target_link_libraries(best stdc++fs)
target_link_libraries(best Boost::boost stdc++fs)
else()
target_link_libraries(manage "${TORCH_LIBRARIES}" "${XLSXWRITER_LIB}" ArffFiles mdlp)
target_link_libraries(best Boost::boost)
endif()
target_link_libraries(list ArffFiles mdlp "${TORCH_LIBRARIES}")
@ -3,22 +3,13 @@
#include <string>
#include <iostream>
#include "Paths.h"
#include "Symbols.h"
#include <nlohmann/json.hpp>

using json = nlohmann::json;
namespace platform {
using namespace std;
class Symbols {
public:
inline static const string check_mark{ "\u2714" };
inline static const string exclamation{ "\u2757" };
inline static const string black_star{ "\u2605" };
inline static const string cross{ "\u2717" };
inline static const string upward_arrow{ "\u27B6" };
inline static const string down_arrow{ "\u27B4" };
inline static const string equal_best{ check_mark };
inline static const string better_best{ black_star };
};

class ReportBase {
public:
explicit ReportBase(json data_, bool compare);
src/Platform/Statistics.cc (new file, 215 lines)
@ -0,0 +1,215 @@
#include "Statistics.h"
#include "Colors.h"
#include "Symbols.h"
#include <boost/math/distributions/chi_squared.hpp>
#include <boost/math/distributions/normal.hpp>

namespace platform {

Statistics::Statistics(vector<string>& models, vector<string>& datasets, json data, double significance) : models(models), datasets(datasets), data(data), significance(significance)
{
nModels = models.size();
nDatasets = datasets.size();
};

void Statistics::fit()
{
if (nModels < 3 || nDatasets < 3) {
cerr << "nModels: " << nModels << endl;
cerr << "nDatasets: " << nDatasets << endl;
throw runtime_error("Can't make the Friedman test with less than 3 models and/or less than 3 datasets.");
}
computeRanks();
// Set the control model as the one with the lowest average rank
controlIdx = distance(ranks.begin(), min_element(ranks.begin(), ranks.end(), [](const auto& l, const auto& r) { return l.second < r.second; }));
computeWTL();
fitted = true;
}
map<string, float> assignRanks(vector<pair<string, double>>& ranksOrder)
{
// sort the ranksOrder vector by value
sort(ranksOrder.begin(), ranksOrder.end(), [](const pair<string, double>& a, const pair<string, double>& b) {
return a.second > b.second;
});
//Assign ranks to values and if they are the same they share the same averaged rank
map<string, float> ranks;
for (int i = 0; i < ranksOrder.size(); i++) {
ranks[ranksOrder[i].first] = i + 1.0;
}
int i = 0;
while (i < static_cast<int>(ranksOrder.size())) {
int j = i + 1;
int sumRanks = ranks[ranksOrder[i].first];
while (j < static_cast<int>(ranksOrder.size()) && ranksOrder[i].second == ranksOrder[j].second) {
sumRanks += ranks[ranksOrder[j++].first];
}
if (j > i + 1) {
float averageRank = (float)sumRanks / (j - i);
for (int k = i; k < j; k++) {
ranks[ranksOrder[k].first] = averageRank;
}
}
i = j;
}
return ranks;
}
void Statistics::computeRanks()
{
map<string, float> ranksLine;
for (const auto& dataset : datasets) {
vector<pair<string, double>> ranksOrder;
for (const auto& model : models) {
double value = data[model].at(dataset).at(0).get<double>();
ranksOrder.push_back({ model, value });
}
// Assign the ranks
ranksLine = assignRanks(ranksOrder);
if (ranks.size() == 0) {
ranks = ranksLine;
} else {
for (const auto& rank : ranksLine) {
ranks[rank.first] += rank.second;
}
}
}
// Average the ranks
for (const auto& rank : ranks) {
ranks[rank.first] /= nDatasets;
}
}
void Statistics::computeWTL()
{
// Compute the WTL matrix
for (int i = 0; i < nModels; ++i) {
wtl[i] = { 0, 0, 0 };
}
json origin = data.begin().value();
for (auto const& item : origin.items()) {
auto controlModel = models.at(controlIdx);
double controlValue = data[controlModel].at(item.key()).at(0).get<double>();
for (int i = 0; i < nModels; ++i) {
if (i == controlIdx) {
continue;
}
double value = data[models[i]].at(item.key()).at(0).get<double>();
if (value < controlValue) {
wtl[i].win++;
} else if (value == controlValue) {
wtl[i].tie++;
} else {
wtl[i].loss++;
}
}
}
}

void Statistics::postHocHolmTest(bool friedmanResult)
{
if (!fitted) {
fit();
}
// Reference https://link.springer.com/article/10.1007/s44196-022-00083-8
// Post-hoc Holm test
// Calculate the p-value for the models paired with the control model
map<int, double> stats; // p-value of each model paired with the control model
boost::math::normal dist(0.0, 1.0);
double diff = sqrt(nModels * (nModels + 1) / (6.0 * nDatasets));
for (int i = 0; i < nModels; i++) {
if (i == controlIdx) {
stats[i] = 0.0;
continue;
}
double z = abs(ranks.at(models[controlIdx]) - ranks.at(models[i])) / diff;
double p_value = (long double)2 * (1 - cdf(dist, z));
stats[i] = p_value;
}
// Sort the models by p-value
vector<pair<int, double>> statsOrder;
for (const auto& stat : stats) {
statsOrder.push_back({ stat.first, stat.second });
}
sort(statsOrder.begin(), statsOrder.end(), [](const pair<int, double>& a, const pair<int, double>& b) {
return a.second < b.second;
});

// Holm adjustment
for (int i = 0; i < statsOrder.size(); ++i) {
auto item = statsOrder.at(i);
double before = i == 0 ? 0.0 : statsOrder.at(i - 1).second;
double p_value = min((double)1.0, item.second * (nModels - i));
p_value = max(before, p_value);
statsOrder[i] = { item.first, p_value };
}
auto color = friedmanResult ? Colors::CYAN() : Colors::YELLOW();
cout << color;
cout << " *************************************************************************************************************" << endl;
cout << " Post-hoc Holm test: H0: 'There is no significant differences between the control model and the other models.'" << endl;
cout << " Control model: " << models[controlIdx] << endl;
cout << " Model p-value rank win tie loss Status" << endl;
cout << " ============ ============ ========= === === ==== =============" << endl;
// sort ranks from lowest to highest
vector<pair<string, float>> ranksOrder;
for (const auto& rank : ranks) {
ranksOrder.push_back({ rank.first, rank.second });
}
sort(ranksOrder.begin(), ranksOrder.end(), [](const pair<string, float>& a, const pair<string, float>& b) {
return a.second < b.second;
});
for (const auto& item : ranksOrder) {
if (item.first == models.at(controlIdx)) {
continue;
}
auto idx = distance(models.begin(), find(models.begin(), models.end(), item.first));
double pvalue = 0.0;
for (const auto& stat : statsOrder) {
if (stat.first == idx) {
pvalue = stat.second;
}
}
auto colorStatus = pvalue > significance ? Colors::GREEN() : Colors::MAGENTA();
auto status = pvalue > significance ? Symbols::check_mark : Symbols::cross;
auto textStatus = pvalue > significance ? " accepted H0" : " rejected H0";
cout << " " << colorStatus << left << setw(12) << item.first << " " << setprecision(6) << scientific << pvalue << setprecision(7) << fixed << " " << item.second;
cout << " " << right << setw(3) << wtl.at(idx).win << " " << setw(3) << wtl.at(idx).tie << " " << setw(4) << wtl.at(idx).loss;
cout << " " << status << textStatus << endl;
}
cout << color << " *************************************************************************************************************" << endl;
cout << Colors::RESET();
}
bool Statistics::friedmanTest()
{
if (!fitted) {
fit();
}
// Friedman test
// Calculate the Friedman statistic
cout << Colors::BLUE() << endl;
cout << "***************************************************************************************************************" << endl;
cout << Colors::GREEN() << "Friedman test: H0: 'There is no significant differences between all the classifiers.'" << Colors::BLUE() << endl;
double degreesOfFreedom = nModels - 1.0;
double sumSquared = 0;
for (const auto& rank : ranks) {
sumSquared += pow(rank.second, 2);
}
// Compute the Friedman statistic as in https://link.springer.com/article/10.1007/s44196-022-00083-8
double friedmanQ = 12.0 * nDatasets / (nModels * (nModels + 1)) * (sumSquared - (nModels * pow(nModels + 1, 2)) / 4);
cout << "Friedman statistic: " << friedmanQ << endl;
// Calculate the critical value
boost::math::chi_squared chiSquared(degreesOfFreedom);
long double p_value = (long double)1.0 - cdf(chiSquared, friedmanQ);
double criticalValue = quantile(chiSquared, 1 - significance);
std::cout << "Critical Chi-Square Value for df=" << fixed << (int)degreesOfFreedom
<< " and alpha=" << setprecision(2) << fixed << significance << ": " << setprecision(7) << scientific << criticalValue << std::endl;
cout << "p-value: " << scientific << p_value << " is " << (p_value < significance ? "less" : "greater") << " than " << setprecision(2) << fixed << significance << endl;
bool result;
if (p_value < significance) {
cout << Colors::GREEN() << "The null hypothesis H0 is rejected." << endl;
result = true;
} else {
cout << Colors::YELLOW() << "The null hypothesis H0 is accepted. Computed p-values will not be significant." << endl;
result = false;
}
cout << Colors::BLUE() << "***************************************************************************************************************" << Colors::RESET() << endl;
return result;
}
} // namespace platform
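For reviewers, the formulas the new file implements, as I read the code above; notation is mine: N datasets, k models, R̄_j the average rank of model j, Φ the standard normal CDF.

```latex
% Friedman statistic (chi-squared approximation with k-1 degrees of freedom),
% matching Statistics::friedmanTest above:
\[
Q = \frac{12N}{k(k+1)}\left(\sum_{j=1}^{k}\bar{R}_j^{\,2}-\frac{k(k+1)^2}{4}\right),
\qquad p = 1 - F_{\chi^2_{k-1}}(Q)
\]
% Pairwise statistic against the control model c and Holm step-down adjustment,
% matching Statistics::postHocHolmTest (p-values sorted ascending, 0-based i,
% with the control model itself included at i = 0 with p = 0):
\[
z_i = \frac{\lvert\bar{R}_c-\bar{R}_i\rvert}{\sqrt{k(k+1)/(6N)}},
\qquad p_i = 2\bigl(1-\Phi(z_i)\bigr),
\qquad \tilde{p}_{(i)} = \max\bigl(\tilde{p}_{(i-1)},\ \min(1,\,(k-i)\,p_{(i)})\bigr)
\]
```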
src/Platform/Statistics.h (new file, 37 lines)
@ -0,0 +1,37 @@
#ifndef STATISTICS_H
#define STATISTICS_H
#include <iostream>
#include <vector>
#include <nlohmann/json.hpp>

using namespace std;
using json = nlohmann::json;

namespace platform {
struct WTL {
int win;
int tie;
int loss;
};
class Statistics {
public:
Statistics(vector<string>& models, vector<string>& datasets, json data, double significance = 0.05);
bool friedmanTest();
void postHocHolmTest(bool friedmanResult);
private:
void fit();
void computeRanks();
void computeWTL();
vector<string> models;
vector<string> datasets;
json data;
double significance;
bool fitted = false;
int nModels = 0;
int nDatasets = 0;
int controlIdx = 0;
map<int, WTL> wtl;
map<string, float> ranks;
};
}
#endif // !STATISTICS_H
src/Platform/Symbols.h (new file, 18 lines)
@ -0,0 +1,18 @@
#ifndef SYMBOLS_H
#define SYMBOLS_H
#include <string>
using namespace std;
namespace platform {
class Symbols {
public:
inline static const string check_mark{ "\u2714" };
inline static const string exclamation{ "\u2757" };
inline static const string black_star{ "\u2605" };
inline static const string cross{ "\u2717" };
inline static const string upward_arrow{ "\u27B6" };
inline static const string down_arrow{ "\u27B4" };
inline static const string equal_best{ check_mark };
inline static const string better_best{ black_star };
};
}
#endif // !SYMBOLS_H
@ -13,12 +13,14 @@ argparse::ArgumentParser manageArguments(int argc, char** argv)
program.add_argument("-s", "--score").default_value("").help("Filter results of the score name supplied");
program.add_argument("--build").help("build best score results file").default_value(false).implicit_value(true);
program.add_argument("--report").help("report of best score results file").default_value(false).implicit_value(true);
program.add_argument("--friedman").help("Friedman test").default_value(false).implicit_value(true);
try {
program.parse_args(argc, argv);
auto model = program.get<string>("model");
auto score = program.get<string>("score");
auto build = program.get<bool>("build");
auto report = program.get<bool>("report");
auto friedman = program.get<bool>("friedman");
if (model == "" || score == "") {
throw runtime_error("Model and score name must be supplied");
}
@ -38,12 +40,18 @@ int main(int argc, char** argv)
auto score = program.get<string>("score");
auto build = program.get<bool>("build");
auto report = program.get<bool>("report");
auto friedman = program.get<bool>("friedman");
if (friedman && model != "any") {
cerr << "Friedman test can only be used with all models" << endl;
cerr << program;
exit(1);
}
if (!report && !build) {
cerr << "Either build, report or both, have to be selected to do anything!" << endl;
cerr << program;
exit(1);
}
auto results = platform::BestResults(platform::Paths::results(), score, model);
auto results = platform::BestResults(platform::Paths::results(), score, model, friedman);
if (build) {
if (model == "any") {
results.buildAll();