diff --git a/app/app.py b/app/app.py
index 8e3aad2..f06521b 100644
--- a/app/app.py
+++ b/app/app.py
@@ -5,7 +5,7 @@
 from flask_login import LoginManager
 
 from .config import Config
 from .models import User, db
-from .results.main_select import results
+from .results.main_results import results
 from .admin.main_admin import admin
 from .main import main
diff --git a/app/results/forms.py b/app/results/forms.py
new file mode 100644
index 0000000..fa18845
--- /dev/null
+++ b/app/results/forms.py
@@ -0,0 +1,8 @@
+from flask_wtf import FlaskForm
+from wtforms import SubmitField, SelectField
+from benchmark.Arguments import ALL_METRICS
+
+
+class RankingForm(FlaskForm):
+    score = SelectField("Score", choices=ALL_METRICS)
+    submit = SubmitField("Generate Ranking")
diff --git a/app/results/main_select.py b/app/results/main_results.py
similarity index 84%
rename from app/results/main_select.py
rename to app/results/main_results.py
index 54a8474..e75c704 100644
--- a/app/results/main_select.py
+++ b/app/results/main_results.py
@@ -5,7 +5,7 @@ import shutil
 import xlsxwriter
 from benchmark.Datasets import Datasets
 from benchmark.ResultsBase import StubReport
-from benchmark.ResultsFiles import Excel, ReportDatasets
+from benchmark.ResultsFiles import Excel, ReportDatasets, Benchmark
 from benchmark.Utils import Files, Folders
 from flask import (
     Blueprint,
@@ -14,9 +14,11 @@ from flask import (
     request,
     send_file,
     url_for,
+    redirect,
 )
 from dotenv import dotenv_values
 from flask_login import current_user, login_required
+from .forms import RankingForm
 
 
 results = Blueprint("results", __name__, template_folder="templates")
@@ -231,3 +233,36 @@
         results=results,
         app_config=app_config,
     )
+
+
+@results.route("/ranking", methods=["GET", "POST"])
+@login_required
+def ranking():
+    os.chdir(current_user.benchmark.folder)
+    form = RankingForm()
+    if form.validate_on_submit():
+        benchmark = Benchmark(score=form.score.data, visualize=False)
+        try:
+            benchmark.compile_results()
+            benchmark.save_results()
+            benchmark.report(tex_output=False)
+            benchmark.exreport()
+            benchmark.excel()
+        except ValueError as e:
+            return render_template(
+                "error.html", message="Couldn't generate ranking", error=str(e)
+            )
+        except KeyError as e:
+            return render_template(
+                "error.html",
+                message="Couldn't generate ranking. It seems that there are "
+                "partial results for some classifiers",
+                error=f"Key not found {str(e)}",
+            )
+        return redirect(
+            url_for(
+                "results.download",
+                file_name=benchmark.get_excel_file_name(),
+            )
+        )
+    return render_template("ranking.html", form=form)
diff --git a/app/results/templates/ranking.html b/app/results/templates/ranking.html
new file mode 100644
index 0000000..d9eb8d4
--- /dev/null
+++ b/app/results/templates/ranking.html
@@ -0,0 +1,13 @@
+{% extends "base.html" %}
+{% from 'bootstrap5/form.html' import render_form %}
+{% block content %}
+