8 Commits
flask ... main

Author  SHA1  Message  Date
Ricardo Montañana Gómez  cf8fd3454e  Update README.md  2025-05-06 14:05:42 +02:00
Ricardo Montañana Gómez  162cdc2da1  Merge pull request #11 from Doctorado-ML/rmontanana-patch-1 (Update README.md)  2025-05-06 14:05:06 +02:00
Ricardo Montañana Gómez  765112073c  Update README.md  2025-05-06 14:04:14 +02:00
69e21584bd  Fix tests in python 3.13  2024-12-16 01:27:34 +01:00
419c899c94  Fix some errors in tests  2024-12-16 00:53:11 +01:00
2a2ed81a6c  Fix Arff datasets mistake / Fix table_report partial mistake  2024-12-14 23:50:58 +01:00
4c5502611a  Update version and copyright  2024-09-18 16:00:31 +02:00
Ricardo Montañana Gómez  70f1da5fc7  Merge pull request #10 from Doctorado-ML/flask (Flask)  2024-03-13 16:18:55 +01:00
14 changed files with 48 additions and 41 deletions

View File

@@ -1,12 +1,9 @@
 [![CI](https://github.com/Doctorado-ML/benchmark/actions/workflows/main.yml/badge.svg)](https://github.com/Doctorado-ML/benchmark/actions/workflows/main.yml)
 [![codecov](https://codecov.io/gh/Doctorado-ML/benchmark/branch/main/graph/badge.svg?token=ZRP937NDSG)](https://codecov.io/gh/Doctorado-ML/benchmark)
-[![Quality Gate Status](https://sonar.rmontanana.es/api/project_badges/measure?project=benchmark&metric=alert_status&token=336a6e501988888543c3153baa91bad4b9914dd2)](https://sonar.rmontanana.es/dashboard?id=benchmark)
-[![Technical Debt](https://sonar.rmontanana.es/api/project_badges/measure?project=benchmark&metric=sqale_index&token=336a6e501988888543c3153baa91bad4b9914dd2)](https://sonar.rmontanana.es/dashboard?id=benchmark)
 ![https://img.shields.io/badge/python-3.8%2B-blue](https://img.shields.io/badge/python-3.8%2B-brightgreen)
 # benchmark
-Benchmarking models
+Benchmarking Python models
 ## Experimentation

View File

@@ -32,6 +32,8 @@ class DatasetsArff:
     def get_range_features(X, c_features):
         if c_features.strip() == "all":
             return list(range(X.shape[1]))
+        if c_features.strip() == "none":
+            return []
         return json.loads(c_features)

     def load(self, name, class_name):
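
With this change the continuous-feature specifier accepts three forms: "all" (every column), the new "none" (no continuous features), and a JSON list of column indices. A standalone sketch of the same logic, runnable outside the class (the imports and the sample array are assumptions; the function body mirrors the diff above):

import json

import numpy as np


def get_range_features(X, c_features):
    # "all" -> every column, "none" -> no continuous features,
    # otherwise an explicit JSON list such as "[0, 1]".
    spec = c_features.strip()
    if spec == "all":
        return list(range(X.shape[1]))
    if spec == "none":
        return []
    return json.loads(spec)


X = np.zeros((10, 4))
print(get_range_features(X, "all"))     # [0, 1, 2, 3]
print(get_range_features(X, "none"))    # []
print(get_range_features(X, "[0, 1]"))  # [0, 1]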
@@ -129,29 +131,28 @@ class Datasets:
     def _init_names(self, dataset_name):
         file_name = os.path.join(self.dataset.folder(), Files.index)
-        default_class = "class"
         self.continuous_features = {}
         with open(file_name) as f:
             sets = f.read().splitlines()
             sets = [x for x in sets if not x.startswith("#")]
-        class_names = [default_class] * len(sets)
-        if "," in sets[0]:
-            result = []
-            class_names = []
-            for data in sets:
-                name, class_name, features = data.split(",", 2)
-                result.append(name)
-                class_names.append(class_name)
-                self.continuous_features[name] = features
-            sets = result
-        else:
-            for name in sets:
-                self.continuous_features[name] = None
+        results = []
+        class_names = []
+        for set_name in sets:
+            try:
+                name, class_name, features = set_name.split(";")
+            except ValueError:
+                class_name = "class"
+                features = "all"
+                name = set_name
+            results.append(name)
+            class_names.append(class_name)
+            features = features.strip()
+            self.continuous_features[name] = features
         # Set as dataset list the dataset passed as argument
         if dataset_name is None:
-            return class_names, sets
+            return class_names, results
         try:
-            class_name = class_names[sets.index(dataset_name)]
+            class_name = class_names[results.index(dataset_name)]
         except ValueError:
             raise ValueError(f"Unknown dataset: {dataset_name}")
         return [class_name], [dataset_name]
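
The dataset index moves from comma-separated to semicolon-separated fields (name;class_name;continuous_features), presumably because a feature list such as "[0, 1]" itself contains commas, and entries without separators now fall back to a class named "class" with "all" features. A minimal sketch of a parser with the same fallback behaviour (standalone; the dataset names in the usage lines are only illustrative):

def parse_index_line(line):
    # "name;class;[0, 1]" gives an explicit class name and feature spec;
    # a bare "name" falls back to class "class" and all features,
    # mirroring the try/except added in _init_names above.
    try:
        name, class_name, features = line.split(";")
    except ValueError:
        name, class_name, features = line, "class", "all"
    return name.strip(), class_name.strip(), features.strip()


print(parse_index_line("iris;class;all"))     # ('iris', 'class', 'all')
print(parse_index_line("wine;class;[0, 1]"))  # ('wine', 'class', '[0, 1]')
print(parse_index_line("balance-scale"))      # ('balance-scale', 'class', 'all')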

View File

@@ -108,10 +108,12 @@ class BaseReport(abc.ABC):
         status = (
             Symbols.cross
             if accuracy <= max_value
-            else Symbols.upward_arrow
-            if accuracy > max_value
-            else " "
+            else (
+                Symbols.upward_arrow
+                if accuracy > max_value
+                else " "
+            )
         )
         if status != " ":
             if status not in self._compare_totals:
                 self._compare_totals[status] = 1
@@ -161,6 +163,11 @@ class StubReport(BaseReport):
     def header(self) -> None:
         self.title = self.data["title"]
         self.duration = self.data["duration"]
+        self.model = self.data["model"]
+        self.date = self.data["date"]
+        self.time = self.data["time"]
+        self.metric = self.data["score_name"]
+        self.platform = self.data["platform"]

     def footer(self, accuracy: float) -> None:
         self.accuracy = accuracy
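
StubReport.header() now also surfaces the model, date, time, score metric and platform stored with each result. A minimal sketch of the kind of header dictionary these keys come from (key names taken from the diff; the values are purely illustrative):

# Hypothetical result header with the fields StubReport.header() reads.
data = {
    "title": "Test experiment",
    "duration": 1.0,
    "model": "STree",
    "date": "2022-02-22",
    "time": "12:00:00",
    "score_name": "accuracy",
    "platform": "Test",
}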
@@ -195,9 +202,11 @@ class Summary:
             self.models.add(model)
             report = StubReport(
                 os.path.join(
-                    Folders.hidden_results
-                    if self.hidden
-                    else Folders.results,
+                    (
+                        Folders.hidden_results
+                        if self.hidden
+                        else Folders.results
+                    ),
                     result,
                 )
             )

View File

@@ -10,7 +10,7 @@ from .Results import Report
 from ._version import __version__

 __author__ = "Ricardo Montañana Gómez"
-__copyright__ = "Copyright 2020-2023, Ricardo Montañana Gómez"
+__copyright__ = "Copyright 2020-2024, Ricardo Montañana Gómez"
 __license__ = "MIT License"
 __author_email__ = "ricardo.montanana@alu.uclm.es"

View File

@@ -1 +1 @@
-__version__ = "0.5.0"
+__version__ = "1.0.1"

View File

@@ -88,7 +88,7 @@
         <button type="button"
                 class="btn-close"
                 aria-label="Close"
-                onclick="location.href = '/index/{{ compare }}'"></button>
+                onclick="location.href = '{{ back }}'"></button>
         <h7>
           <b>
             Total score: {{ "%.6f" % (data.results | sum(attribute="score") ) }}

View File

@@ -90,7 +90,7 @@
       {% endif %}
       <h2 class="has-text-white has-background-primary">
         <b>
-          <button class="delete" onclick="location.href = '/index/{{ compare }}'"></button>
+          <button class="delete" onclick="location.href = '{{ back }}'"></button>
           Total score: {{ "%.6f" % (data.results | sum(attribute="score") ) }}
         </b>
       </h2>
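
Both templates now take the close-button target from a back variable instead of the hard-coded /index/{{ compare }} URL, so the Flask view decides where the button returns to. A minimal sketch of a view supplying that variable (route, template name and query parameter are assumptions, not taken from this changeset):

from flask import Flask, render_template, request

app = Flask(__name__)


@app.route("/report/<name>")
def report(name):
    # The template only does location.href = '{{ back }}', so the view
    # passes the return URL explicitly (here taken from a query parameter).
    back = request.args.get("back", "/index")
    return render_template("report.html", back=back, name=name)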

View File

@@ -68,7 +68,7 @@ class ArgumentsTest(TestBase):
         test_args = ["-n", "3", "-k", "date"]
         with self.assertRaises(SystemExit):
             arguments.parse(test_args)
-        self.assertRegexpMatches(
+        self.assertRegex(
             stderr.getvalue(),
             r"error: the following arguments are required: -m/--model",
         )
@@ -79,7 +79,7 @@ class ArgumentsTest(TestBase):
         test_args = ["-n", "3", "-m", "SVC"]
         with self.assertRaises(SystemExit):
             arguments.parse(test_args)
-        self.assertRegexpMatches(
+        self.assertRegex(
             stderr.getvalue(),
             r"error: the following arguments are required: -k/--key",
         )
@@ -114,7 +114,7 @@ class ArgumentsTest(TestBase):
         test_args = None
         with self.assertRaises(SystemExit):
             arguments.parse(test_args)
-        self.assertRegexpMatches(
+        self.assertRegex(
             stderr.getvalue(),
             r"error: the following arguments are required: -m/--model, "
             "-k/--key, --title",

View File

@@ -102,7 +102,7 @@ class ModelTest(TestBase):
         test = {
             "STree": ((11, 6, 4), 1.0),
             "Wodt": ((303, 152, 50), 0.9382022471910112),
-            "ODTE": ((7.86, 4.43, 3.37), 1.0),
+            "ODTE": ((786, 443, 337), 1.0),
             "Cart": ((23, 12, 5), 1.0),
             "SVC": ((0, 0, 0), 0.7078651685393258),
             "RandomForest": ((21.3, 11, 5.26), 1.0),

View File

@@ -1,2 +1,2 @@
-iris,class,all
-wine,class,[0, 1]
+iris;class;all
+wine;class;[0, 1]

View File

@@ -6,7 +6,7 @@
"kernel": "liblinear", "kernel": "liblinear",
"multiclass_strategy": "ovr" "multiclass_strategy": "ovr"
}, },
"v. 1.3.1, Computed on Test on 2022-02-22 at 12:00:00 took 1s" "v. 1.4.0, Computed on Test on 2022-02-22 at 12:00:00 took 1s"
], ],
"balloons": [ "balloons": [
0.625, 0.625,
@@ -15,6 +15,6 @@
"kernel": "linear", "kernel": "linear",
"multiclass_strategy": "ovr" "multiclass_strategy": "ovr"
}, },
"v. 1.3.1, Computed on Test on 2022-02-22 at 12:00:00 took 1s" "v. 1.4.0, Computed on Test on 2022-02-22 at 12:00:00 took 1s"
] ]
} }

View File

@@ -120,7 +120,7 @@ class BeMainTest(TestBase):
             module.main(parameter)
         self.assertEqual(msg.exception.code, 2)
         self.assertEqual(stderr.getvalue(), "")
-        self.assertRegexpMatches(stdout.getvalue(), message)
+        self.assertRegex(stdout.getvalue(), message)

     def test_be_main_best_params_non_existent(self):
         model = "GBC"

View File

@@ -1,4 +1,4 @@
-1;1;"Datasets used in benchmark ver. 0.5.0"
+1;1;"Datasets used in benchmark ver. 1.0.1"
 2;1;" Default score accuracy"
 2;2;"Cross validation"
 2;6;"5 Folds"

View File

@@ -1,4 +1,4 @@
-Datasets used in benchmark ver. 0.5.0
+Datasets used in benchmark ver. 1.0.1
 Dataset                        Sampl. Feat. Cont Cls Balance
 ============================== ====== ===== ==== === ==========================================