Add GitHub CI and badges

Refactor setup
2022-05-11 12:21:55 +02:00
parent d826a65300
commit c7768ad387
5 changed files with 146 additions and 45 deletions

.github/workflows/codeql-analysis.yml (new file)

@@ -0,0 +1,56 @@
name: "CodeQL"
on:
push:
branches: [ master ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ master ]
schedule:
- cron: '16 17 * * 3'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
language: [ 'python' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
steps:
- name: Checkout repository
uses: actions/checkout@v2
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v1
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1
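A note on the schedule trigger: `'16 17 * * 3'` is standard cron syntax (minute, hour, day of month, month, day of week), so the scan also runs every Wednesday at 17:16 UTC. The trigger and matrix can be sanity-checked locally before pushing; a minimal sketch, assuming PyYAML is installed (it is not a dependency of this repository):

```python
# Sketch: parse the workflow locally and inspect its triggers and matrix.
# Assumes PyYAML is available (pip install pyyaml).
import yaml

with open(".github/workflows/codeql-analysis.yml") as f:
    workflow = yaml.safe_load(f)

# YAML 1.1 loads the unquoted key `on` as the boolean True, so try both lookups.
triggers = workflow.get("on") or workflow.get(True)
print(triggers["schedule"])  # [{'cron': '16 17 * * 3'}]
print(workflow["jobs"]["analyze"]["strategy"]["matrix"]["language"])  # ['python']
```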

.github/workflows/main.yml (new file)

@@ -0,0 +1,41 @@
name: CI

on:
  push:
    branches: [master]
  pull_request:
    branches: [master]
  workflow_dispatch:

jobs:
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [macos-latest, ubuntu-latest, windows-latest]
        python: [3.8]
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python }}
      - name: Install dependencies
        run: |
          pip install -q --upgrade pip
          pip install -q -r requirements.txt
          pip install -q --upgrade codecov coverage black flake8
      - name: Lint
        run: |
          black --check --diff benchmark
          flake8 --count benchmark
      - name: Tests
        run: |
          coverage run -m unittest -v benchmark.tests
          coverage xml
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v1
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./coverage.xml
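The lint and test steps can be reproduced locally before pushing. A minimal sketch, assuming `black`, `flake8`, and `coverage` are installed in the active environment and that the lint/test targets are the `benchmark` package and its `benchmark.tests` module, as in the workflow above:

```python
# Sketch: run the CI lint and test steps locally, stopping at the first failure.
import subprocess

steps = [
    ["black", "--check", "--diff", "benchmark"],
    ["flake8", "--count", "benchmark"],
    ["coverage", "run", "-m", "unittest", "-v", "benchmark.tests"],
    ["coverage", "xml"],
]
for cmd in steps:
    subprocess.run(cmd, check=True)  # raises CalledProcessError if a step fails
```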

README.md

@@ -1,3 +1,7 @@
+![CI](https://github.com/Doctorado-ML/benchmark/workflows/CI/badge.svg)
+[![codecov](https://codecov.io/gh/doctorado-ml/benchmark/branch/master/graph/badge.svg)](https://codecov.io/gh/doctorado-ml/benchmark)
+[![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/Doctorado-ML/STree.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/Doctorado-ML/STree/context:python)
+![Python](https://img.shields.io/badge/python-3.8%2B-brightgreen)
 # benchmark
 Benchmarking models
@@ -6,53 +10,55 @@ Benchmarking models
 ```python
 # 5 Fold 10 seeds with STree with default hyperparameters and report
-python src/main.py -m STree -P iMac27 -r 1
+be_main -m STree -P iMac27 -r 1
 # Setting number of folds, in this case 7
-python src/main.py -m STree -P iMac27 -n 7
+be_main -m STree -P iMac27 -n 7
 # 5 Fold 10 seeds with STree and best results hyperparams
-python src/main.py -m STree -P iMac27 -f 1
+be_main -m STree -P iMac27 -f 1
 # 5 Fold 10 seeds with STree and same hyperparameters
-python src/main.py -m STree -P iMac27 -p '{"kernel": "rbf", "gamma": 0.1}'
+be_main -m STree -P iMac27 -p '{"kernel": "rbf", "gamma": 0.1}'
 ```
 ## Best Results
 ```python
 # Build best results of STree model and print report
-python src/build_best.py -m STree -r 1
+be_build_best -m STree -r 1
 # Report of STree best results
-python src/report.py -b STree
+be_report -b STree
 ```
 ## Reports
 ```python
 # Datasets list
-python src/report.py
+be_report
 # Report of given experiment
-python src/report.py -f results/results_STree_iMac27_2021-09-22_17:13:02.json
+be_report -f results/results_STree_iMac27_2021-09-22_17:13:02.json
 # Report of given experiment building excel file and compare with best results
-python src/report.py -f results/results_STree_iMac27_2021-09-22_17:13:02.json -x 1 -c 1
+be_report -f results/results_STree_iMac27_2021-09-22_17:13:02.json -x 1 -c 1
 # Report of given experiment building sql file
-python src/report.py -f results/results_STree_iMac27_2021-09-22_17:13:02.json -q 1
+be_report -f results/results_STree_iMac27_2021-09-22_17:13:02.json -q 1
 ```
 ## Benchmark
 ```python
 # Do benchmark and print report
-python src/benchmark.py
+be_benchmark
 # Do benchmark, print report and build excel file with data
-python src/benchmark.py -x 1
+be_benchmark -x 1
+# Do benchmark, print report and build tex table with results
+be_benchmark -t 1
 ```
 ## List
 ```python
 # List of results of given model
-python src/list.py -m ODTE
+be_list -m ODTE
 # List of results of given model and score
-python src/list.py -m STree -s f1-macro
+be_list -m STree -s f1-macro
 # List all results
-python src/list.py
+be_list
 ```
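The `-p` option above takes the hyperparameters as a JSON object on the command line; JSON requires double quotes inside, hence the single quotes around the whole argument. How `be_main` consumes the value is not shown in this diff, but a sketch of the round trip from string to Python dict:

```python
# Sketch: a -p value parsed into a dict of keyword arguments for the model.
# The actual parsing inside be_main is an assumption.
import json

params = json.loads('{"kernel": "rbf", "gamma": 0.1}')
assert params == {"kernel": "rbf", "gamma": 0.1}
```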

benchmark/__init__.py

@@ -1,10 +1,9 @@
 from .Experiments import Experiment, Datasets, DatasetsSurcov, DatasetsTanveer
 from .Results import Report, Summary
-from .Arguments import EnvDefault
 
 __author__ = "Ricardo Montañana Gómez"
 __copyright__ = "Copyright 2020-2022, Ricardo Montañana Gómez"
 __license__ = "MIT License"
 __author_email__ = "ricardo.montanana@alu.uclm.es"
 
-__all__ = ["Experiment", "Datasets", "Report", "Summary", "EnvDefault"]
+__all__ = ["Experiment", "Datasets", "Report", "Summary"]
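With `EnvDefault` dropped from both the import and `__all__`, the package's star-importable surface shrinks accordingly; a quick check, assuming the package is installed:

```python
# Sketch: inspect the public surface defined in benchmark/__init__.py.
import benchmark

print(benchmark.__all__)      # ['Experiment', 'Datasets', 'Report', 'Summary']
print(benchmark.__license__)  # 'MIT License'
```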

setup.py

@@ -7,9 +7,8 @@ def readme():
     return f.read()
 
 
-def get_data(field):
+def get_data(field, file_name="__init__.py"):
     item = ""
-    file_name = "_version.py" if field == "version" else "__init__.py"
     with open(os.path.join("benchmark", file_name)) as f:
         for line in f.readlines():
             if line.startswith(f"__{field}__"):
@@ -21,17 +20,34 @@ def get_data(field):
     return item
 
 
-def import_scripts():
+def get_requirements():
+    with open("requirements.txt") as f:
+        return f.read().splitlines()
+
+
+def script_names():
+    scripts = [
+        "benchmark",
+        "best",
+        "build_best",
+        "build_grid",
+        "grid",
+        "list",
+        "main",
+        "pair_check",
+        "print_strees",
+        "report",
+        "summary",
+    ]
     result = []
-    names = os.listdir(os.path.join("benchmark", "scripts"))
-    for name in names:
-        result.append(os.path.join("benchmark", "scripts", name))
+    for script in scripts:
+        result.append(f"be_{script}=benchmark.scripts.be_{script}:main")
     return result
 
 
 setuptools.setup(
     name="benchmark",
-    version=get_data("version"),
+    version=get_data("version", "_version.py"),
     license=get_data("license"),
     description="Oblique decision tree with svm nodes",
     long_description=readme(),
@@ -46,32 +62,15 @@ setuptools.setup(
         "Development Status :: 4 - Beta",
         "License :: OSI Approved :: " + get_data("license"),
         "Programming Language :: Python :: 3.8",
+        "Programming Language :: Python :: 3.9",
+        "Programming Language :: Python :: 3.10",
         "Natural Language :: English",
         "Topic :: Scientific/Engineering :: Artificial Intelligence",
         "Intended Audience :: Science/Research",
     ],
-    install_requires=[
-        "scikit-learn",
-        "odte",
-        "pandas",
-        "mufs",
-        "xlsxwriter",
-        "tqdm",
-    ],
+    install_requires=get_requirements(),
     zip_safe=False,
     entry_points={
-        "console_scripts": [
-            "be_list=benchmark.scripts.be_list:main",
-            "be_report=benchmark.scripts.be_report:main",
-            "be_main=benchmark.scripts.be_main:main",
-            "be_benchmark=benchmark.scripts.be_benchmark:main",
-            "be_best=benchmark.scripts.be_best:main",
-            "be_build_best=benchmark.scripts.be_build_best:main",
-            "be_build_grid=benchmark.scripts.be_build_grid:main",
-            "be_grid=benchmark.scripts.be_grid:main",
-            "be_pair_check=benchmark.scripts.be_pair_check:main",
-            "be_print_strees=benchmark.scripts.be_print_strees:main",
-            "be_summary=benchmark.scripts.be_summary:main",
-        ],
+        "console_scripts": script_names(),
     },
 )
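`script_names()` now builds the setuptools entry-point specs programmatically, producing strings of the form `be_<script>=benchmark.scripts.be_<script>:main`; on install, setuptools generates a `be_<script>` executable that imports the named module and calls its `main()`. A hypothetical minimal module matching the `be_list` spec (the real script modules are not part of this diff; the flags mirror the README examples):

```python
# Hypothetical benchmark/scripts/be_list.py; the generated console script
# "be_list=benchmark.scripts.be_list:main" imports this module and calls main().
import argparse


def main():
    parser = argparse.ArgumentParser(prog="be_list")
    parser.add_argument("-m", "--model")  # e.g. STree or ODTE
    parser.add_argument("-s", "--score")  # e.g. f1-macro
    args = parser.parse_args()
    print(f"model={args.model} score={args.score}")


if __name__ == "__main__":
    main()
```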