From c7768ad3873bfef3be35bd7a9c08c18abdf69ffa Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20Montan=CC=83ana?=
Date: Wed, 11 May 2022 12:21:55 +0200
Subject: [PATCH] Add github ci and badges refactor setup

---
 .github/workflows/codeql-analysis.yml | 56 +++++++++++++++++++++++++++
 .github/workflows/main.yml            | 41 ++++++++++++++++++++
 README.md                             | 36 ++++++++++-------
 benchmark/__init__.py                 |  3 +-
 setup.py                              | 55 +++++++++++++-------------
 5 files changed, 146 insertions(+), 45 deletions(-)
 create mode 100644 .github/workflows/codeql-analysis.yml
 create mode 100644 .github/workflows/main.yml

diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
new file mode 100644
index 0000000..671cd05
--- /dev/null
+++ b/.github/workflows/codeql-analysis.yml
@@ -0,0 +1,56 @@
+name: "CodeQL"
+
+on:
+  push:
+    branches: [ master ]
+  pull_request:
+    # The branches below must be a subset of the branches above
+    branches: [ master ]
+  schedule:
+    - cron: '16 17 * * 3'
+
+jobs:
+  analyze:
+    name: Analyze
+    runs-on: ubuntu-latest
+
+    strategy:
+      fail-fast: false
+      matrix:
+        language: [ 'python' ]
+        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
+        # Learn more:
+        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
+
+    steps:
+    - name: Checkout repository
+      uses: actions/checkout@v2
+
+    # Initializes the CodeQL tools for scanning.
+    - name: Initialize CodeQL
+      uses: github/codeql-action/init@v1
+      with:
+        languages: ${{ matrix.language }}
+        # If you wish to specify custom queries, you can do so here or in a config file.
+        # By default, queries listed here will override any specified in a config file.
+        # Prefix the list here with "+" to use these queries and those in the config file.
+        # queries: ./path/to/local/query, your-org/your-repo/queries@main
+
+    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+    # If this step fails, then you should remove it and run the build manually (see below)
+    - name: Autobuild
+      uses: github/codeql-action/autobuild@v1
+
+    # ℹ️ Command-line programs to run using the OS shell.
+    # 📚 https://git.io/JvXDl
+
+    # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
+    #    and modify them (or add more) to build your code if your project
+    #    uses a compiled language
+
+    #- run: |
+    #   make bootstrap
+    #   make release
+
+    - name: Perform CodeQL Analysis
+      uses: github/codeql-action/analyze@v1
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
new file mode 100644
index 0000000..473df43
--- /dev/null
+++ b/.github/workflows/main.yml
@@ -0,0 +1,41 @@
+name: CI
+
+on:
+  push:
+    branches: [master]
+  pull_request:
+    branches: [master]
+  workflow_dispatch:
+
+jobs:
+  build:
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [macos-latest, ubuntu-latest, windows-latest]
+        python: [3.8]
+
+    steps:
+      - uses: actions/checkout@v2
+      - name: Set up Python ${{ matrix.python }}
+        uses: actions/setup-python@v2
+        with:
+          python-version: ${{ matrix.python }}
+      - name: Install dependencies
+        run: |
+          pip install -q --upgrade pip
+          pip install -q -r requirements.txt
+          pip install -q --upgrade codecov coverage black flake8
+      - name: Lint
+        run: |
+          black --check --diff stree
+          flake8 --count stree
+      - name: Tests
+        run: |
+          coverage run -m unittest -v stree.tests
+          coverage xml
+      - name: Upload coverage to Codecov
+        uses: codecov/codecov-action@v1
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          files: ./coverage.xml
diff --git a/README.md b/README.md
index 6d054fe..cdc808e 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,7 @@
+![CI](https://github.com/Doctorado-ML/benchmark/workflows/CI/badge.svg)
+[![codecov](https://codecov.io/gh/doctorado-ml/benchmark/branch/master/graph/badge.svg)](https://codecov.io/gh/doctorado-ml/benchmark)
+[![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/Doctorado-ML/STree.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/Doctorado-ML/STree/context:python)
+![https://img.shields.io/badge/python-3.8%2B-blue](https://img.shields.io/badge/python-3.8%2B-brightgreen)
 # benchmark
 
 Benchmarking models
@@ -6,53 +10,55 @@ Benchmarking models
 
 ```python
 # 5 Fold 10 seeds with STree with default hyperparameters and report
-python src/main.py -m STree -P iMac27 -r 1
+be_main -m STree -P iMac27 -r 1
 # Setting number of folds, in this case 7
-python src/main.py -m STree -P iMac27 -n 7
+be_main -m STree -P iMac27 -n 7
 # 5 Fold 10 seeds with STree and best results hyperparams
-python src/main.py -m STree -P iMac27 -f 1
+be_main -m STree -P iMac27 -f 1
 # 5 Fold 10 seeds with STree and same hyperparameters
-python src/main.py -m STree -P iMac27 -p '{"kernel": "rbf", "gamma": 0.1}'
+be_main -m STree -P iMac27 -p '{"kernel": "rbf", "gamma": 0.1}'
 ```
 
 ## Best Results
 
 ```python
 # Build best results of STree model and print report
-python src/build_best.py -m STree -r 1
+be_build_best -m STree -r 1
 # Report of STree best results
-python src/report.py -b STree
+be_report -b STree
 ```
 
 ## Reports
 
 ```python
 # Datasets list
-python src/report.py
+be_report
 # Report of given experiment
-python src/report.py -f results/results_STree_iMac27_2021-09-22_17:13:02.json
+be_report -f results/results_STree_iMac27_2021-09-22_17:13:02.json
 # Report of given experiment building excel file and compare with best results
-python src/report.py -f results/results_STree_iMac27_2021-09-22_17:13:02.json -x 1 -c 1
+be_report -f results/results_STree_iMac27_2021-09-22_17:13:02.json -x 1 -c 1
 # Report of given experiment building sql file
-python src/report.py -f results/results_STree_iMac27_2021-09-22_17:13:02.json -q 1
+be_report -f results/results_STree_iMac27_2021-09-22_17:13:02.json -q 1
 ```
 
 ## Benchmark
 
 ```python
 # Do benchmark and print report
-python src/benchmark.py
+be_benchmark
 # Do benchmark, print report and build excel file with data
-python src/benchmark.py -x 1
+be_benchmark -x 1
+# Do benchmark, print report and build tex table with results
+be_benchmark -t 1
 ```
 
 ## List
 
 ```python
 # List of results of given model
-python src/list.py -m ODTE
+be_list -m ODTE
 # List of results of given model and score
-python src/list.py -m STree -s f1-macro
+be_list -m STree -s f1-macro
 # List all results
-python src/list.py
+be_list
 ```
diff --git a/benchmark/__init__.py b/benchmark/__init__.py
index 6d699da..bafc822 100644
--- a/benchmark/__init__.py
+++ b/benchmark/__init__.py
@@ -1,10 +1,9 @@
 from .Experiments import Experiment, Datasets, DatasetsSurcov, DatasetsTanveer
 from .Results import Report, Summary
-from .Arguments import EnvDefault
 
 __author__ = "Ricardo Montañana Gómez"
 __copyright__ = "Copyright 2020-2022, Ricardo Montañana Gómez"
 __license__ = "MIT License"
 __author_email__ = "ricardo.montanana@alu.uclm.es"
 
-__all__ = ["Experiment", "Datasets", "Report", "Summary", "EnvDefault"]
+__all__ = ["Experiment", "Datasets", "Report", "Summary"]
diff --git a/setup.py b/setup.py
index e5ac12a..d8d0067 100644
--- a/setup.py
+++ b/setup.py
@@ -7,9 +7,8 @@ def readme():
         return f.read()
 
 
-def get_data(field):
+def get_data(field, file_name="__init__.py"):
     item = ""
-    file_name = "_version.py" if field == "version" else "__init__.py"
     with open(os.path.join("benchmark", file_name)) as f:
         for line in f.readlines():
             if line.startswith(f"__{field}__"):
@@ -21,17 +20,34 @@ def get_data(field):
     return item
 
 
-def import_scripts():
+def get_requirements():
+    with open("requirements.txt") as f:
+        return f.read().splitlines()
+
+
+def script_names():
+    scripts = [
+        "benchmark",
+        "best",
+        "build_best",
+        "build_grid",
+        "grid",
+        "list",
+        "main",
+        "pair_check",
+        "print_strees",
+        "report",
+        "summary",
+    ]
     result = []
-    names = os.listdir(os.path.join("benchmark", "scripts"))
-    for name in names:
-        result.append(os.path.join("benchmark", "scripts", name))
+    for script in scripts:
+        result.append(f"be_{script}=benchmark.scripts.be_{script}:main")
     return result
 
 
 setuptools.setup(
     name="benchmark",
-    version=get_data("version"),
+    version=get_data("version", "_version.py"),
     license=get_data("license"),
     description="Oblique decision tree with svm nodes",
     long_description=readme(),
@@ -46,32 +62,15 @@ setuptools.setup(
         "Development Status :: 4 - Beta",
         "License :: OSI Approved :: " + get_data("license"),
         "Programming Language :: Python :: 3.8",
+        "Programming Language :: Python :: 3.9",
+        "Programming Language :: Python :: 3.10",
         "Natural Language :: English",
         "Topic :: Scientific/Engineering :: Artificial Intelligence",
         "Intended Audience :: Science/Research",
     ],
-    install_requires=[
-        "scikit-learn",
-        "odte",
-        "pandas",
-        "mufs",
-        "xlsxwriter",
-        "tqdm",
-    ],
+    install_requires=get_requirements(),
     zip_safe=False,
     entry_points={
-        "console_scripts": [
-            "be_list=benchmark.scripts.be_list:main",
-            "be_report=benchmark.scripts.be_report:main",
-            "be_main=benchmark.scripts.be_main:main",
-            "be_benchmark=benchmark.scripts.be_benchmark:main",
-            "be_best=benchmark.scripts.be_best:main",
-            "be_build_best=benchmark.scripts.be_build_best:main",
-            "be_build_grid=benchmark.scripts.be_build_grid:main",
-            "be_grid=benchmark.scripts.be_grid:main",
-            "be_pair_check=benchmark.scripts.be_pair_check:main",
-            "be_print_strees=benchmark.scripts.be_print_strees:main",
-            "be_summary=benchmark.scripts.be_summary:main",
-        ],
+        "console_scripts": script_names(),
     },
 )