Update doc config

Change build tool to hatch
Ricardo Montañana Gómez 2024-08-15 13:16:51 +02:00
parent b19264b1eb
commit bcc763e656
Signed by: rmontanana
GPG Key ID: 46064262FD9A7ADE
23 changed files with 260 additions and 653 deletions


@@ -13,12 +13,12 @@ jobs:
     strategy:
       matrix:
         os: [macos-latest, ubuntu-latest, windows-latest]
-        python: [3.8, 3.9, "3.10"]
+        python: [3.11, 3.12]
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Set up Python ${{ matrix.python }}
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python }}
      - name: Install dependencies
@@ -35,7 +35,7 @@ jobs:
          coverage run -m unittest -v odte.tests
          coverage xml
      - name: Upload coverage to Codecov
-       uses: codecov/codecov-action@v3
+       uses: codecov/codecov-action@v4
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./coverage.xml

MANIFEST.in Normal file

@@ -0,0 +1 @@
include README.md LICENSE

Makefile

@@ -1,44 +1,35 @@
 SHELL := /bin/bash
 .DEFAULT_GOAL := help
-.PHONY: coverage deps help lint push test doc build
+.PHONY: audit coverage help lint test doc doc-clean build
 
 coverage: ## Run tests with coverage
-	coverage erase
-	coverage run -m unittest -v odte.tests
-	coverage report -m
+	@coverage erase
+	@coverage run -m unittest -v odte.tests
+	@coverage report -m
 
-deps: ## Install dependencies
-	pip install -r requirements.txt
-
-devdeps: ## Install development dependencies
-	pip install black pip-audit flake8 mypy coverage
-
-lint: ## Lint and static-check
-	black odte
-	flake8 odte
-	mypy odte --exclude tests
+lint: ## Lint source files
+	@black odte
+	@flake8 odte
+	@mypy odte
 
 audit: ## Audit pip
-	pip-audit
-
-push: ## Push code with tags
-	git push && git push --tags
+	@pip-audit
 
 test: ## Run tests
-	python -m unittest -v odte.tests
+	@python -m unittest -v odte.tests
 
 doc: ## Update documentation
-	make -C docs --makefile=Makefile html
+	@make -C docs --makefile=Makefile html
 
 build: ## Build package
-	rm -fr dist/*
-	rm -fr build/*
-	python setup.py sdist bdist_wheel
+	@rm -fr dist/*
+	@rm -fr build/*
+	@hatch build
 
-doc-clean: ## Update documentation
-	make -C docs --makefile=Makefile clean
+doc-clean: ## Clean documentation folders
+	@make -C docs --makefile=Makefile clean
 
-help: ## Show help message
+help: ## Show this help message
 	@IFS=$$'\n' ; \
 	help_lines=(`fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/##/:/'`); \
 	printf "%s\n\n" "Usage: make [task]"; \

docs/Makefile Normal file

@@ -0,0 +1,20 @@
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS    ?=
SPHINXBUILD   ?= sphinx-build
SOURCEDIR     = source
BUILDDIR      = build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

docs/requirements.txt Normal file

@@ -0,0 +1,4 @@
sphinx
sphinx-rtd-theme
myst-parser
stree

docs/source/api/Odte.rst Normal file

@@ -0,0 +1,10 @@
Odte
=====
.. automodule:: odte
.. autoclass:: Odte
:members:
:undoc-members:
:private-members:
:show-inheritance:
:noindex:

docs/source/api/index.rst Normal file

@@ -0,0 +1,8 @@
API index
=========
.. toctree::
:maxdepth: 2
:caption: Contents:
Odte

docs/source/conf.py Normal file

@@ -0,0 +1,54 @@
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
import odte
# -- Project information -----------------------------------------------------
project = "Odte"
copyright = "2024 Ricardo Montañana Gómez"
author = "Ricardo Montañana Gómez"
# The full version, including alpha/beta/rc tags
version = release = odte.__version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["myst_parser", "sphinx.ext.autodoc", "sphinx.ext.viewcode"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []

docs/source/example.png Normal file (binary image, 3.1 MiB, not shown)

docs/source/hyperparameters.md Normal file

@@ -0,0 +1,19 @@
# Hyperparameters
| **Hyperparameter** | **Type/Values** | **Default** | **Meaning** |
| ------------------ | --------------- | ----------- | ----------- |
| estimator | \<sklearn.BaseEstimator\> | Stree() | Base estimator used to build each element of the ensemble. |
| n_jobs | \<int\> | -1 | Number of threads used to build the ensemble (-1 uses all available cores). |
| random_state | \<int\> | None | Controls the pseudo-random number generation used to seed the trees of the ensemble.<br>Pass an int for reproducible output across multiple function calls. |
| max_features | \<int\>, \<float\> or {"auto", "sqrt", "log2"} | None | The number of features to consider when looking for the split:<br>If int, then consider max_features features at each split.<br>If float, then max_features is a fraction and int(max_features \* n_features) features are considered at each split.<br>If "auto" or "sqrt", then max_features=sqrt(n_features).<br>If "log2", then max_features=log2(n_features).<br>If None, then max_features=n_features. |
| max_samples | \<int\>, \<float\> | None | The number of samples to draw to train each base estimator:<br>If int, then draw max_samples samples.<br>If float, then draw int(max_samples \* n_samples) samples.<br>If None, then use all the samples. |
| n_estimators | \<int\> | 100 | The number of trees the ensemble builds. |
| be_hyperparams | \<str\> | "{}" | Hyperparameters passed to the base estimator as a JSON string, e.g. "{\\"C\\": 17, \\"kernel\\": \\"rbf\\"}". |
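A minimal usage sketch combining these hyperparameters; the dataset and the parameter values are illustrative, only the parameter names come from the table above:

```python
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split

from odte import Odte

X, y = load_wine(return_X_y=True)
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, random_state=0)

clf = Odte(
    n_estimators=100,             # number of trees (the default)
    max_features="sqrt",          # sqrt(n_features) candidates per split
    random_state=0,               # reproducible ensemble
    be_hyperparams='{"C": 17, "kernel": "rbf"}',  # forwarded to Stree
)
clf.fit(Xtrain, ytrain)
print(f"accuracy: {clf.score(Xtest, ytest):.3f}")
```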
## Splitting in a STree node
The decision function is applied to the dataset, and the distances from the samples to the hyperplanes are stored in a matrix. This matrix has one column per class when there are more than two classes (multiclass classification), or a single column for a binary dataset. In binary classification only one hyperplane is computed, so a single column is enough to store the distances of the samples to it. With three or more classes, one hyperplane per class is needed, and therefore one column per hyperplane.
In multiclass classification we have to decide which column to use for the split, and that depends on the _split_criteria_ hyperparameter: if "impurity" is chosen, STree computes the information gain of every split candidate using each column and picks the column that maximizes it; otherwise ("max_samples") STree picks the column with the most samples assigned a predicted class (the column with the most positive values in it).
Once the split column is chosen, the algorithm separates the samples with positive distances to the hyperplane from the rest, as in the sketch below.
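A simplified rendering of that column-selection logic; the helper names are made up for the example and this is not STree's actual implementation:

```python
import numpy as np


def entropy(y: np.ndarray) -> float:
    _, counts = np.unique(y, return_counts=True)
    p = counts / counts.sum()
    return float(-(p * np.log2(p)).sum())


def information_gain(y: np.ndarray, mask: np.ndarray) -> float:
    left, right = y[mask], y[~mask]
    if len(left) == 0 or len(right) == 0:
        return 0.0
    n = len(y)
    return (entropy(y)
            - len(left) / n * entropy(left)
            - len(right) / n * entropy(right))


def select_split_column(distances: np.ndarray, y: np.ndarray,
                        split_criteria: str) -> int:
    """Pick the hyperplane (column) whose sign drives the node split."""
    if distances.ndim == 1 or distances.shape[1] == 1:
        return 0  # binary problem: a single hyperplane, a single column
    if split_criteria == "impurity":
        # Column whose sign-split yields the highest information gain.
        gains = [information_gain(y, distances[:, c] > 0)
                 for c in range(distances.shape[1])]
        return int(np.argmax(gains))
    # "max_samples": column with the most positive distances.
    return int(np.argmax((distances > 0).sum(axis=0)))
```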

docs/source/index.rst Normal file

@@ -0,0 +1,14 @@
Welcome to Odte's documentation!
=================================
.. toctree::
:caption: Contents:
:titlesonly:
odte
install
hyperparameters
api/index
* :ref:`genindex`

docs/source/install.rst Normal file

@@ -0,0 +1,15 @@
Install
=======

The main stable release:

``pip install odte``

or the latest development version:

``pip install git+https://github.com/doctorado-ml/odte``

Tests
*****

``python -m unittest -v odte.tests``

docs/source/odte.md Normal file

@@ -0,0 +1,17 @@
# Odte
![CI](https://github.com/Doctorado-ML/Odte/workflows/CI/badge.svg)
[![CodeQL](https://github.com/Doctorado-ML/Odte/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/Doctorado-ML/Odte/actions/workflows/codeql-analysis.yml)
[![codecov](https://codecov.io/gh/Doctorado-ML/odte/branch/master/graph/badge.svg)](https://codecov.io/gh/Doctorado-ML/odte)
[![Codacy Badge](https://app.codacy.com/project/badge/Grade/f4b5ef87584b4095b6e49aefbe594c82)](https://www.codacy.com/gh/Doctorado-ML/Odte/dashboard?utm_source=github.com&utm_medium=referral&utm_content=Doctorado-ML/Odte&utm_campaign=Badge_Grade)
[![PyPI version](https://badge.fury.io/py/Odte.svg)](https://badge.fury.io/py/Odte)
![Python](https://img.shields.io/badge/python-3.11%2B-brightgreen)
[![DOI](https://zenodo.org/badge/271595804.svg)](https://zenodo.org/badge/latestdoi/271595804)
Oblique Decision Tree Ensemble classifier based on [STree](https://github.com/doctorado-ml/stree) nodes.
![Odte](./example.png)
## License
Odte is [MIT](https://github.com/doctorado-ml/odte/blob/master/LICENSE) licensed.

@@ -1,388 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Compare Odte with different estimators"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Setup\n",
"Uncomment the next cell if Odte is not already installed"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#\n",
"# Google Colab setup\n",
"#\n",
"#!pip install git+https://github.com/doctorado-ml/odte\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import datetime, time\n",
"import numpy as np\n",
"import pandas as pd\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn import tree\n",
"from sklearn.metrics import classification_report, confusion_matrix, f1_score\n",
"from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier\n",
"from stree import Stree\n",
"from odte import Odte"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"if not os.path.isfile('data/creditcard.csv'):\n",
" !wget --no-check-certificate --content-disposition http://nube.jccm.es/index.php/s/Zs7SYtZQJ3RQ2H2/download\n",
" !tar xzf creditcard.tgz"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Tests"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"print(datetime.date.today(), time.strftime(\"%H:%M:%S\"))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load dataset and normalize values"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Load Dataset\n",
"df = pd.read_csv('data/creditcard.csv')\n",
"df.shape\n",
"random_state = 2020"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"print(\"Fraud: {0:.3f}% {1}\".format(df.Class[df.Class == 1].count()*100/df.shape[0], df.Class[df.Class == 1].count()))\n",
"print(\"Valid: {0:.3f}% {1:,}\".format(df.Class[df.Class == 0].count()*100/df.shape[0], df.Class[df.Class == 0].count()))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Normalize Amount\n",
"from sklearn.preprocessing import RobustScaler\n",
"values = RobustScaler().fit_transform(df.Amount.values.reshape(-1, 1))\n",
"df['Amount_Scaled'] = values"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# Remove unneeded features\n",
"y = df.Class.values\n",
"X = df.drop(['Class', 'Time', 'Amount'], axis=1).values\n",
"print(f\"X shape: {X.shape}\\ny shape: {y.shape}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Build the models"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Divide dataset\n",
"train_size = .7\n",
"Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, train_size=train_size, shuffle=True, random_state=random_state, stratify=y)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Linear Tree\n",
"linear_tree = tree.DecisionTreeClassifier(random_state=random_state)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Random Forest\n",
"random_forest = RandomForestClassifier(random_state=random_state)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Stree\n",
"stree = Stree(random_state=random_state, C=.01)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# AdaBoost\n",
"adaboost = AdaBoostClassifier(random_state=random_state)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Gradient Boosting\n",
"gradient = GradientBoostingClassifier(random_state=random_state)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Oblique Decision Tree Ensemble\n",
"odte = Odte(random_state=random_state, max_features=\"auto\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Do the test"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def try_model(name, model):\n",
" print(f\"************************** {name} **********************\")\n",
" now = time.time()\n",
" model.fit(Xtrain, ytrain)\n",
" spent = time.time() - now\n",
" print(f\"Train Model {name} took: {spent:.4} seconds\")\n",
" predict = model.predict(Xtrain)\n",
" predictt = model.predict(Xtest)\n",
" print(f\"=========== {name} - Train {Xtrain.shape[0]:,} samples =============\",)\n",
" print(classification_report(ytrain, predict, digits=6))\n",
" print(f\"=========== {name} - Test {Xtest.shape[0]:,} samples =============\")\n",
" print(classification_report(ytest, predictt, digits=6))\n",
" print(\"Confusion Matrix in Train\")\n",
" print(confusion_matrix(ytrain, predict))\n",
" print(\"Confusion Matrix in Test\")\n",
" print(confusion_matrix(ytest, predictt))\n",
" return f1_score(ytest, predictt), spent"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# Train & Test models\n",
"models = {\n",
" 'Linear Tree':linear_tree, 'Random Forest': random_forest, 'Stree (SVM Tree)': stree, \n",
" 'AdaBoost model': adaboost, 'Odte model': odte #'Gradient Boost.': gradient\n",
"}\n",
"\n",
"best_f1 = 0\n",
"outcomes = []\n",
"for name, model in models.items():\n",
" f1, time_spent = try_model(name, model)\n",
" outcomes.append((name, f1, time_spent))\n",
" if f1 > best_f1:\n",
" best_model = name\n",
" best_time = time_spent\n",
" best_f1 = f1"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"print(\"*\"*110)\n",
"print(f\"*The best f1 model is {best_model}, with a f1 score: {best_f1:.4} in {best_time:.6} seconds with {train_size:,} samples in train dataset\")\n",
"print(\"*\"*110)\n",
"for name, f1, time_spent in outcomes:\n",
" print(f\"Model: {name}\\t Time: {time_spent:6.2f} seconds\\t f1: {f1:.4}\")"
]
},
{
"cell_type": "raw",
"metadata": {},
"source": [
"**************************************************************************************************************\n",
"*The best f1 model is Random Forest, with a f1 score: 0.8815 in 152.54 seconds with 0.7 samples in train dataset\n",
"**************************************************************************************************************\n",
"Model: Linear Tree\t Time: 13.52 seconds\t f1: 0.7645\n",
"Model: Random Forest\t Time: 152.54 seconds\t f1: 0.8815\n",
"Model: Stree (SVM Tree)\t Time: 32.55 seconds\t f1: 0.8603\n",
"Model: AdaBoost model\t Time: 47.34 seconds\t f1: 0.7509\n",
"Model: Gradient Boost.\t Time: 244.12 seconds\t f1: 0.5259"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"```\n",
"******************************************************************************************************************\n",
"*The best f1 model is Random Forest, with a f1 score: 0.8815 in 218.966 seconds with 0.7 samples in train dataset\n",
"******************************************************************************************************************\n",
"Model: Linear Tree Time: 23.05 seconds\t f1: 0.7645\n",
"Model: Random Forest\t Time: 218.97 seconds\t f1: 0.8815\n",
"Model: Stree (SVM Tree)\t Time: 49.45 seconds\t f1: 0.8467\n",
"Model: AdaBoost model\t Time: 73.83 seconds\t f1: 0.7509\n",
"Model: Gradient Boost.\t Time: 388.69 seconds\t f1: 0.5259\n",
"Model: Neural Network\t Time: 25.47 seconds\t f1: 0.8328\n",
"Model: Odte \t Time:2134.25 seconds\t f1: 0.8385\n",
"```"
]
}
],
"metadata": {
"hide_input": false,
"kernelspec": {
"display_name": "Python 3.7.6 64-bit ('general': venv)",
"language": "python",
"name": "python37664bitgeneralvenvfbd0a23e74cf4e778460f5ffc6761f39"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.6-final"
},
"toc": {
"base_numbering": 1,
"nav_menu": {},
"number_sections": true,
"sideBar": true,
"skip_h1_title": false,
"title_cell": "Table of Contents",
"title_sidebar": "Contents",
"toc_cell": false,
"toc_position": {},
"toc_section_display": true,
"toc_window_display": false
},
"varInspector": {
"cols": {
"lenName": 16,
"lenType": 16,
"lenVar": 40
},
"kernels_config": {
"python": {
"delete_cmd_postfix": "",
"delete_cmd_prefix": "del ",
"library": "var_list.py",
"varRefreshCmd": "print(var_dic_list())"
},
"r": {
"delete_cmd_postfix": ") ",
"delete_cmd_prefix": "rm(",
"library": "var_list.r",
"varRefreshCmd": "cat(var_dic_list()) "
}
},
"position": {
"height": "392px",
"left": "1518px",
"right": "20px",
"top": "40px",
"width": "392px"
},
"types_to_exclude": [
"module",
"function",
"builtin_function_or_method",
"instance",
"_Feature"
],
"window_display": true
}
},
"nbformat": 4,
"nbformat_minor": 4
}

@@ -1,174 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import datetime, time\n",
"import numpy as np\n",
"import pandas as pd\n",
"from sklearn.model_selection import train_test_split, cross_validate\n",
"from sklearn import tree\n",
"from sklearn.metrics import classification_report, confusion_matrix, f1_score\n",
"from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, BaggingClassifier\n",
"from stree import Stree\n",
"from odte import Odte\n",
"\n",
"random_state = 1"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.datasets import load_wine\n",
"X, y = load_wine(return_X_y=True)\n",
"Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=.2, random_state=random_state)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"n_estimators = 20\n",
"clf = {}\n",
"clf[\"stree\"] = Stree(random_state=random_state, max_depth=5)\n",
"clf[\"stree\"].set_params(**dict(splitter=\"best\", kernel=\"linear\", max_features=\"auto\"))\n",
"clf[\"odte\"] = Odte(n_jobs=-1, estimator=clf[\"stree\"], random_state=random_state, n_estimators=n_estimators, max_features=.8)\n",
"clf[\"adaboost\"] = AdaBoostClassifier(estimator=clf[\"stree\"], n_estimators=n_estimators, random_state=random_state, algorithm=\"SAMME\")\n",
"clf[\"bagging\"] = BaggingClassifier(estimator=clf[\"stree\"], n_estimators=n_estimators)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"print(\"*\"*30,\"Results for wine\", \"*\"*30)\n",
"for clf_type, item in clf.items():\n",
" print(f\"Training {clf_type}...\")\n",
" now = time.time()\n",
" item.fit(Xtrain, ytrain)\n",
" print(f\"Score: {item.score(Xtest, ytest) * 100:.3f} in {time.time()-now:.2f} seconds\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.datasets import load_iris\n",
"X, y = load_iris(return_X_y=True)\n",
"Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=.2, random_state=random_state)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"n_estimators = 10\n",
"clf = {}\n",
"clf[\"stree\"] = Stree(random_state=random_state, max_depth=3)\n",
"clf[\"odte\"] = Odte(n_jobs=-1, random_state=random_state, n_estimators=n_estimators, max_features=1.0)\n",
"clf[\"adaboost\"] = AdaBoostClassifier(estimator=clf[\"stree\"], n_estimators=n_estimators, random_state=random_state, algorithm=\"SAMME\")\n",
"clf[\"bagging\"] = BaggingClassifier(estimator=clf[\"stree\"], n_estimators=n_estimators)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"print(\"*\"*30,\"Results for iris\", \"*\"*30)\n",
"for clf_type, item in clf.items():\n",
" print(f\"Training {clf_type}...\")\n",
" now = time.time()\n",
" item.fit(Xtrain, ytrain)\n",
" print(f\"Score: {item.score(Xtest, ytest) * 100:.3f} in {time.time()-now:.2f} seconds\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"cross = cross_validate(estimator=clf[\"odte\"], X=X, y=y, n_jobs=-1, return_train_score=True)\n",
"print(cross)\n",
"print(f\"{np.mean(cross['test_score'])*100:.3f} +- {np.std(cross['test_score']):.3f}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"cross = cross_validate(estimator=clf[\"adaboost\"], X=X, y=y, n_jobs=-1, return_train_score=True)\n",
"print(cross)\n",
"print(f\"{np.mean(cross['test_score'])*100:.3f} +- {np.std(cross['test_score']):.3f}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from sklearn.utils.estimator_checks import check_estimator\n",
"# Make checks one by one\n",
"c = 0\n",
"checks = check_estimator(Odte(), generate_only=True)\n",
"for check in checks:\n",
" c += 1\n",
" print(c, check[1])\n",
" check[1](check[0])"
]
}
],
"metadata": {
"interpreter": {
"hash": "da86226729227d0e8962a5ec29ea906307507ca2c30ceaaf651c09a617630939"
},
"kernelspec": {
"display_name": "Python 3.9.2 64-bit ('general': venv)",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.7"
},
"orig_nbformat": 2
},
"nbformat": 4,
"nbformat_minor": 2
}

odte/Odte.py

@@ -33,10 +33,10 @@ class Odte(BaseEnsemble, ClassifierMixin):
         # n_jobs = -1 to use all available cores
         n_jobs: int = -1,
         estimator: BaseEstimator = Stree(),
-        random_state: int = 0,
+        random_state: Optional[int] = None,
         max_features: Optional[Union[str, int, float]] = None,
         max_samples: Optional[Union[int, float]] = None,
-        n_estimators: int = 10,
+        n_estimators: int = 100,
         be_hyperparams: str = "{}",
     ):
         super().__init__(
@@ -62,7 +62,10 @@ class Odte(BaseEnsemble, ClassifierMixin):
         )
 
     def fit(
-        self, X: np.ndarray, y: np.ndarray, sample_weight: np.ndarray = None
+        self,
+        X: np.ndarray,
+        y: np.ndarray,
+        sample_weight: Optional[np.ndarray] = None,
     ) -> Odte:
         # Check parameters are Ok.
         if self.n_estimators < 3:
@@ -100,9 +103,6 @@ class Odte(BaseEnsemble, ClassifierMixin):
             tdepth += depth
             tnodes += nodes
             tleaves += leaves
-        # self.depth_ = tdepth / self.n_estimators
-        # self.leaves_ = tleaves / self.n_estimators
-        # self.nodes_ = tnodes / self.n_estimators
         self.depth_ = tdepth
         self.leaves_ = tleaves
         self.nodes_ = tnodes
@@ -113,6 +113,11 @@ class Odte(BaseEnsemble, ClassifierMixin):
         n_samples = X.shape[0]
         boot_samples = self._get_bootstrap_n_samples(n_samples)
         estimator = clone(self.estimator_)
+        defined_state = (
+            random.randint(0, 2**31)
+            if self.random_state is None
+            else self.random_state
+        )
         return Parallel(n_jobs=self.n_jobs, prefer="threads")(  # type: ignore
             delayed(Odte._parallel_build_tree)(
                 estimator,
@@ -125,7 +130,7 @@ class Odte(BaseEnsemble, ClassifierMixin):
                 self.be_hyperparams,
             )
             for random_seed in range(
-                self.random_state, self.random_state + self.n_estimators
+                defined_state, defined_state + self.n_estimators
             )
         )
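With this change a fixed random_state still yields one deterministic seed per tree, while random_state=None draws a fresh base seed on every fit. A standalone sketch of the resulting seed sequence, mirroring the diff rather than the module itself:

```python
import random


def tree_seeds(random_state: int | None, n_estimators: int) -> list[int]:
    # Same scheme as the diff: consecutive seeds from a defined base state.
    base = random.randint(0, 2**31) if random_state is None else random_state
    return list(range(base, base + n_estimators))


assert tree_seeds(42, 5) == [42, 43, 44, 45, 46]  # reproducible
print(tree_seeds(None, 3))  # varies on every run
```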

odte/__init__.py

@@ -1,3 +1,4 @@
+from ._version import __version__
 from .Odte import Odte
 
 __author__ = "Ricardo Montañana Gómez"
@@ -5,4 +6,4 @@ __copyright__ = "Copyright 2020-2021, Ricardo Montañana Gómez"
 __license__ = "MIT License"
 __author_email__ = "ricardo.montanana@alu.uclm.es"
-__all__ = ["Odte"]
+__all__ = ["__version__", "Odte"]
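Re-exporting __version__ lets users query the package version without touching the private module; a quick usage check, assuming the package is installed:

```python
import odte

print(odte.__version__)  # "1.0.0" after this commit
print(odte.__all__)      # ["__version__", "Odte"]
```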

odte/_version.py

@@ -1 +1 @@
-__version__ = "0.3.6"
+__version__ = "1.0.0"

odte/tests/Odte_tests.py

@@ -180,7 +180,7 @@ class Odte_test(unittest.TestCase):
         warnings.filterwarnings("ignore", category=RuntimeWarning)
         from sklearn.utils.estimator_checks import check_estimator
 
-        check_estimator(Odte())
+        check_estimator(Odte(n_estimators=10))
 
     def test_nodes_leaves_not_fitted(self):
         tclf = Odte(

odte/tests/__init__.py

@@ -1,4 +1,3 @@
-# type: ignore
 from .Odte_tests import Odte_test
 
 __all__ = ["Odte_test"]

pyproject.toml

@@ -1,5 +1,64 @@
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[project]
+name = "Odte"
+description = "Oblique decision tree Ensemble."
+readme = "README.md"
+license = { file = "LICENSE" }
+authors = [
+    { name = "Ricardo Montañana", email = "ricardo.montanana@alu.uclm.es" },
+]
+dynamic = ['version']
+dependencies = ["stree>=1.4"]
+requires-python = ">=3.11"
+keywords = [
+    "scikit-learn",
+    "oblique-classifier",
+    "oblique-decision-tree",
+    "decision-tree",
+    "ensemble",
+    "svm",
+]
+classifiers = [
+    "Development Status :: 5 - Production/Stable",
+    "Intended Audience :: Science/Research",
+    "Intended Audience :: Developers",
+    "Topic :: Software Development",
+    "Topic :: Scientific/Engineering",
+    "License :: OSI Approved :: MIT License",
+    "Natural Language :: English",
+    "Operating System :: OS Independent",
+    "Programming Language :: Python :: 3.11",
+    "Programming Language :: Python :: 3.12",
+]
+
+[project.optional-dependencies]
+dev = ["black", "flake8", "coverage", "mypy", "pandas", "hatch", "pip-audit"]
+doc = ["sphinx", "myst-parser", "sphinx_rtd_theme", "sphinx-autodoc-typehints"]
+
+[project.urls]
+Home = "https://github.com/doctorado-ml/odte"
+
+[tool.hatch.version]
+path = "odte/_version.py"
+
+[tool.mypy]
+exclude = ['tests']
+
+[tool.coverage.run]
+branch = true
+source = ["odte"]
+command_line = "-m unittest discover -s odte.tests"
+
+[tool.coverage.report]
+show_missing = true
+fail_under = 100
+
 [tool.black]
 line-length = 79
+target_version = ['py311']
 include = '\.pyi?$'
 exclude = '''
 /(
@@ -13,4 +72,4 @@ exclude = '''
   | build
   | dist
 )/
 '''
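Because version is declared dynamic and [tool.hatch.version] points at odte/_version.py, hatch reads the version out of that file at build time. A rough, illustrative equivalent of the default regex lookup hatchling performs:

```python
import re
from pathlib import Path

# Approximates hatchling's default "regex" version source.
text = Path("odte/_version.py").read_text(encoding="utf-8")
version = re.search(r'__version__\s*=\s*["\']([^"\']+)["\']', text).group(1)
print(version)  # -> "1.0.0"
```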

requirements.txt

@@ -1 +1 @@
-stree>=1.3.1
+stree>=1.4

setup.py

@@ -1,48 +0,0 @@
import setuptools
import os
def readme():
with open("README.md") as f:
return f.read()
def get_data(field):
item = ""
file_name = "_version.py" if field == "version" else "__init__.py"
with open(os.path.join("odte", file_name)) as f:
for line in f.readlines():
if line.startswith(f"__{field}__"):
delim = '"' if '"' in line else "'"
item = line.split(delim)[1]
break
else:
raise RuntimeError(f"Unable to find {field} string.")
return item
setuptools.setup(
name="Odte",
version=get_data("version"),
license=get_data("license"),
description="Oblique decision tree Ensemble",
long_description=readme(),
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
url="https://github.com/doctorado-ml/odte",
author=get_data("author"),
author_email=get_data("author_email"),
keywords="scikit-learn oblique-classifier oblique-decision-tree decision-\
tree ensemble svm svc",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.8",
"Natural Language :: English",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Intended Audience :: Science/Research",
],
install_requires=["stree"],
test_suite="odte.tests",
zip_safe=False,
)