Compare commits

...

39 Commits

Author SHA1 Message Date
cb80e8606b Add ask wiki link and init depth 2025-06-24 18:48:04 +02:00
c93d3fbcc7 Fix scikit-learn version in requirements for tests 2025-05-20 11:31:51 +02:00
f4ca4bbd5b Update comment and readme 2025-03-21 09:51:39 +01:00
e676ddbfcc Update python min version in Readme 2024-08-16 13:14:38 +02:00
Ricardo Montañana Gómez
dc637018e8 Rtd update (#58)
* Update read the docs config

* Update copyright year in docs

* Update python version

* Change build configuration

* Change version read in config

* Refactor config files

* Refactor api config
2024-08-15 11:49:38 +02:00
517013be09 Update readthedocs config place
Refactor __call__ method to do nothing as needed by sklearn
2024-08-14 16:37:36 +02:00
941c2ff5e0 Update gh action version 2024-08-14 10:15:26 +02:00
2ebf48145d Update python version requirements 2024-08-14 10:03:57 +02:00
7fbfd3622e Update python versions in gh actions 2024-08-14 09:58:36 +02:00
bc839a80d6 Remove black from lint in github actions 2024-08-14 09:52:05 +02:00
ba15ea2cc0 Remove unneeded file 2024-08-14 09:42:59 +02:00
85b56785c8 Change project builder to hatch
Update actions in Makefile
2024-08-14 09:41:45 +02:00
b627bb7531 Add pyproject.toml install information
Add __call__ method to support sklearn ensembles requirements for base estimators
Update tests
2024-08-13 13:28:32 +02:00
5f8ca8f3bb Reformat test with new black version 2024-03-05 18:46:19 +01:00
Ricardo Montañana Gómez
fb8b9b344f Update README.md
update installation instructions
2024-03-05 18:18:55 +01:00
036d1ba2a7 Add separate methods to return nodes/leaves/depth 2023-11-27 10:02:14 +01:00
4de74973b8 Black format issue 2023-07-12 14:16:08 +02:00
Ricardo Montañana Gómez
28dd04b95a Update benchmark.ipynb 2023-05-13 14:44:49 +02:00
Ricardo Montañana Gómez
542bbce7db ci: ⬆️ Update ci files and badges 2023-01-15 02:18:41 +01:00
Ricardo Montañana Gómez
5b791bc5bf New_version_sklearn (#56)
* test: 🧪 Update max_iter as int in test_multiclass_dataset

* refactor: 📝 Rename base_estimator to estimator as the former is deprecated in notebook

* refactor: 📌 Convert max_iter to int as needed in sklearn 1.2

* chore: 🔖 Update version info to 1.3.1
2023-01-15 01:21:32 +01:00
Ricardo Montañana Gómez
c37f044e3a Update doc and version 1.30 (#55)
* Add complete classes counts to node and tests

* Implement optimized predict and new predict_proba

* Add predict_proba test

* Add python 3.10 to CI

* Update version number and documentation
2022-10-21 13:31:59 +02:00
Ricardo Montañana Gómez
2f6ae648a1 New predict proba (#53)
* Add complete classes counts to node and tests

* Implement optimized predict and new predict_proba

* Add predict_proba test

* Add python 3.10 to CI
2022-10-21 12:26:46 +02:00
Ricardo Montañana Gómez
93be8a89a8 Graphviz (#52)
* Add graphviz representation of the tree

* Complete graphviz test
Add comments to some tests

* Add optional title to tree graph

* Add fontcolor keyword to nodes of the tree

* Add color keyword to arrows of graph

* Update version file to 1.2.4
2022-04-17 19:47:58 +02:00
82838fa3e0 Add audit and devdeps to Makefile 2022-01-11 11:02:09 +01:00
f0b2ce3c7b Fix github actions lint mistake 2022-01-11 10:44:45 +01:00
00ed57c015 Add version of the model method 2021-12-17 11:01:09 +01:00
Ricardo Montañana Gómez
08222f109e Update CITATION.cff 2021-11-04 11:06:13 +01:00
cc931d8547 Fix random seed not used in fs_mutual 2021-11-04 10:04:30 +01:00
b044a057df Update comments and README.md 2021-11-02 14:04:10 +01:00
fc48bc8ba4 Update docs and version number 2021-11-02 12:17:46 +01:00
Ricardo Montañana Gómez
8251f07674 Fix Citation (#49) 2021-11-02 10:58:30 +01:00
Ricardo Montañana Gómez
0b15a5af11 Fix space in CITATION.cff 2021-11-02 00:25:21 +01:00
Ricardo Montañana Gómez
28d905368b Create CITATION.cff 2021-11-02 00:20:49 +01:00
e5d49132ec Update benchmark hyperparams of STree 2021-10-31 12:41:30 +01:00
8daecc4726 Remove obsolete binder links 2021-10-31 11:51:31 +01:00
Ricardo Montañana Gómez
bf678df159 (#46) Implement true random feature selection (#48)
* (#46) Implement true random feature selection
2021-10-29 12:59:03 +02:00
Ricardo Montañana Gómez
36b08b1bcf Implement iwss feature selection (#45) (#47) 2021-10-29 11:49:46 +02:00
36ff3da26d Update Docs 2021-09-13 18:32:59 +02:00
Ricardo Montañana Gómez
6b281ebcc8 Add DOI to README 2021-09-13 18:23:11 +02:00
29 changed files with 1095 additions and 532 deletions

View File

@@ -2,12 +2,12 @@ name: "CodeQL"
on:
push:
branches: [ master ]
branches: [master]
pull_request:
# The branches below must be a subset of the branches above
branches: [ master ]
branches: [master]
schedule:
- cron: '16 17 * * 3'
- cron: "16 17 * * 3"
jobs:
analyze:
@@ -17,7 +17,7 @@ jobs:
strategy:
fail-fast: false
matrix:
language: [ 'python' ]
language: ["python"]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
@@ -28,7 +28,7 @@ jobs:
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
@@ -39,7 +39,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v1
uses: github/codeql-action/autobuild@v2
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
@@ -53,4 +53,4 @@ jobs:
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1
uses: github/codeql-action/analyze@v2

View File

@@ -12,13 +12,13 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [macos-latest, ubuntu-latest]
python: [3.8]
os: [macos-latest, ubuntu-latest, windows-latest]
python: [3.11, 3.12]
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python }}
uses: actions/setup-python@v2
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python }}
- name: Install dependencies
@@ -28,14 +28,14 @@ jobs:
pip install -q --upgrade codecov coverage black flake8 codacy-coverage
- name: Lint
run: |
black --check --diff stree
# black --check --diff stree
flake8 --count stree
- name: Tests
run: |
coverage run -m unittest -v stree.tests
coverage xml
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v1
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./coverage.xml

View File

@@ -3,8 +3,12 @@ version: 2
sphinx:
configuration: docs/source/conf.py
build:
os: ubuntu-22.04
tools:
python: "3.12"
python:
version: 3.8
install:
- requirements: requirements.txt
- requirements: docs/requirements.txt

CITATION.cff Normal file
View File

@@ -0,0 +1,37 @@
cff-version: 1.2.0
message: "If you use this software, please cite it as below."
authors:
- family-names: "Montañana"
given-names: "Ricardo"
orcid: "https://orcid.org/0000-0003-3242-5452"
- family-names: "Gámez"
given-names: "José A."
orcid: "https://orcid.org/0000-0003-1188-1117"
- family-names: "Puerta"
given-names: "José M."
orcid: "https://orcid.org/0000-0002-9164-5191"
title: "STree"
version: 1.2.3
doi: 10.5281/zenodo.5504083
date-released: 2021-11-02
url: "https://github.com/Doctorado-ML/STree"
preferred-citation:
type: article
authors:
- family-names: "Montañana"
given-names: "Ricardo"
orcid: "https://orcid.org/0000-0003-3242-5452"
- family-names: "Gámez"
given-names: "José A."
orcid: "https://orcid.org/0000-0003-1188-1117"
- family-names: "Puerta"
given-names: "José M."
orcid: "https://orcid.org/0000-0002-9164-5191"
doi: "10.1007/978-3-030-85713-4_6"
journal: "Lecture Notes in Computer Science"
month: 9
start: 54
end: 64
title: "STree: A Single Multi-class Oblique Decision Tree Based on Support Vector Machines"
volume: 12882
year: 2021

MANIFEST.in Normal file
View File

@@ -0,0 +1 @@
include README.md LICENSE

View File

@@ -1,40 +1,36 @@
SHELL := /bin/bash
.DEFAULT_GOAL := help
.PHONY: coverage deps help lint push test doc build
.PHONY: audit coverage help lint test doc doc-clean build
coverage: ## Run tests with coverage
coverage erase
coverage run -m unittest -v stree.tests
coverage report -m
@coverage erase
@coverage run -m unittest -v stree.tests
@coverage report -m
deps: ## Install dependencies
pip install -r requirements.txt
lint: ## Lint and static-check
black stree
flake8 stree
mypy stree
push: ## Push code with tags
git push && git push --tags
lint: ## Lint source files
@black stree
@flake8 stree
test: ## Run tests
python -m unittest -v stree.tests
@python -m unittest -v stree.tests
doc: ## Update documentation
make -C docs --makefile=Makefile html
@make -C docs --makefile=Makefile html
build: ## Build package
rm -fr dist/*
rm -fr build/*
python setup.py sdist bdist_wheel
@rm -fr dist/*
@rm -fr build/*
@hatch build
doc-clean: ## Update documentation
make -C docs --makefile=Makefile clean
doc-clean: ## Clean documentation folders
@make -C docs --makefile=Makefile clean
help: ## Show help message
audit: ## Audit pip
@pip-audit
help: ## Show this help message
@IFS=$$'\n' ; \
help_lines=(`fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/##/:/'`); \
help_lines=(`grep -Fh "##" $(MAKEFILE_LIST) | grep -Fv fgrep | sed -e 's/\\$$//' | sed -e 's/##/:/'`); \
printf "%s\n\n" "Usage: make [task]"; \
printf "%-20s %s\n" "task" "help" ; \
printf "%-20s %s\n" "------" "----" ; \

View File

@@ -1,21 +1,23 @@
# STree
![CI](https://github.com/Doctorado-ML/STree/workflows/CI/badge.svg)
[![CodeQL](https://github.com/Doctorado-ML/STree/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/Doctorado-ML/STree/actions/workflows/codeql-analysis.yml)
[![codecov](https://codecov.io/gh/doctorado-ml/stree/branch/master/graph/badge.svg)](https://codecov.io/gh/doctorado-ml/stree)
[![Codacy Badge](https://app.codacy.com/project/badge/Grade/35fa3dfd53a24a339344b33d9f9f2f3d)](https://www.codacy.com/gh/Doctorado-ML/STree?utm_source=github.com&utm_medium=referral&utm_content=Doctorado-ML/STree&utm_campaign=Badge_Grade)
[![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/Doctorado-ML/STree.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/Doctorado-ML/STree/context:python)
[![PyPI version](https://badge.fury.io/py/STree.svg)](https://badge.fury.io/py/STree)
![https://img.shields.io/badge/python-3.8%2B-blue](https://img.shields.io/badge/python-3.8%2B-brightgreen)
![https://img.shields.io/badge/python-3.11%2B-blue](https://img.shields.io/badge/python-3.11%2B-brightgreen)
[![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/Doctorado-ML/STree)
[![DOI](https://zenodo.org/badge/262658230.svg)](https://zenodo.org/badge/latestdoi/262658230)
# STree
![Stree](https://raw.github.com/doctorado-ml/stree/master/example.png)
Oblique Tree classifier based on SVM nodes. The nodes are built and split with sklearn SVC models. Stree is a scikit-learn estimator and can be integrated into pipelines, grid searches, etc.
![Stree](https://raw.github.com/doctorado-ml/stree/master/example.png)
## Installation
```bash
pip install git+https://github.com/doctorado-ml/stree
pip install Stree
```
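As a quick sanity check of the installed package, a minimal usage sketch (the dataset and parameter values here are illustrative, not taken from this compare):

```python
from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV
from stree import Stree

X, y = load_iris(return_X_y=True)
clf = Stree(random_state=0).fit(X, y)
print(clf.score(X, y))

# Stree follows the scikit-learn estimator API, so it also works
# inside model-selection utilities such as GridSearchCV
grid = GridSearchCV(Stree(random_state=0), {"C": [1, 7, 55]}, cv=3)
grid.fit(X, y)
print(grid.best_params_)
```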
## Documentation
@@ -26,8 +28,6 @@ Can be found in [stree.readthedocs.io](https://stree.readthedocs.io/en/stable/)
### Jupyter notebooks
- [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/Doctorado-ML/STree/master?urlpath=lab/tree/notebooks/benchmark.ipynb) Benchmark
- [![benchmark](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/benchmark.ipynb) Benchmark
- [![features](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/features.ipynb) Some features
@@ -39,7 +39,7 @@ Can be found in [stree.readthedocs.io](https://stree.readthedocs.io/en/stable/)
## Hyperparameters
| | **Hyperparameter** | **Type/Values** | **Default** | **Meaning** |
| --- | ------------------- | ------------------------------------------------------ | ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| --- | ------------------- | -------------------------------------------------------------- | ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| \* | C | \<float\> | 1.0 | Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. |
| \* | kernel | {"liblinear", "linear", "poly", "rbf", "sigmoid"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of liblinear, linear, poly, rbf or sigmoid. liblinear uses [liblinear](https://www.csie.ntu.edu.tw/~cjlin/liblinear/) library and the rest use [libsvm](https://www.csie.ntu.edu.tw/~cjlin/libsvm/) library through scikit-learn library |
| \* | max_iter | \<int\> | 1e5 | Hard limit on iterations within solver, or -1 for no limit. |
@@ -52,11 +52,11 @@ Can be found in [stree.readthedocs.io](https://stree.readthedocs.io/en/stable/)
| | criterion | {“gini”, “entropy”} | entropy | The function to measure the quality of a split (only used if max_features != num_features). <br>Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain. |
| | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) for any |
| | max_features | \<int\>, \<float\> <br><br>or {“auto”, “sqrt”, “log2”} | None | The number of features to consider when looking for the split:<br>If int, then consider max_features features at each split.<br>If float, then max_features is a fraction and int(max_features \* n_features) features are considered at each split.<br>If “auto”, then max_features=sqrt(n_features).<br>If “sqrt”, then max_features=sqrt(n_features).<br>If “log2”, then max_features=log2(n_features).<br>If None, then max_features=n_features. |
| | splitter | {"best", "random", "mutual", "cfs", "fcbf"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features). Supported strategies are: **best”**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **random”**: The algorithm generates 5 candidates and choose the best (max. info. gain) of them. **"mutual"**: Chooses the best features w.r.t. their mutual info with the label. **"cfs"**: Apply Correlation-based Feature Selection. **"fcbf"**: Apply Fast Correlation-Based Filter |
| | splitter | {"best", "random", "trandom", "mutual", "cfs", "fcbf", "iwss"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features).
Supported strategies are: **best”**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **random”**: The algorithm generates 5 candidates and choose the best (max. info. gain) of them. **trandom”**: The algorithm generates only one random combination. **"mutual"**: Chooses the best features w.r.t. their mutual info with the label. **"cfs"**: Apply Correlation-based Feature Selection. **"fcbf"**: Apply Fast Correlation-Based Filter. **"iwss"**: IWSS based algorithm |
| | normalize | \<bool\> | False | If standardization of features should be applied on each node with the samples that reach it |
| \* | multiclass_strategy | {"ovo", "ovr"} | "ovo" | Strategy to use with multiclass datasets, **"ovo"**: one versus one. **"ovr"**: one versus rest |
\* Hyperparameter used by the support vector classifier of every node
\*\* **Splitting in a STree node**
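To illustrate the table above, a hypothetical configuration touching the new splitter values added in this compare ("trandom", "iwss"):

```python
from stree import Stree

# illustrative values only; defaults are listed in the table above
clf = Stree(
    kernel="rbf",
    C=7,
    max_depth=5,
    max_features="sqrt",
    splitter="iwss",   # new in this compare, along with "trandom"
    normalize=True,
    random_state=0,
)
```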
@@ -76,3 +76,7 @@ python -m unittest -v stree.tests
## License
STree is [MIT](https://github.com/doctorado-ml/stree/blob/master/LICENSE) licensed
## Reference
R. Montañana, J. A. Gámez, J. M. Puerta, "STree: a single multi-class oblique decision tree based on support vector machines.", 2021 LNAI 12882, pg. 54-64

View File

@@ -1,9 +1,10 @@
Siterator
=========
.. automodule:: Splitter
.. automodule:: stree
.. autoclass:: Siterator
:members:
:undoc-members:
:private-members:
:show-inheritance:
:noindex:

View File

@@ -1,9 +1,9 @@
Snode
=====
.. automodule:: Splitter
.. autoclass:: Snode
.. autoclass:: stree.Splitter.Snode
:members:
:undoc-members:
:private-members:
:show-inheritance:
:noindex:

View File

@@ -1,9 +1,10 @@
Splitter
========
.. automodule:: Splitter
.. automodule:: stree.Splitter
.. autoclass:: Splitter
:members:
:undoc-members:
:private-members:
:show-inheritance:
:noindex:

View File

@@ -7,3 +7,4 @@ Stree
:undoc-members:
:private-members:
:show-inheritance:
:noindex:

View File

@@ -6,27 +6,21 @@
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
import stree
sys.path.insert(0, os.path.abspath("../../stree/"))
# -- Project information -----------------------------------------------------
project = "STree"
copyright = "2020 - 2021, Ricardo Montañana Gómez"
copyright = "2020 - 2024, Ricardo Montañana Gómez"
author = "Ricardo Montañana Gómez"
# The full version, including alpha/beta/rc tags
version = stree.__version__
release = version
version = release = stree.__version__
# -- General configuration ---------------------------------------------------
@@ -54,4 +48,4 @@ html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_static_path = []

View File

@@ -2,8 +2,6 @@
## Notebooks
- [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/Doctorado-ML/STree/master?urlpath=lab/tree/notebooks/benchmark.ipynb) Benchmark
- [![benchmark](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/benchmark.ipynb) Benchmark
- [![features](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/features.ipynb) Some features

View File

@@ -1,22 +1,22 @@
# Hyperparameters
| | **Hyperparameter** | **Type/Values** | **Default** | **Meaning** |
| --- | ------------------- | ------------------------------------------------------ | ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| --- | ------------------- | -------------------------------------------------------------- | ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| \* | C | \<float\> | 1.0 | Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. |
| \* | kernel | {"liblinear", "linear", "poly", "rbf", "sigmoid"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of liblinear, linear, poly or rbf. liblinear uses [liblinear](https://www.csie.ntu.edu.tw/~cjlin/liblinear/) library and the rest uses [libsvm](https://www.csie.ntu.edu.tw/~cjlin/libsvm/) library through scikit-learn library |
| \* | kernel | {"liblinear", "linear", "poly", "rbf", "sigmoid"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of liblinear, linear, poly, rbf or sigmoid.<br>liblinear uses [liblinear](https://www.csie.ntu.edu.tw/~cjlin/liblinear/) library and the rest use [libsvm](https://www.csie.ntu.edu.tw/~cjlin/libsvm/) library through scikit-learn library |
| \* | max_iter | \<int\> | 1e5 | Hard limit on iterations within solver, or -1 for no limit. |
| \* | random_state | \<int\> | None | Controls the pseudo random number generation for shuffling the data for probability estimates. Ignored when probability is False.<br>Pass an int for reproducible output across multiple function calls |
| | max_depth | \<int\> | None | Specifies the maximum depth of the tree |
| \* | tol | \<float\> | 1e-4 | Tolerance for stopping criterion. |
| \* | degree | \<int\> | 3 | Degree of the polynomial kernel function (poly). Ignored by all other kernels. |
| \* | gamma | {"scale", "auto"} or \<float\> | scale | Kernel coefficient for rbf, poly and sigmoid.<br>if gamma='scale' (default) is passed then it uses 1 / (n_features \* X.var()) as value of gamma,<br>if auto, uses 1 / n_features. |
| | split_criteria | {"impurity", "max_samples"} | impurity | Decides (just in case of a multi class classification) which column (class) use to split the dataset in a node\*\*. max_samples is incompatible with 'ovo' multiclass_strategy |
| | criterion | {“gini”, “entropy”} | entropy | The function to measure the quality of a split (only used if max_features != num_features). <br>Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain. |
| | split_criteria | {"impurity", "max_samples"} | impurity | Decides (just in case of a multi class classification) which column (class) to use to split the dataset in a node\*\*.<br>max_samples is incompatible with 'ovo' multiclass_strategy |
| | criterion | {“gini”, “entropy”} | entropy | The function to measure the quality of a split (only used if max_features != num_features).<br>Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain. |
| | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) for any |
| | max_features | \<int\>, \<float\> <br><br>or {“auto”, “sqrt”, “log2”} | None | The number of features to consider when looking for the split:<br>If int, then consider max_features features at each split.<br>If float, then max_features is a fraction and int(max_features \* n_features) features are considered at each split.<br>If “auto”, then max_features=sqrt(n_features).<br>If “sqrt”, then max_features=sqrt(n_features).<br>If “log2”, then max_features=log2(n_features).<br>If None, then max_features=n_features. |
| | splitter | {"best", "random", "mutual", "cfs", "fcbf"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features). Supported strategies are: **best”**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **random”**: The algorithm generates 5 candidates and choose the best (max. info. gain) of them. **"mutual"**: Chooses the best features w.r.t. their mutual info with the label. **"cfs"**: Apply Correlation-based Feature Selection. **"fcbf"**: Apply Fast Correlation-Based Filter |
| | splitter | {"best", "random", "trandom", "mutual", "cfs", "fcbf", "iwss"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features).<br>Supported strategies are:<br>**“best”**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features.<br>**“random”**: The algorithm generates 5 candidates and chooses the best (max. info. gain) of them.<br>**“trandom”**: The algorithm generates only one random combination.<br>**"mutual"**: Chooses the best features w.r.t. their mutual info with the label.<br>**"cfs"**: Apply Correlation-based Feature Selection.<br>**"fcbf"**: Apply Fast Correlation-Based Filter.<br>**"iwss"**: IWSS based algorithm |
| | normalize | \<bool\> | False | If standardization of features should be applied on each node with the samples that reach it |
| \* | multiclass_strategy | {"ovo", "ovr"} | "ovo" | Strategy to use with multiclass datasets, **"ovo"**: one versus one. **"ovr"**: one versus rest |
| \* | multiclass_strategy | {"ovo", "ovr"} | "ovo" | Strategy to use with multiclass datasets:<br>**"ovo"**: one versus one.<br>**"ovr"**: one versus rest |
\* Hyperparameter used by the support vector classifier of every node

View File

@@ -5,7 +5,6 @@ Welcome to STree's documentation!
:caption: Contents:
:titlesonly:
stree
install
hyperparameters

View File

@@ -1,9 +1,12 @@
# STree
[![Codeship Status for Doctorado-ML/STree](https://app.codeship.com/projects/8b2bd350-8a1b-0138-5f2c-3ad36f3eb318/status?branch=master)](https://app.codeship.com/projects/399170)
![CI](https://github.com/Doctorado-ML/STree/workflows/CI/badge.svg)
[![codecov](https://codecov.io/gh/doctorado-ml/stree/branch/master/graph/badge.svg)](https://codecov.io/gh/doctorado-ml/stree)
[![Codacy Badge](https://app.codacy.com/project/badge/Grade/35fa3dfd53a24a339344b33d9f9f2f3d)](https://www.codacy.com/gh/Doctorado-ML/STree?utm_source=github.com&utm_medium=referral&utm_content=Doctorado-ML/STree&utm_campaign=Badge_Grade)
[![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/Doctorado-ML/STree.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/Doctorado-ML/STree/context:python)
[![PyPI version](https://badge.fury.io/py/STree.svg)](https://badge.fury.io/py/STree)
![https://img.shields.io/badge/python-3.8%2B-blue](https://img.shields.io/badge/python-3.8%2B-brightgreen)
[![DOI](https://zenodo.org/badge/262658230.svg)](https://zenodo.org/badge/latestdoi/262658230)
Oblique Tree classifier based on SVM nodes. The nodes are built and split with sklearn SVC models. Stree is a scikit-learn estimator and can be integrated into pipelines, grid searches, etc.

View File

@@ -178,7 +178,7 @@
"outputs": [],
"source": [
"# Stree\n",
"stree = Stree(random_state=random_state, C=.01, max_iter=1e3)"
"stree = Stree(random_state=random_state, C=.01, max_iter=1000, kernel=\"liblinear\", multiclass_strategy=\"ovr\")"
]
},
{
@@ -198,7 +198,7 @@
"outputs": [],
"source": [
"# SVC (linear)\n",
"svc = LinearSVC(random_state=random_state, C=.01, max_iter=1e3)"
"svc = LinearSVC(random_state=random_state, C=.01, max_iter=1000)"
]
},
{

View File

@@ -133,33 +133,33 @@
" 'base_estimator': [Stree(random_state=random_state)],\n",
" 'n_estimators': [10, 25],\n",
" 'learning_rate': [.5, 1],\n",
" 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
" 'base_estimator__tol': [.1, 1e-02],\n",
" 'base_estimator__max_depth': [3, 5, 7],\n",
" 'base_estimator__C': [1, 7, 55],\n",
" 'base_estimator__kernel': ['linear']\n",
" 'estimator__split_criteria': ['max_samples', 'impurity'],\n",
" 'estimator__tol': [.1, 1e-02],\n",
" 'estimator__max_depth': [3, 5, 7],\n",
" 'estimator__C': [1, 7, 55],\n",
" 'estimator__kernel': ['linear']\n",
"},\n",
"{\n",
" 'base_estimator': [Stree(random_state=random_state)],\n",
" 'n_estimators': [10, 25],\n",
" 'learning_rate': [.5, 1],\n",
" 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
" 'base_estimator__tol': [.1, 1e-02],\n",
" 'base_estimator__max_depth': [3, 5, 7],\n",
" 'base_estimator__C': [1, 7, 55],\n",
" 'base_estimator__degree': [3, 5, 7],\n",
" 'base_estimator__kernel': ['poly']\n",
" 'estimator__split_criteria': ['max_samples', 'impurity'],\n",
" 'estimator__tol': [.1, 1e-02],\n",
" 'estimator__max_depth': [3, 5, 7],\n",
" 'estimator__C': [1, 7, 55],\n",
" 'estimator__degree': [3, 5, 7],\n",
" 'estimator__kernel': ['poly']\n",
"},\n",
"{\n",
" 'base_estimator': [Stree(random_state=random_state)],\n",
" 'n_estimators': [10, 25],\n",
" 'learning_rate': [.5, 1],\n",
" 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
" 'base_estimator__tol': [.1, 1e-02],\n",
" 'base_estimator__max_depth': [3, 5, 7],\n",
" 'base_estimator__C': [1, 7, 55],\n",
" 'base_estimator__gamma': [.1, 1, 10],\n",
" 'base_estimator__kernel': ['rbf']\n",
" 'estimator__split_criteria': ['max_samples', 'impurity'],\n",
" 'estimator__tol': [.1, 1e-02],\n",
" 'estimator__max_depth': [3, 5, 7],\n",
" 'estimator__C': [1, 7, 55],\n",
" 'estimator__gamma': [.1, 1, 10],\n",
" 'estimator__kernel': ['rbf']\n",
"}]"
]
},
@@ -214,7 +214,7 @@
" base_estimator=Stree(C=55, max_depth=7, random_state=1,\n",
" split_criteria='max_samples', tol=0.1),\n",
" learning_rate=0.5, n_estimators=25, random_state=1)\n",
"Best hyperparameters: {'base_estimator': Stree(C=55, max_depth=7, random_state=1, split_criteria='max_samples', tol=0.1), 'base_estimator__C': 55, 'base_estimator__kernel': 'linear', 'base_estimator__max_depth': 7, 'base_estimator__split_criteria': 'max_samples', 'base_estimator__tol': 0.1, 'learning_rate': 0.5, 'n_estimators': 25}"
"Best hyperparameters: {'base_estimator': Stree(C=55, max_depth=7, random_state=1, split_criteria='max_samples', tol=0.1), 'estimator__C': 55, 'estimator__kernel': 'linear', 'estimator__max_depth': 7, 'estimator__split_criteria': 'max_samples', 'estimator__tol': 0.1, 'learning_rate': 0.5, 'n_estimators': 25}"
]
},
{

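The notebook changes above track the scikit-learn 1.2 rename of base_estimator to estimator in ensemble APIs; a minimal sketch of the new spelling (parameter values are illustrative):

```python
from sklearn.ensemble import AdaBoostClassifier
from stree import Stree

# since scikit-learn 1.2 the wrapped model is passed as `estimator`,
# and nested grid-search keys use the estimator__ prefix
ada = AdaBoostClassifier(
    estimator=Stree(random_state=1), n_estimators=10, learning_rate=0.5
)
```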
View File

@@ -1,5 +1,65 @@
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[project]
name = "STree"
dependencies = ["scikit-learn>1.0", "mufs"]
license = { file = "LICENSE" }
description = "Oblique decision tree with svm nodes."
readme = "README.md"
authors = [
{ name = "Ricardo Montañana", email = "ricardo.montanana@alu.uclm.es" },
]
dynamic = ['version']
requires-python = ">=3.11"
keywords = [
"scikit-learn",
"oblique-classifier",
"oblique-decision-tree",
"decision-tree",
"svm",
"svc",
]
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Topic :: Software Development",
"Topic :: Scientific/Engineering",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
]
[project.optional-dependencies]
dev = ["black", "flake8", "coverage", "hatch", "pip-audit"]
doc = ["sphinx", "myst-parser", "sphinx_rtd_theme", "sphinx-autodoc-typehints"]
[project.urls]
Code = "https://github.com/Doctorado-ML/STree"
Documentation = "https://stree.readthedocs.io/en/latest/index.html"
[tool.hatch.version]
path = "stree/_version.py"
[tool.hatch.build.targets.sdist]
include = ["/stree"]
[tool.coverage.run]
branch = true
source = ["stree"]
command_line = "-m unittest discover -s stree.tests"
[tool.coverage.report]
show_missing = true
fail_under = 100
[tool.black]
line-length = 79
target-version = ["py311"]
include = '\.pyi?$'
exclude = '''
/(

View File

@@ -1,2 +1,3 @@
scikit-learn>0.24
scikit-learn==1.5.2
coverage
mufs

View File

@@ -1 +0,0 @@
python-3.8

View File

@@ -1,50 +0,0 @@
import setuptools
def readme():
with open("README.md") as f:
return f.read()
def get_data(field):
item = ""
with open("stree/__init__.py") as f:
for line in f.readlines():
if line.startswith(f"__{field}__"):
delim = '"' if '"' in line else "'"
item = line.split(delim)[1]
break
else:
raise RuntimeError(f"Unable to find {field} string.")
return item
setuptools.setup(
name="STree",
version=get_data("version"),
license=get_data("license"),
description="Oblique decision tree with svm nodes",
long_description=readme(),
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
url="https://github.com/Doctorado-ML/STree#stree",
project_urls={
"Code": "https://github.com/Doctorado-ML/STree",
"Documentation": "https://stree.readthedocs.io/en/latest/index.html",
},
author=get_data("author"),
author_email=get_data("author_email"),
keywords="scikit-learn oblique-classifier oblique-decision-tree decision-\
tree svm svc",
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: " + get_data("license"),
"Programming Language :: Python :: 3.8",
"Natural Language :: English",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Intended Audience :: Science/Research",
],
install_requires=["scikit-learn", "mufs"],
test_suite="stree.tests",
zip_safe=False,
)

View File

@@ -16,8 +16,28 @@ from mufs import MUFS
class Snode:
"""Nodes of the tree that keeps the svm classifier and if testing the
"""
Node of the tree that keeps the svm classifier and, if testing, the
dataset assigned to it
Parameters
----------
clf : SVC
Classifier used
X : np.ndarray
input dataset in train time (only in testing)
y : np.ndarray
input labels in train time
features : np.array
features used to compute hyperplane
impurity : float
impurity of the node
title : str
label describing the route to the node
weight : np.ndarray, optional
weights applied to input dataset in train time, by default None
scaler : StandardScaler, optional
scaler used if any, by default None
"""
def __init__(
@@ -48,6 +68,7 @@ class Snode:
self._impurity = impurity
self._partition_column: int = -1
self._scaler = scaler
self._proba = None
@classmethod
def copy(cls, node: "Snode") -> "Snode":
@@ -107,24 +128,45 @@ class Snode:
def get_up(self) -> "Snode":
return self._up
def make_predictor(self):
def make_predictor(self, num_classes: int) -> None:
"""Compute the class of the predictor and its belief based on the
subdataset of the node only if it is a leaf
"""
if not self.is_leaf():
return
classes, card = np.unique(self._y, return_counts=True)
if len(classes) > 1:
self._proba = np.zeros((num_classes,), dtype=np.int64)
for c, n in zip(classes, card):
self._proba[c] = n
try:
max_card = max(card)
self._class = classes[card == max_card][0]
self._belief = max_card / np.sum(card)
else:
self._belief = 1
try:
self._class = classes[0]
except IndexError:
except ValueError:
self._class = None
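In effect make_predictor now stores per-class counts for the leaf; a standalone sketch of the counting logic above (the values mirror the updated unit test further down):

```python
import numpy as np

y_leaf = np.array([1, 0, 1, 1])           # labels that reached a leaf
num_classes = 2
classes, card = np.unique(y_leaf, return_counts=True)
proba = np.zeros((num_classes,), dtype=np.int64)
for c, n in zip(classes, card):
    proba[c] = n                          # proba == [1, 3]
max_card = max(card)
print(classes[card == max_card][0], max_card / np.sum(card))  # 1 0.75
```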
def graph(self):
"""
Return a string representing the node in graphviz format
"""
output = ""
count_values = np.unique(self._y, return_counts=True)
if self.is_leaf():
output += (
f'N{id(self)} [shape=box style=filled label="'
f"class={self._class} impurity={self._impurity:.3f} "
f'counts={self._proba}"];\n'
)
else:
output += (
f'N{id(self)} [label="#features={len(self._features)} '
f"classes={count_values[0]} samples={count_values[1]} "
f'({sum(count_values[1])})" fontcolor=black];\n'
)
output += f"N{id(self)} -> N{id(self.get_up())} [color=black];\n"
output += f"N{id(self)} -> N{id(self.get_down())} [color=black];\n"
return output
def __str__(self) -> str:
count_values = np.unique(self._y, return_counts=True)
if self.is_leaf():
@@ -165,6 +207,56 @@ class Siterator:
class Splitter:
"""
Splits a dataset in two based on different criteria
Parameters
----------
clf : SVC, optional
classifier, by default None
criterion : str, optional
The function to measure the quality of a split (only used if
max_features != num_features). Supported criteria are “gini” for the
Gini impurity and “entropy” for the information gain, by default
None
feature_select : str, optional
The strategy used to choose the feature set at each node (only used if
max_features < num_features). Supported strategies are: “best”: sklearn
SelectKBest algorithm is used in every node to choose the max_features
best features. “random”: The algorithm generates 5 candidates and
choose the best (max. info. gain) of them. “trandom”: The algorithm
generates only one random combination. "mutual": Chooses the best
features w.r.t. their mutual info with the label. "cfs": Apply
Correlation-based Feature Selection. "fcbf": Apply Fast
Correlation-Based Filter. "iwss": IWSS based algorithm, by default None
criteria : str, optional
Decides (just in case of a multi class classification) which column
(class) to use to split the dataset in a node. max_samples is
incompatible with 'ovo' multiclass_strategy, by default None
min_samples_split : int, optional
The minimum number of samples required to split an internal node. 0
(default) for any, by default None
random_state : optional
Controls the pseudo random number generation for shuffling the data for
probability estimates. Ignored when probability is False. Pass an int
for reproducible output across multiple function calls, by
default None
normalize : bool, optional
If standardization of features should be applied on each node with the
samples that reach it, by default False
Raises
------
ValueError
clf has to be a sklearn estimator
ValueError
criterion must be gini or entropy
ValueError
criteria has to be max_samples or impurity
ValueError
splitter must be in {random, trandom, best, mutual, cfs, fcbf, iwss}
"""
def __init__(
self,
clf: SVC = None,
@@ -201,10 +293,19 @@ class Splitter:
f"criteria has to be max_samples or impurity; got ({criteria})"
)
if feature_select not in ["random", "best", "mutual", "cfs", "fcbf"]:
if feature_select not in [
"random",
"trandom",
"best",
"mutual",
"cfs",
"fcbf",
"iwss",
]:
raise ValueError(
"splitter must be in {random, best, mutual, cfs, fcbf} got "
f"({feature_select})"
"splitter must be in {random, trandom, best, mutual, cfs, "
"fcbf, iwss} "
f"got ({feature_select})"
)
self.criterion_function = getattr(self, f"_{self._criterion}")
self.decision_criteria = getattr(self, f"_{self._criteria}")
@@ -235,6 +336,31 @@ class Splitter:
features_sets = self._generate_spaces(n_features, max_features)
return self._select_best_set(dataset, labels, features_sets)
@staticmethod
def _fs_trandom(
dataset: np.array, labels: np.array, max_features: int
) -> tuple:
"""Return the a random feature set combination
Parameters
----------
dataset : np.array
array of samples
labels : np.array
labels of the dataset
max_features : int
number of features of the subspace
(< number of features in dataset)
Returns
-------
tuple
indices of the features selected
"""
# Random feature reduction
n_features = dataset.shape[1]
return tuple(sorted(random.sample(range(n_features), max_features)))
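The "trandom" strategy reduces to a single random draw; an equivalent standalone snippet (the seed is arbitrary):

```python
import random

random.seed(4)                       # arbitrary seed for reproducibility
n_features, max_features = 20, 4
subset = tuple(sorted(random.sample(range(n_features), max_features)))
print(subset)                        # one sorted random feature combination
```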
@staticmethod
def _fs_best(
dataset: np.array, labels: np.array, max_features: int
@@ -262,9 +388,8 @@ class Splitter:
.get_support(indices=True)
)
@staticmethod
def _fs_mutual(
dataset: np.array, labels: np.array, max_features: int
self, dataset: np.array, labels: np.array, max_features: int
) -> tuple:
"""Return the best features with mutual information with labels
@@ -284,10 +409,13 @@ class Splitter:
indices of the features selected
"""
# return best features with mutual info with the label
feature_list = mutual_info_classif(dataset, labels)
feature_list = mutual_info_classif(
dataset, labels, random_state=self._random_state
)
return tuple(
sorted(
range(len(feature_list)), key=lambda sub: feature_list[sub]
range(len(feature_list)),
key=lambda sub: feature_list[sub],
)[-max_features:]
)
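A self-contained sketch of the same top-k selection by mutual information (synthetic data; only mutual_info_classif and the sort are from the code above):

```python
from sklearn.datasets import make_classification
from sklearn.feature_selection import mutual_info_classif

X, y = make_classification(n_features=20, n_informative=7, random_state=0)
scores = mutual_info_classif(X, y, random_state=0)
# indices of the max_features highest-scoring features, as above
top = tuple(sorted(range(len(scores)), key=lambda i: scores[i])[-4:])
print(top)
```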
@@ -339,6 +467,31 @@ class Splitter:
mufs = MUFS(max_features=max_features, discrete=False)
return mufs.fcbf(dataset, labels, 5e-4).get_results()
@staticmethod
def _fs_iwss(
dataset: np.array, labels: np.array, max_features: int
) -> tuple:
"""Correlattion-based feature selection based on iwss with max_features
limit
Parameters
----------
dataset : np.array
array of samples
labels : np.array
labels of the dataset
max_features : int
number of features of the subspace
(< number of features in dataset)
Returns
-------
tuple
indices of the features selected
"""
mufs = MUFS(max_features=max_features, discrete=False)
return mufs.iwss(dataset, labels, 0.25).get_results()
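A usage sketch for the two MUFS-backed strategies; nothing is assumed about the mufs API beyond the calls visible in this diff:

```python
import numpy as np
from mufs import MUFS

rng = np.random.default_rng(0)
X = rng.random((100, 20))            # synthetic continuous features
y = rng.integers(0, 2, 100)          # synthetic binary labels
mufs = MUFS(max_features=4, discrete=False)
iwss_features = mufs.iwss(X, y, 0.25).get_results()   # as in _fs_iwss
fcbf_features = mufs.fcbf(X, y, 5e-4).get_results()   # as in _fs_fcbf
```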
def partition_impurity(self, y: np.array) -> np.array:
return self.criterion_function(y)
@@ -377,7 +530,10 @@ class Splitter:
return entropy
def information_gain(
self, labels: np.array, labels_up: np.array, labels_dn: np.array
self,
labels: np.array,
labels_up: np.array,
labels_dn: np.array,
) -> float:
"""Compute information gain of a split candidate
@@ -590,7 +746,7 @@ class Splitter:
Train time - True / Test time - False
"""
# data contains the distances of every sample to every class hyperplane
# array of (m, nc) nc = # classes
# array of (m, nc) nc = k if ovr, nc = k*(k-1)/2 if ovo
data = self._distances(node, samples)
if data.shape[0] < self._min_samples_split:
# there aren't enough samples to split

View File

@@ -17,21 +17,129 @@ from sklearn.utils.validation import (
_check_sample_weight,
)
from .Splitter import Splitter, Snode, Siterator
from ._version import __version__
class Stree(BaseEstimator, ClassifierMixin):
"""Estimator that is based on binary trees of svm nodes
"""
Estimator that is based on binary trees of svm nodes
can deal with sample_weights in predict, used in boosting sklearn methods.
Inheriting from BaseEstimator implements the get_params and set_params
methods; inheriting from ClassifierMixin implements the attribute
_estimator_type with "classifier" as value.
Parameters
----------
C : float, optional
Regularization parameter. The strength of the regularization is
inversely proportional to C. Must be strictly positive., by default 1.0
kernel : str, optional
Specifies the kernel type to be used in the algorithm. It must be one
of liblinear, linear, poly, rbf or sigmoid. liblinear uses
[liblinear](https://www.csie.ntu.edu.tw/~cjlin/liblinear/) library and
the rest uses [libsvm](https://www.csie.ntu.edu.tw/~cjlin/libsvm/)
library through scikit-learn library, by default "linear"
max_iter : int, optional
Hard limit on iterations within solver, or -1 for no limit., by default
1e5
random_state : int, optional
Controls the pseudo random number generation for shuffling the data for
probability estimates. Ignored when probability is False. Pass an int
for reproducible output across multiple function calls, by
default None
max_depth : int, optional
Specifies the maximum depth of the tree, by default None
tol : float, optional
Tolerance for stopping, by default 1e-4
degree : int, optional
Degree of the polynomial kernel function (poly). Ignored by all other
kernels., by default 3
gamma : str, optional
Kernel coefficient for rbf, poly and sigmoid. If gamma='scale'
(default) is passed then it uses 1 / (n_features * X.var()) as value
of gamma; if auto, uses 1 / n_features, by default "scale"
split_criteria : str, optional
Decides (just in case of a multi class classification) which column
(class) to use to split the dataset in a node. max_samples is
incompatible with 'ovo' multiclass_strategy, by default "impurity"
criterion : str, optional
The function to measure the quality of a split (only used if
max_features != num_features). Supported criteria are “gini” for the
Gini impurity and “entropy” for the information gain., by default
"entropy"
min_samples_split : int, optional
The minimum number of samples required to split an internal node. 0
(default) for any, by default 0
max_features : optional
The number of features to consider when looking for the split: If int,
then consider max_features features at each split. If float, then
max_features is a fraction and int(max_features * n_features) features
are considered at each split. If “auto”, then max_features=
sqrt(n_features). If “sqrt”, then max_features=sqrt(n_features). If
“log2”, then max_features=log2(n_features). If None, then max_features=
n_features., by default None
splitter : str, optional
The strategy used to choose the feature set at each node (only used if
max_features < num_features). Supported strategies are: “best”: sklearn
SelectKBest algorithm is used in every node to choose the max_features
best features. “random”: The algorithm generates 5 candidates and
choose the best (max. info. gain) of them. “trandom”: The algorithm
generates only one random combination. "mutual": Chooses the best
features w.r.t. their mutual info with the label. "cfs": Apply
Correlation-based Feature Selection. "fcbf": Apply Fast
Correlation-Based Filter. "iwss": IWSS based algorithm, by default
"random"
multiclass_strategy : str, optional
Strategy to use with multiclass datasets, "ovo": one versus one. "ovr":
one versus rest, by default "ovo"
normalize : bool, optional
If standardization of features should be applied on each node with the
samples that reach it, by default False
Attributes
----------
classes_ : ndarray of shape (n_classes,)
The classes labels.
n_classes_ : int
The number of classes
n_iter_ : int
Max number of iterations in classifier
depth_ : int
Max depth of the tree
n_features_ : int
The number of features when ``fit`` is performed.
n_features_in_ : int
Number of features seen during :term:`fit`.
max_features_ : int
Number of features to use in hyperplane computation
tree_ : Node
root of the tree
X_ : ndarray
points to the input dataset
y_ : ndarray
points to the input labels
References
----------
R. Montañana, J. A. Gámez, J. M. Puerta, "STree: a single multi-class
oblique decision tree based on support vector machines.", 2021 LNAI 12882
"""
def __init__(
self,
C: float = 1.0,
kernel: str = "linear",
max_iter: int = 1e5,
max_iter: int = int(1e5),
random_state: int = None,
max_depth: int = None,
tol: float = 1e-4,
@@ -60,6 +168,17 @@ class Stree(BaseEstimator, ClassifierMixin):
self.splitter = splitter
self.normalize = normalize
self.multiclass_strategy = multiclass_strategy
self.depth_ = 0
@staticmethod
def version() -> str:
"""Return the version of the package."""
return __version__
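Usage is direct (the value comes from the new stree/_version.py shown further down):

```python
from stree import Stree

print(Stree.version())   # "1.4.0" in this compare
```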
def __call__(self) -> None:
"""Only added to comply with scikit-learn base sestimator for
ensembles"""
pass
def _more_tags(self) -> dict:
"""Required by sklearn to supply features of the classifier
@@ -71,7 +190,10 @@ class Stree(BaseEstimator, ClassifierMixin):
return {"requires_y": True}
def fit(
self, X: np.ndarray, y: np.ndarray, sample_weight: np.array = None
self,
X: np.ndarray,
y: np.ndarray,
sample_weight: np.array = None,
) -> "Stree":
"""Build the tree based on the dataset of samples and its labels
@@ -200,7 +322,7 @@ class Stree(BaseEstimator, ClassifierMixin):
if np.unique(y).shape[0] == 1:
# only 1 class => pure dataset
node.set_title(title + ", <pure>")
node.make_predictor()
node.make_predictor(self.n_classes_)
return node
# Train the model
clf = self._build_clf()
@@ -219,14 +341,18 @@ class Stree(BaseEstimator, ClassifierMixin):
if X_U is None or X_D is None:
# didn't part anything
node.set_title(title + ", <cgaf>")
node.make_predictor()
node.make_predictor(self.n_classes_)
return node
node.set_up(
self._train(X_U, y_u, sw_u, depth + 1, title + f" - Up({depth+1})")
)
node.set_down(
self._train(
X_D, y_d, sw_d, depth + 1, title + f" - Down({depth+1})"
X_D,
y_d,
sw_d,
depth + 1,
title + f" - Down({depth+1})",
)
)
return node
@@ -253,28 +379,100 @@ class Stree(BaseEstimator, ClassifierMixin):
)
)
@staticmethod
def _reorder_results(y: np.array, indices: np.array) -> np.array:
"""Reorder an array based on the array of indices passed
def __predict_class(self, X: np.array) -> np.array:
"""Compute the predicted class for the samples in X. Returns the number
of samples of each class in the corresponding leaf node.
Parameters
----------
y : np.array
data untidy
indices : np.array
indices used to set order
X : np.array
Array of samples
Returns
-------
np.array
array y ordered
Array of shape (n_samples, n_classes) with the number of samples
of each class in the corresponding leaf node
"""
# return array of same type given in y
y_ordered = y.copy()
indices = indices.astype(int)
for i, index in enumerate(indices):
y_ordered[index] = y[i]
return y_ordered
def compute_prediction(xp, indices, node):
if xp is None:
return
if node.is_leaf():
# set a class for indices
result[indices] = node._proba
return
self.splitter_.partition(xp, node, train=False)
x_u, x_d = self.splitter_.part(xp)
i_u, i_d = self.splitter_.part(indices)
compute_prediction(x_u, i_u, node.get_up())
compute_prediction(x_d, i_d, node.get_down())
# setup prediction & make it happen
result = np.zeros((X.shape[0], self.n_classes_))
indices = np.arange(X.shape[0])
compute_prediction(X, indices, self.tree_)
return result
def check_predict(self, X) -> np.array:
"""Checks predict and predict_proba preconditions. If input X is not an
np.array convert it to one.
Parameters
----------
X : np.ndarray
Array of samples
Returns
-------
np.array
Array of samples
Raises
------
ValueError
If number of features of X is different of the number of features
in training data
"""
check_is_fitted(self, ["tree_"])
# Input validation
X = check_array(X)
if X.shape[1] != self.n_features_:
raise ValueError(
f"Expected {self.n_features_} features but got "
f"({X.shape[1]})"
)
return X
def predict_proba(self, X: np.array) -> np.array:
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : dataset of samples.
Returns
-------
proba : array of shape (n_samples, n_classes)
The class probabilities of the input samples.
Raises
------
ValueError
if dataset with inconsistent number of features
NotFittedError
if model is not fitted
"""
X = self.check_predict(X)
# return # of samples of each class in leaf node
values = self.__predict_class(X)
normalizer = values.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
return values / normalizer
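A numeric sketch of the normalization above: leaf counts become class fractions, and all-zero rows are left at zero rather than dividing by zero.

```python
import numpy as np

values = np.array([[1.0, 3.0], [0.0, 0.0]])   # illustrative leaf counts
normalizer = values.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
print(values / normalizer)                    # [[0.25 0.75] [0. 0.]]
```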
def predict(self, X: np.array) -> np.array:
"""Predict labels for each sample in dataset passed
@@ -296,40 +494,45 @@ class Stree(BaseEstimator, ClassifierMixin):
NotFittedError
if model is not fitted
"""
X = self.check_predict(X)
return self.classes_[np.argmax(self.__predict_class(X), axis=1)]
def predict_class(
xp: np.array, indices: np.array, node: Snode
) -> np.array:
if xp is None:
return [], []
def get_nodes(self) -> int:
"""Return the number of nodes in the tree
Returns
-------
int
number of nodes
"""
nodes = 0
for _ in self:
nodes += 1
return nodes
def get_leaves(self) -> int:
"""Return the number of leaves in the tree
Returns
-------
int
number of leaves
"""
leaves = 0
for node in self:
if node.is_leaf():
# set a class for every sample in dataset
prediction = np.full((xp.shape[0], 1), node._class)
return prediction, indices
self.splitter_.partition(xp, node, train=False)
x_u, x_d = self.splitter_.part(xp)
i_u, i_d = self.splitter_.part(indices)
prx_u, prin_u = predict_class(x_u, i_u, node.get_up())
prx_d, prin_d = predict_class(x_d, i_d, node.get_down())
return np.append(prx_u, prx_d), np.append(prin_u, prin_d)
leaves += 1
return leaves
# sklearn check
check_is_fitted(self, ["tree_"])
# Input validation
X = check_array(X)
if X.shape[1] != self.n_features_:
raise ValueError(
f"Expected {self.n_features_} features but got "
f"({X.shape[1]})"
)
# setup prediction & make it happen
indices = np.arange(X.shape[0])
result = (
self._reorder_results(*predict_class(X, indices, self.tree_))
.astype(int)
.ravel()
)
return self.classes_[result]
def get_depth(self) -> int:
"""Return the depth of the tree
Returns
-------
int
depth of the tree
"""
return self.depth_
def nodes_leaves(self) -> tuple:
"""Compute the number of nodes and leaves in the built tree
@@ -362,6 +565,23 @@ class Stree(BaseEstimator, ClassifierMixin):
tree = None
return Siterator(tree)
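The separate counting methods added in this compare can be used like this (dataset choice is illustrative):

```python
from sklearn.datasets import load_wine
from stree import Stree

X, y = load_wine(return_X_y=True)
clf = Stree(random_state=0).fit(X, y)
print(clf.get_nodes(), clf.get_leaves(), clf.get_depth())
nodes, leaves = clf.nodes_leaves()   # both counts in one call
```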
def graph(self, title="") -> str:
"""Graphviz code representing the tree
Returns
-------
str
graphviz code
"""
output = (
"digraph STree {\nlabel=<STree "
f"{title}>\nfontsize=30\nfontcolor=blue\nlabelloc=t\n"
)
for node in self:
output += node.graph()
output += "}\n"
return output
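graph() only returns Graphviz source; rendering needs an external tool. A sketch assuming the optional graphviz Python package is installed:

```python
import graphviz                       # assumed optional dependency
from sklearn.datasets import load_iris
from stree import Stree

X, y = load_iris(return_X_y=True)
clf = Stree(random_state=0).fit(X, y)
dot = clf.graph("Iris")               # DOT source string
graphviz.Source(dot).render("stree_iris", format="png")
```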
def __str__(self) -> str:
"""String representation of the tree

View File

@@ -1,10 +1,9 @@
from .Strees import Stree, Siterator
__version__ = "1.2.1"
from ._version import __version__
__author__ = "Ricardo Montañana Gómez"
__copyright__ = "Copyright 2020-2021, Ricardo Montañana Gómez"
__license__ = "MIT License"
__author_email__ = "ricardo.montanana@alu.uclm.es"
__all__ = ["Stree", "Siterator"]
__all__ = ["__version__", "Stree", "Siterator"]

stree/_version.py Normal file
View File

@@ -0,0 +1 @@
__version__ = "1.4.0"

View File

@@ -67,10 +67,28 @@ class Snode_test(unittest.TestCase):
def test_make_predictor_on_leaf(self):
test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
test.make_predictor()
test.make_predictor(2)
self.assertEqual(1, test._class)
self.assertEqual(0.75, test._belief)
self.assertEqual(-1, test._partition_column)
self.assertListEqual([1, 3], test._proba.tolist())
def test_make_predictor_on_not_leaf(self):
test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
test.set_up(Snode(None, [1], [1], [], 0.0, "another_test"))
test.make_predictor(2)
self.assertIsNone(test._class)
self.assertEqual(0, test._belief)
self.assertEqual(-1, test._partition_column)
self.assertEqual(-1, test.get_up()._partition_column)
self.assertIsNone(test._proba)
def test_make_predictor_on_leaf_bogus_data(self):
test = Snode(None, [1, 2, 3, 4], [], [], 0.0, "test")
test.make_predictor(2)
self.assertIsNone(test._class)
self.assertEqual(-1, test._partition_column)
self.assertListEqual([0, 0], test._proba.tolist())
def test_set_title(self):
test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
@@ -97,21 +115,6 @@ class Snode_test(unittest.TestCase):
test.set_features([1, 2])
self.assertListEqual([1, 2], test.get_features())
def test_make_predictor_on_not_leaf(self):
test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
test.set_up(Snode(None, [1], [1], [], 0.0, "another_test"))
test.make_predictor()
self.assertIsNone(test._class)
self.assertEqual(0, test._belief)
self.assertEqual(-1, test._partition_column)
self.assertEqual(-1, test.get_up()._partition_column)
def test_make_predictor_on_leaf_bogus_data(self):
test = Snode(None, [1, 2, 3, 4], [], [], 0.0, "test")
test.make_predictor()
self.assertIsNone(test._class)
self.assertEqual(-1, test._partition_column)
def test_copy_node(self):
px = [1, 2, 3, 4]
py = [1]

View File

@@ -285,3 +285,28 @@ class Splitter_test(unittest.TestCase):
Xs, computed = tcl.get_subspace(X, y, rs)
self.assertListEqual(expected, list(computed))
self.assertListEqual(X[:, expected].tolist(), Xs.tolist())
def test_get_iwss_subspaces(self):
results = [
(4, [1, 5, 9, 12]),
(6, [1, 5, 9, 12, 4, 15]),
]
for rs, expected in results:
X, y = load_dataset(n_features=20, n_informative=7)
tcl = self.build(feature_select="iwss", random_state=rs)
Xs, computed = tcl.get_subspace(X, y, rs)
self.assertListEqual(expected, list(computed))
self.assertListEqual(X[:, expected].tolist(), Xs.tolist())
def test_get_trandom_subspaces(self):
results = [
(4, [3, 7, 9, 12]),
(6, [0, 1, 2, 8, 15, 18]),
(7, [1, 2, 4, 8, 10, 12, 13]),
]
for rs, expected in results:
X, y = load_dataset(n_features=20, n_informative=7)
tcl = self.build(feature_select="trandom", random_state=rs)
Xs, computed = tcl.get_subspace(X, y, rs)
self.assertListEqual(expected, list(computed))
self.assertListEqual(X[:, expected].tolist(), Xs.tolist())

View File

@@ -10,6 +10,7 @@ from sklearn.svm import LinearSVC
from stree import Stree
from stree.Splitter import Snode
from .utils import load_dataset
from .._version import __version__
class Stree_test(unittest.TestCase):
@@ -114,6 +115,38 @@ class Stree_test(unittest.TestCase):
yp = clf.fit(X, y).predict(X[:num, :])
self.assertListEqual(y[:num].tolist(), yp.tolist())
def test_multiple_predict_proba(self):
expected = {
"liblinear": {
0: [0.02401129943502825, 0.9759887005649718],
17: [0.9282970550576184, 0.07170294494238157],
},
"linear": {
0: [0.029329608938547486, 0.9706703910614525],
17: [0.9298469387755102, 0.07015306122448979],
},
"rbf": {
0: [0.023448275862068966, 0.976551724137931],
17: [0.9458064516129032, 0.05419354838709677],
},
"poly": {
0: [0.01601164483260553, 0.9839883551673945],
17: [0.9089790897908979, 0.0910209102091021],
},
}
indices = [0, 17]
X, y = load_dataset(self._random_state)
for kernel in ["liblinear", "linear", "rbf", "poly"]:
clf = Stree(
kernel=kernel,
multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
random_state=self._random_state,
)
yp = clf.fit(X, y).predict_proba(X)
for index in indices:
for exp, comp in zip(expected[kernel][index], yp[index]):
self.assertAlmostEqual(exp, comp)
def test_single_vs_multiple_prediction(self):
"""Check if predicting sample by sample gives the same result as
predicting all samples at once
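The new test fixes expected probabilities per kernel at two sample indices; note that each expected pair sums to 1. A quick property-style check of the same behaviour, independent of the exact fixture (the dataset and kernel choices here are illustrative):

import numpy as np
from sklearn.datasets import make_classification
from stree import Stree

X, y = make_classification(random_state=1)
clf = Stree(kernel="rbf", multiclass_strategy="ovo", random_state=1)
proba = clf.fit(X, y).predict_proba(X)

assert proba.shape == (X.shape[0], 2)       # one column per class
assert np.allclose(proba.sum(axis=1), 1.0)  # each row is a distribution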
@@ -206,6 +239,7 @@ class Stree_test(unittest.TestCase):
)
tcl.fit(*load_dataset(self._random_state))
self.assertEqual(depth, tcl.depth_)
self.assertEqual(depth, tcl.get_depth())
def test_unfitted_tree_is_iterable(self):
tcl = Stree()
@@ -255,12 +289,12 @@ class Stree_test(unittest.TestCase):
"impurity sigmoid": 0.824,
},
"Iris": {
"max_samples liblinear": 0.9550561797752809,
"max_samples liblinear": 0.9887640449438202,
"max_samples linear": 1.0,
"max_samples rbf": 0.6685393258426966,
"max_samples poly": 0.6853932584269663,
"max_samples sigmoid": 0.6404494382022472,
"impurity liblinear": 0.9550561797752809,
"impurity liblinear": 0.9887640449438202,
"impurity linear": 1.0,
"impurity rbf": 0.6685393258426966,
"impurity poly": 0.6853932584269663,
@@ -273,10 +307,10 @@ class Stree_test(unittest.TestCase):
for criteria in ["max_samples", "impurity"]:
for kernel in self._kernels:
clf = Stree(
max_iter=1e4,
multiclass_strategy="ovr"
if kernel == "liblinear"
else "ovo",
max_iter=int(1e4),
multiclass_strategy=(
"ovr" if kernel == "liblinear" else "ovo"
),
kernel=kernel,
random_state=self._random_state,
)
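The hunk above replaces the float literal 1e4 with int(1e4): recent scikit-learn releases validate max_iter as an integral type, so passing a float fails parameter validation. The safe pattern:

from stree import Stree

# 1e4 is a float in Python; cast it so sklearn's parameter checks accept it
clf = Stree(kernel="linear", max_iter=int(1e4), random_state=0)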
@@ -357,6 +391,7 @@ class Stree_test(unittest.TestCase):
# Tests of score
def test_score_binary(self):
"""Check score for binary classification."""
X, y = load_dataset(self._random_state)
accuracies = [
0.9506666666666667,
@@ -379,6 +414,7 @@ class Stree_test(unittest.TestCase):
self.assertAlmostEqual(accuracy_expected, accuracy_score)
def test_score_max_features(self):
"""Check score using max_features."""
X, y = load_dataset(self._random_state)
clf = Stree(
kernel="liblinear",
@@ -390,6 +426,7 @@ class Stree_test(unittest.TestCase):
self.assertAlmostEqual(0.9453333333333334, clf.score(X, y))
def test_bogus_splitter_parameter(self):
"""Check that bogus splitter parameter raises exception."""
clf = Stree(splitter="duck")
with self.assertRaises(ValueError):
clf.fit(*load_dataset())
@@ -403,10 +440,10 @@ class Stree_test(unittest.TestCase):
clf.fit(X, y)
score = clf.score(X, y)
# Check accuracy of the whole model
self.assertAlmostEquals(0.98, score, 5)
self.assertAlmostEqual(0.98, score, 5)
svm = LinearSVC(random_state=0)
svm.fit(X, y)
self.assertAlmostEquals(0.9666666666666667, svm.score(X, y), 5)
self.assertAlmostEqual(0.9666666666666667, svm.score(X, y), 5)
data = svm.decision_function(X)
expected = [
0.4444444444444444,
@@ -418,7 +455,7 @@ class Stree_test(unittest.TestCase):
ty[data > 0] = 1
ty = ty.astype(int)
for i in range(3):
self.assertAlmostEquals(
self.assertAlmostEqual(
expected[i],
clf.splitter_._gini(ty[:, i]),
)
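The expected value 0.4444... is consistent with the textbook Gini impurity 1 - sum(p_i^2); for a 3-sample column with class counts (1, 2) that is 1 - (1/3)^2 - (2/3)^2 = 4/9. A standalone re-computation, assuming splitter_._gini implements exactly this formula:

import numpy as np

def gini(labels: np.ndarray) -> float:
    # Gini impurity: 1 minus the sum of squared class proportions
    _, counts = np.unique(labels, return_counts=True)
    p = counts / counts.sum()
    return 1.0 - float(np.sum(p**2))

print(gini(np.array([0, 1, 1])))  # 0.4444... = 4/9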
@@ -445,6 +482,7 @@ class Stree_test(unittest.TestCase):
self.assertListEqual([47], resdn[1].tolist())
def test_score_multiclass_rbf(self):
"""Test score for multiclass classification with rbf kernel."""
X, y = load_dataset(
random_state=self._random_state,
n_classes=3,
@@ -462,6 +500,7 @@ class Stree_test(unittest.TestCase):
self.assertEqual(1.0, clf2.fit(X, y).score(X, y))
def test_score_multiclass_poly(self):
"""Test score for multiclass classification with poly kernel."""
X, y = load_dataset(
random_state=self._random_state,
n_classes=3,
@@ -483,6 +522,7 @@ class Stree_test(unittest.TestCase):
self.assertEqual(1.0, clf2.fit(X, y).score(X, y))
def test_score_multiclass_liblinear(self):
"""Test score for multiclass classification with liblinear kernel."""
X, y = load_dataset(
random_state=self._random_state,
n_classes=3,
@@ -508,6 +548,7 @@ class Stree_test(unittest.TestCase):
self.assertEqual(1.0, clf2.fit(X, y).score(X, y))
def test_score_multiclass_sigmoid(self):
"""Test score for multiclass classification with sigmoid kernel."""
X, y = load_dataset(
random_state=self._random_state,
n_classes=3,
@@ -528,6 +569,7 @@ class Stree_test(unittest.TestCase):
self.assertEqual(0.9662921348314607, clf2.fit(X, y).score(X, y))
def test_score_multiclass_linear(self):
"""Test score for multiclass classification with linear kernel."""
warnings.filterwarnings("ignore", category=ConvergenceWarning)
warnings.filterwarnings("ignore", category=RuntimeWarning)
X, y = load_dataset(
@@ -551,15 +593,17 @@ class Stree_test(unittest.TestCase):
)
self.assertEqual(0.9526666666666667, clf2.fit(X, y).score(X, y))
X, y = load_wine(return_X_y=True)
self.assertEqual(0.9831460674157303, clf.fit(X, y).score(X, y))
self.assertEqual(0.9887640449438202, clf.fit(X, y).score(X, y))
self.assertEqual(1.0, clf2.fit(X, y).score(X, y))
def test_zero_all_sample_weights(self):
"""Test exception raises when all sample weights are zero."""
X, y = load_dataset(self._random_state)
with self.assertRaises(ValueError):
Stree().fit(X, y, np.zeros(len(y)))
def test_mask_samples_weighted_zero(self):
"""Check that the weighted zero samples are masked."""
X = np.array(
[
[1, 1],
@@ -587,6 +631,7 @@ class Stree_test(unittest.TestCase):
self.assertEqual(model2.score(X, y, w), 1)
def test_depth(self):
"""Check depth of the tree."""
X, y = load_dataset(
random_state=self._random_state,
n_classes=3,
@@ -596,12 +641,15 @@ class Stree_test(unittest.TestCase):
clf = Stree(random_state=self._random_state)
clf.fit(X, y)
self.assertEqual(6, clf.depth_)
self.assertEqual(6, clf.get_depth())
X, y = load_wine(return_X_y=True)
clf = Stree(random_state=self._random_state)
clf.fit(X, y)
self.assertEqual(4, clf.depth_)
self.assertEqual(4, clf.get_depth())
def test_nodes_leaves(self):
"""Check number of nodes and leaves."""
X, y = load_dataset(
random_state=self._random_state,
n_classes=3,
@@ -612,15 +660,20 @@ class Stree_test(unittest.TestCase):
clf.fit(X, y)
nodes, leaves = clf.nodes_leaves()
self.assertEqual(31, nodes)
self.assertEqual(31, clf.get_nodes())
self.assertEqual(16, leaves)
self.assertEqual(16, clf.get_leaves())
X, y = load_wine(return_X_y=True)
clf = Stree(random_state=self._random_state)
clf.fit(X, y)
nodes, leaves = clf.nodes_leaves()
self.assertEqual(11, nodes)
self.assertEqual(11, clf.get_nodes())
self.assertEqual(6, leaves)
self.assertEqual(6, clf.get_leaves())
def test_nodes_leaves_artificial(self):
"""Check leaves of artificial dataset."""
n1 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test1")
n2 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test2")
n3 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test3")
@@ -636,15 +689,19 @@ class Stree_test(unittest.TestCase):
clf.tree_ = n1
nodes, leaves = clf.nodes_leaves()
self.assertEqual(6, nodes)
self.assertEqual(6, clf.get_nodes())
self.assertEqual(2, leaves)
self.assertEqual(2, clf.get_leaves())
def test_bogus_multiclass_strategy(self):
"""Check invalid multiclass strategy."""
clf = Stree(multiclass_strategy="other")
X, y = load_wine(return_X_y=True)
with self.assertRaises(ValueError):
clf.fit(X, y)
def test_multiclass_strategy(self):
"""Check multiclass strategy."""
X, y = load_wine(return_X_y=True)
clf_o = Stree(multiclass_strategy="ovo")
clf_r = Stree(multiclass_strategy="ovr")
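Several hunks above pair the existing nodes_leaves() tuple and depth_ attribute with the new get_nodes(), get_leaves() and get_depth() accessors. Typical usage checking that old and new APIs agree (exact counts depend on the dataset and random_state, so none are hard-coded here):

from sklearn.datasets import load_wine
from stree import Stree

X, y = load_wine(return_X_y=True)
clf = Stree(random_state=0).fit(X, y)

nodes, leaves = clf.nodes_leaves()
assert nodes == clf.get_nodes() and leaves == clf.get_leaves()
assert clf.depth_ == clf.get_depth()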
@@ -654,6 +711,7 @@ class Stree_test(unittest.TestCase):
self.assertEqual(0.9269662921348315, score_r)
def test_incompatible_hyperparameters(self):
"""Check incompatible hyperparameters."""
X, y = load_wine(return_X_y=True)
clf = Stree(kernel="liblinear", multiclass_strategy="ovo")
with self.assertRaises(ValueError):
@@ -661,3 +719,55 @@ class Stree_test(unittest.TestCase):
clf = Stree(multiclass_strategy="ovo", split_criteria="max_samples")
with self.assertRaises(ValueError):
clf.fit(X, y)
def test_version(self):
"""Check STree version."""
clf = Stree()
self.assertEqual(__version__, clf.version())
def test_call(self) -> None:
"""Check call method."""
clf = Stree()
self.assertIsNone(clf())
def test_graph(self):
"""Check graphviz representation of the tree."""
X, y = load_wine(return_X_y=True)
clf = Stree(random_state=self._random_state)
expected_head = (
"digraph STree {\nlabel=<STree >\nfontsize=30\n"
"fontcolor=blue\nlabelloc=t\n"
)
expected_tail = (
' [shape=box style=filled label="class=1 impurity=0.000 '
'counts=[0 1 0]"];\n}\n'
)
self.assertEqual(clf.graph(), expected_head + "}\n")
clf.fit(X, y)
computed = clf.graph()
computed_head = computed[: len(expected_head)]
num = -len(expected_tail)
computed_tail = computed[num:]
self.assertEqual(computed_head, expected_head)
self.assertEqual(computed_tail, expected_tail)
def test_graph_title(self):
X, y = load_wine(return_X_y=True)
clf = Stree(random_state=self._random_state)
expected_head = (
"digraph STree {\nlabel=<STree Sample title>\nfontsize=30\n"
"fontcolor=blue\nlabelloc=t\n"
)
expected_tail = (
' [shape=box style=filled label="class=1 impurity=0.000 '
'counts=[0 1 0]"];\n}\n'
)
self.assertEqual(clf.graph("Sample title"), expected_head + "}\n")
clf.fit(X, y)
computed = clf.graph("Sample title")
computed_head = computed[: len(expected_head)]
num = -len(expected_tail)
computed_tail = computed[num:]
self.assertEqual(computed_head, expected_head)
self.assertEqual(computed_tail, expected_tail)
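test_graph and test_graph_title only compare the DOT source as a string; to actually draw the tree, the source can be handed to the graphviz Python package (an external dependency assumed here, not part of these diffs):

import graphviz
from sklearn.datasets import load_wine
from stree import Stree

X, y = load_wine(return_X_y=True)
clf = Stree(random_state=0).fit(X, y)
dot = clf.graph("Wine STree")  # DOT source, with an optional title
graphviz.Source(dot).render("wine_stree", format="png", cleanup=True)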