Compare commits

...

33 Commits

Author SHA1 Message Date
cb80e8606b Add ask wiki link and init depth 2025-06-24 18:48:04 +02:00
c93d3fbcc7 Fix scikit-learn version in requirements for tests 2025-05-20 11:31:51 +02:00
f4ca4bbd5b Update comment and readme 2025-03-21 09:51:39 +01:00
e676ddbfcc Update python min version in Readme 2024-08-16 13:14:38 +02:00
Ricardo Montañana Gómez
dc637018e8 Rtd update (#58)
* Update read the docs config

* Update copyright year in docs

* Update python version

* Change build configuration

* Change version read in config

* Refactor config files

* Refactor api config
2024-08-15 11:49:38 +02:00
517013be09 Update readthedocs config place
Refactor __call__ method to do nothing as needed by sklearn
2024-08-14 16:37:36 +02:00
941c2ff5e0 Update gh action version 2024-08-14 10:15:26 +02:00
2ebf48145d Update python version requirements 2024-08-14 10:03:57 +02:00
7fbfd3622e Update python versions in gh actions 2024-08-14 09:58:36 +02:00
bc839a80d6 Remove black from lint in github actions 2024-08-14 09:52:05 +02:00
ba15ea2cc0 Remove unneeded file 2024-08-14 09:42:59 +02:00
85b56785c8 Change project builder to hatch
Update actions in Makefile
2024-08-14 09:41:45 +02:00
b627bb7531 Add pyproject.toml install information
Add __call__ method to support sklearn ensembles requirements for base estimators
Update tests
2024-08-13 13:28:32 +02:00
5f8ca8f3bb Reformat test with new black version 2024-03-05 18:46:19 +01:00
Ricardo Montañana Gómez
fb8b9b344f Update README.md
update installation instructions
2024-03-05 18:18:55 +01:00
036d1ba2a7 Add separate methods to return nodes/leaves/depth 2023-11-27 10:02:14 +01:00
4de74973b8 Black format issue 2023-07-12 14:16:08 +02:00
Ricardo Montañana Gómez
28dd04b95a Update benchmark.ipynb 2023-05-13 14:44:49 +02:00
Ricardo Montañana Gómez
542bbce7db ci: ⬆️ Update ci files and badges 2023-01-15 02:18:41 +01:00
Ricardo Montañana Gómez
5b791bc5bf New_version_sklearn (#56)
* test: 🧪 Update max_iter as int in test_multiclass_dataset

* refactor: 📝 Rename base_estimator to estimator as the former is deprecated in notebook

* refactor: 📌 Convert max_iter to int as needed in sklearn 1.2

* chore: 🔖 Update version info to 1.3.1
2023-01-15 01:21:32 +01:00
Ricardo Montañana Gómez
c37f044e3a Update doc and version 1.30 (#55)
* Add complete classes counts to node and tests

* Implement optimized predict and new predict_proba

* Add predict_proba test

* Add python 3.10 to CI

* Update version number and documentation
2022-10-21 13:31:59 +02:00
Ricardo Montañana Gómez
2f6ae648a1 New predict proba (#53)
* Add complete classes counts to node and tests

* Implement optimized predict and new predict_proba

* Add predict_proba test

* Add python 3.10 to CI
2022-10-21 12:26:46 +02:00
Ricardo Montañana Gómez
93be8a89a8 Graphviz (#52)
* Add graphviz representation of the tree

* Complete graphviz test
Add comments to some tests

* Add optional title to tree graph

* Add fontcolor keyword to nodes of the tree

* Add color keyword to arrows of graph

* Update version file to 1.2.4
2022-04-17 19:47:58 +02:00
82838fa3e0 Add audit and devdeps to Makefile 2022-01-11 11:02:09 +01:00
f0b2ce3c7b Fix github actions lint mistake 2022-01-11 10:44:45 +01:00
00ed57c015 Add version of the model method 2021-12-17 11:01:09 +01:00
Ricardo Montañana Gómez
08222f109e Update CITATION.cff 2021-11-04 11:06:13 +01:00
cc931d8547 Fix random seed not used in fs_mutual 2021-11-04 10:04:30 +01:00
b044a057df Update comments and README.md 2021-11-02 14:04:10 +01:00
fc48bc8ba4 Update docs and version number 2021-11-02 12:17:46 +01:00
Ricardo Montañana Gómez
8251f07674 Fix Citation (#49) 2021-11-02 10:58:30 +01:00
Ricardo Montañana Gómez
0b15a5af11 Fix space in CITATION.cff 2021-11-02 00:25:21 +01:00
Ricardo Montañana Gómez
28d905368b Create CITATION.cff 2021-11-02 00:20:49 +01:00
26 changed files with 828 additions and 524 deletions


@@ -2,12 +2,12 @@ name: "CodeQL"
 on:
   push:
-    branches: [ master ]
+    branches: [master]
   pull_request:
     # The branches below must be a subset of the branches above
-    branches: [ master ]
+    branches: [master]
   schedule:
-    - cron: '16 17 * * 3'
+    - cron: "16 17 * * 3"
 jobs:
   analyze:
@@ -17,40 +17,40 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        language: [ 'python' ]
+        language: ["python"]
         # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
         # Learn more:
         # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
     steps:
       - name: Checkout repository
         uses: actions/checkout@v2
       # Initializes the CodeQL tools for scanning.
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@v1
+        uses: github/codeql-action/init@v2
         with:
           languages: ${{ matrix.language }}
           # If you wish to specify custom queries, you can do so here or in a config file.
           # By default, queries listed here will override any specified in a config file.
           # Prefix the list here with "+" to use these queries and those in the config file.
           # queries: ./path/to/local/query, your-org/your-repo/queries@main
       # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
       # If this step fails, then you should remove it and run the build manually (see below)
       - name: Autobuild
-        uses: github/codeql-action/autobuild@v1
+        uses: github/codeql-action/autobuild@v2
       # Command-line programs to run using the OS shell.
       # 📚 https://git.io/JvXDl
       # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
       # and modify them (or add more) to build your code if your project
       # uses a compiled language
       #- run: |
       #   make bootstrap
       #   make release
       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@v1
+        uses: github/codeql-action/analyze@v2


@@ -12,13 +12,13 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [macos-latest, ubuntu-latest]
-        python: [3.8]
+        os: [macos-latest, ubuntu-latest, windows-latest]
+        python: [3.11, 3.12]
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
       - name: Set up Python ${{ matrix.python }}
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python }}
       - name: Install dependencies
@@ -28,14 +28,14 @@
           pip install -q --upgrade codecov coverage black flake8 codacy-coverage
       - name: Lint
         run: |
-          black --check --diff stree
+          # black --check --diff stree
           flake8 --count stree
       - name: Tests
         run: |
           coverage run -m unittest -v stree.tests
           coverage xml
       - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v1
+        uses: codecov/codecov-action@v4
         with:
           token: ${{ secrets.CODECOV_TOKEN }}
           files: ./coverage.xml


@@ -3,8 +3,12 @@ version: 2
 sphinx:
   configuration: docs/source/conf.py
+build:
+  os: ubuntu-22.04
+  tools:
+    python: "3.12"
 python:
-  version: 3.8
   install:
     - requirements: requirements.txt
     - requirements: docs/requirements.txt

37
CITATION.cff Normal file

@@ -0,0 +1,37 @@
+cff-version: 1.2.0
+message: "If you use this software, please cite it as below."
+authors:
+  - family-names: "Montañana"
+    given-names: "Ricardo"
+    orcid: "https://orcid.org/0000-0003-3242-5452"
+  - family-names: "Gámez"
+    given-names: "José A."
+    orcid: "https://orcid.org/0000-0003-1188-1117"
+  - family-names: "Puerta"
+    given-names: "José M."
+    orcid: "https://orcid.org/0000-0002-9164-5191"
+title: "STree"
+version: 1.2.3
+doi: 10.5281/zenodo.5504083
+date-released: 2021-11-02
+url: "https://github.com/Doctorado-ML/STree"
+preferred-citation:
+  type: article
+  authors:
+    - family-names: "Montañana"
+      given-names: "Ricardo"
+      orcid: "https://orcid.org/0000-0003-3242-5452"
+    - family-names: "Gámez"
+      given-names: "José A."
+      orcid: "https://orcid.org/0000-0003-1188-1117"
+    - family-names: "Puerta"
+      given-names: "José M."
+      orcid: "https://orcid.org/0000-0002-9164-5191"
+  doi: "10.1007/978-3-030-85713-4_6"
+  journal: "Lecture Notes in Computer Science"
+  month: 9
+  start: 54
+  end: 64
+  title: "STree: A Single Multi-class Oblique Decision Tree Based on Support Vector Machines"
+  volume: 12882
+  year: 2021

1
MANIFEST.in Normal file

@@ -0,0 +1 @@
include README.md LICENSE


@@ -1,40 +1,36 @@
 SHELL := /bin/bash
 .DEFAULT_GOAL := help
-.PHONY: coverage deps help lint push test doc build
+.PHONY: audit coverage help lint test doc doc-clean build
 coverage: ## Run tests with coverage
-	coverage erase
-	coverage run -m unittest -v stree.tests
-	coverage report -m
+	@coverage erase
+	@coverage run -m unittest -v stree.tests
+	@coverage report -m
-deps: ## Install dependencies
-	pip install -r requirements.txt
-lint: ## Lint and static-check
-	black stree
-	flake8 stree
-	mypy stree
-push: ## Push code with tags
-	git push && git push --tags
+lint: ## Lint source files
+	@black stree
+	@flake8 stree
 test: ## Run tests
-	python -m unittest -v stree.tests
+	@python -m unittest -v stree.tests
 doc: ## Update documentation
-	make -C docs --makefile=Makefile html
+	@make -C docs --makefile=Makefile html
 build: ## Build package
-	rm -fr dist/*
-	rm -fr build/*
-	python setup.py sdist bdist_wheel
+	@rm -fr dist/*
+	@rm -fr build/*
+	@hatch build
-doc-clean: ## Update documentation
+doc-clean: ## Clean documentation folders
-	make -C docs --makefile=Makefile clean
+	@make -C docs --makefile=Makefile clean
+audit: ## Audit pip
+	@pip-audit
-help: ## Show help message
+help: ## Show this help message
 	@IFS=$$'\n' ; \
-	help_lines=(`fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/##/:/'`); \
+	help_lines=(`grep -Fh "##" $(MAKEFILE_LIST) | grep -Fv fgrep | sed -e 's/\\$$//' | sed -e 's/##/:/'`); \
 	printf "%s\n\n" "Usage: make [task]"; \
 	printf "%-20s %s\n" "task" "help" ; \
 	printf "%-20s %s\n" "------" "----" ; \


@@ -1,21 +1,23 @@
+# STree
 ![CI](https://github.com/Doctorado-ML/STree/workflows/CI/badge.svg)
+[![CodeQL](https://github.com/Doctorado-ML/STree/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/Doctorado-ML/STree/actions/workflows/codeql-analysis.yml)
 [![codecov](https://codecov.io/gh/doctorado-ml/stree/branch/master/graph/badge.svg)](https://codecov.io/gh/doctorado-ml/stree)
 [![Codacy Badge](https://app.codacy.com/project/badge/Grade/35fa3dfd53a24a339344b33d9f9f2f3d)](https://www.codacy.com/gh/Doctorado-ML/STree?utm_source=github.com&utm_medium=referral&utm_content=Doctorado-ML/STree&utm_campaign=Badge_Grade)
-[![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/Doctorado-ML/STree.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/Doctorado-ML/STree/context:python)
 [![PyPI version](https://badge.fury.io/py/STree.svg)](https://badge.fury.io/py/STree)
-![https://img.shields.io/badge/python-3.8%2B-blue](https://img.shields.io/badge/python-3.8%2B-brightgreen)
+![https://img.shields.io/badge/python-3.11%2B-blue](https://img.shields.io/badge/python-3.11%2B-brightgreen)
+[![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/Doctorado-ML/STree)
 [![DOI](https://zenodo.org/badge/262658230.svg)](https://zenodo.org/badge/latestdoi/262658230)
-# STree
+![Stree](https://raw.github.com/doctorado-ml/stree/master/example.png)
 Oblique Tree classifier based on SVM nodes. The nodes are built and splitted with sklearn SVC models. Stree is a sklearn estimator and can be integrated in pipelines, grid searches, etc.
-![Stree](https://raw.github.com/doctorado-ml/stree/master/example.png)
 ## Installation
 ```bash
-pip install git+https://github.com/doctorado-ml/stree
+pip install Stree
 ```
 ## Documentation
@@ -36,23 +38,24 @@ Can be found in [stree.readthedocs.io](https://stree.readthedocs.io/en/stable/)
 ## Hyperparameters
 | | **Hyperparameter** | **Type/Values** | **Default** | **Meaning** |
 | --- | --- | --- | --- | --- |
 | \* | C | \<float\> | 1.0 | Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. |
 | \* | kernel | {"liblinear", "linear", "poly", "rbf", "sigmoid"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of liblinear, linear, poly or rbf. liblinear uses [liblinear](https://www.csie.ntu.edu.tw/~cjlin/liblinear/) library and the rest uses [libsvm](https://www.csie.ntu.edu.tw/~cjlin/libsvm/) library through scikit-learn library |
 | \* | max_iter | \<int\> | 1e5 | Hard limit on iterations within solver, or -1 for no limit. |
 | \* | random_state | \<int\> | None | Controls the pseudo random number generation for shuffling the data for probability estimates. Ignored when probability is False.<br>Pass an int for reproducible output across multiple function calls |
 | | max_depth | \<int\> | None | Specifies the maximum depth of the tree |
 | \* | tol | \<float\> | 1e-4 | Tolerance for stopping criterion. |
 | \* | degree | \<int\> | 3 | Degree of the polynomial kernel function (poly). Ignored by all other kernels. |
 | \* | gamma | {"scale", "auto"} or \<float\> | scale | Kernel coefficient for rbf, poly and sigmoid.<br>if gamma='scale' (default) is passed then it uses 1 / (n_features \* X.var()) as value of gamma,<br>if auto, uses 1 / n_features. |
 | | split_criteria | {"impurity", "max_samples"} | impurity | Decides (just in case of a multi class classification) which column (class) use to split the dataset in a node\*\*. max_samples is incompatible with 'ovo' multiclass_strategy |
 | | criterion | {“gini”, “entropy”} | entropy | The function to measure the quality of a split (only used if max_features != num_features). <br>Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain. |
 | | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) for any |
 | | max_features | \<int\>, \<float\> <br><br>or {“auto”, “sqrt”, “log2”} | None | The number of features to consider when looking for the split:<br>If int, then consider max_features features at each split.<br>If float, then max_features is a fraction and int(max_features \* n_features) features are considered at each split.<br>If “auto”, then max_features=sqrt(n_features).<br>If “sqrt”, then max_features=sqrt(n_features).<br>If “log2”, then max_features=log2(n_features).<br>If None, then max_features=n_features. |
-| | splitter | {"best", "random", "mutual", "cfs", "fcbf", "iwss"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features). Supported strategies are: **“best”**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **“random”**: The algorithm generates 5 candidates and choose the best (max. info. gain) of them. **“trandom”**: The algorithm generates a true random combination. **"mutual"**: Chooses the best features w.r.t. their mutual info with the label. **"cfs"**: Apply Correlation-based Feature Selection. **"fcbf"**: Apply Fast Correlation-Based Filter. **"iwss"**: IWSS based algorithm |
+| | splitter | {"best", "random", "trandom", "mutual", "cfs", "fcbf", "iwss"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features). Supported strategies are: **“best”**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **“random”**: The algorithm generates 5 candidates and choose the best (max. info. gain) of them. **“trandom”**: The algorithm generates only one random combination. **"mutual"**: Chooses the best features w.r.t. their mutual info with the label. **"cfs"**: Apply Correlation-based Feature Selection. **"fcbf"**: Apply Fast Correlation-Based Filter. **"iwss"**: IWSS based algorithm |
 | | normalize | \<bool\> | False | If standardization of features should be applied on each node with the samples that reach it |
 | \* | multiclass_strategy | {"ovo", "ovr"} | "ovo" | Strategy to use with multiclass datasets, **"ovo"**: one versus one. **"ovr"**: one versus rest |
 \* Hyperparameter used by the support vector classifier of every node
@@ -73,3 +76,7 @@ python -m unittest -v stree.tests
 ## License
 STree is [MIT](https://github.com/doctorado-ml/stree/blob/master/LICENSE) licensed
+## Reference
+R. Montañana, J. A. Gámez, J. M. Puerta, "STree: a single multi-class oblique decision tree based on support vector machines.", 2021 LNAI 12882, pg. 54-64
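Since the README presents Stree as a scikit-learn estimator, a minimal fit/predict sketch may help to read the diff above. It is illustrative only: the dataset and hyperparameter values are arbitrary choices, not taken from the repository.

```python
# Minimal usage sketch; assumes `pip install Stree` as in the README above.
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split

from stree import Stree

X, y = load_wine(return_X_y=True)
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, random_state=0)

# Hyperparameter names come from the table above; the values are arbitrary.
clf = Stree(C=1.0, kernel="linear", max_depth=5, random_state=0)
clf.fit(Xtrain, ytrain)
print("accuracy:", clf.score(Xtest, ytest))
print("class probabilities:", clf.predict_proba(Xtest[:3]))
```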


@@ -1,9 +1,10 @@
 Siterator
 =========
-.. automodule:: Splitter
+.. automodule:: stree
 .. autoclass:: Siterator
     :members:
     :undoc-members:
     :private-members:
     :show-inheritance:
+    :noindex:


@@ -1,9 +1,9 @@
 Snode
 =====
-.. automodule:: Splitter
-.. autoclass:: Snode
+.. autoclass:: stree.Splitter.Snode
     :members:
     :undoc-members:
     :private-members:
     :show-inheritance:
+    :noindex:


@@ -1,9 +1,10 @@
 Splitter
 ========
-.. automodule:: Splitter
+.. automodule:: stree.Splitter
 .. autoclass:: Splitter
     :members:
     :undoc-members:
     :private-members:
     :show-inheritance:
+    :noindex:


@@ -7,3 +7,4 @@ Stree
     :undoc-members:
     :private-members:
     :show-inheritance:
+    :noindex:


@@ -6,27 +6,21 @@
 # -- Path setup --------------------------------------------------------------
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-#
-import os
 import sys
+from pathlib import Path
+
+sys.path.insert(0, str(Path(__file__).parent.parent.parent))
 import stree
-sys.path.insert(0, os.path.abspath("../../stree/"))
 # -- Project information -----------------------------------------------------
 project = "STree"
-copyright = "2020 - 2021, Ricardo Montañana Gómez"
+copyright = "2020 - 2024, Ricardo Montañana Gómez"
 author = "Ricardo Montañana Gómez"
 # The full version, including alpha/beta/rc tags
-version = stree.__version__
-release = version
+version = release = stree.__version__
@@ -54,4 +48,4 @@ html_theme = "sphinx_rtd_theme"
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ["_static"]
+html_static_path = []


@@ -1,22 +1,22 @@
# Hyperparameters
 | | **Hyperparameter** | **Type/Values** | **Default** | **Meaning** |
 | --- | --- | --- | --- | --- |
 | \* | C | \<float\> | 1.0 | Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. |
-| \* | kernel | {"liblinear", "linear", "poly", "rbf", "sigmoid"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of liblinear, linear, poly or rbf. liblinear uses [liblinear](https://www.csie.ntu.edu.tw/~cjlin/liblinear/) library and the rest uses [libsvm](https://www.csie.ntu.edu.tw/~cjlin/libsvm/) library through scikit-learn library |
+| \* | kernel | {"liblinear", "linear", "poly", "rbf", "sigmoid"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of liblinear, linear, poly or rbf.<br>liblinear uses [liblinear](https://www.csie.ntu.edu.tw/~cjlin/liblinear/) library and the rest uses [libsvm](https://www.csie.ntu.edu.tw/~cjlin/libsvm/) library through scikit-learn library |
 | \* | max_iter | \<int\> | 1e5 | Hard limit on iterations within solver, or -1 for no limit. |
 | \* | random_state | \<int\> | None | Controls the pseudo random number generation for shuffling the data for probability estimates. Ignored when probability is False.<br>Pass an int for reproducible output across multiple function calls |
 | | max_depth | \<int\> | None | Specifies the maximum depth of the tree |
 | \* | tol | \<float\> | 1e-4 | Tolerance for stopping criterion. |
 | \* | degree | \<int\> | 3 | Degree of the polynomial kernel function (poly). Ignored by all other kernels. |
 | \* | gamma | {"scale", "auto"} or \<float\> | scale | Kernel coefficient for rbf, poly and sigmoid.<br>if gamma='scale' (default) is passed then it uses 1 / (n_features \* X.var()) as value of gamma,<br>if auto, uses 1 / n_features. |
-| | split_criteria | {"impurity", "max_samples"} | impurity | Decides (just in case of a multi class classification) which column (class) use to split the dataset in a node\*\*. max_samples is incompatible with 'ovo' multiclass_strategy |
+| | split_criteria | {"impurity", "max_samples"} | impurity | Decides (just in case of a multi class classification) which column (class) use to split the dataset in a node\*\*.<br>max_samples is incompatible with 'ovo' multiclass_strategy |
-| | criterion | {“gini”, “entropy”} | entropy | The function to measure the quality of a split (only used if max_features != num_features). <br>Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain. |
+| | criterion | {“gini”, “entropy”} | entropy | The function to measure the quality of a split (only used if max_features != num_features).<br>Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain. |
 | | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) for any |
 | | max_features | \<int\>, \<float\> <br><br>or {“auto”, “sqrt”, “log2”} | None | The number of features to consider when looking for the split:<br>If int, then consider max_features features at each split.<br>If float, then max_features is a fraction and int(max_features \* n_features) features are considered at each split.<br>If “auto”, then max_features=sqrt(n_features).<br>If “sqrt”, then max_features=sqrt(n_features).<br>If “log2”, then max_features=log2(n_features).<br>If None, then max_features=n_features. |
-| | splitter | {"best", "random", "mutual", "cfs", "fcbf", "iwss"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features). Supported strategies are: **“best”**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **“random”**: The algorithm generates 5 candidates and choose the best (max. info. gain) of them. **“trandom”**: The algorithm generates a true random combination. **"mutual"**: Chooses the best features w.r.t. their mutual info with the label. **"cfs"**: Apply Correlation-based Feature Selection. **"fcbf"**: Apply Fast Correlation-Based Filter. **"iwss"**: IWSS based algorithm |
+| | splitter | {"best", "random", "trandom", "mutual", "cfs", "fcbf", "iwss"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features).<br>Supported strategies are:<br>**“best”**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features.<br>**“random”**: The algorithm generates 5 candidates and choose the best (max. info. gain) of them.<br>**“trandom”**: The algorithm generates only one random combination.<br>**"mutual"**: Chooses the best features w.r.t. their mutual info with the label.<br>**"cfs"**: Apply Correlation-based Feature Selection.<br>**"fcbf"**: Apply Fast Correlation-Based Filter.<br>**"iwss"**: IWSS based algorithm |
 | | normalize | \<bool\> | False | If standardization of features should be applied on each node with the samples that reach it |
-| \* | multiclass_strategy | {"ovo", "ovr"} | "ovo" | Strategy to use with multiclass datasets, **"ovo"**: one versus one. **"ovr"**: one versus rest |
+| \* | multiclass_strategy | {"ovo", "ovr"} | "ovo" | Strategy to use with multiclass datasets:<br>**"ovo"**: one versus one.<br>**"ovr"**: one versus rest |
 \* Hyperparameter used by the support vector classifier of every node
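To make the starred/unstarred distinction concrete, here is a hedged sketch of how these hyperparameters would be passed; the values are arbitrary examples, not recommendations from the documentation.

```python
from stree import Stree

# Starred hyperparameters (C, kernel, gamma, tol, max_iter, ...) are forwarded
# to the SVC trained at every node; the rest control the tree itself.
clf = Stree(
    kernel="rbf",               # node-level: SVC kernel
    C=7,                        # node-level: SVC regularization strength
    gamma=0.1,                  # node-level: RBF kernel coefficient
    max_iter=1000,              # node-level: must be an int since sklearn 1.2
    max_depth=3,                # tree-level: maximum depth of the tree
    min_samples_split=5,        # tree-level: minimum samples to split a node
    splitter="best",            # tree-level: per-node feature selection
    multiclass_strategy="ovr",  # node-level: "ovo" or "ovr"
)
```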


@@ -5,7 +5,6 @@ Welcome to STree's documentation!
     :caption: Contents:
     :titlesonly:
     stree
     install
     hyperparameters


@@ -178,7 +178,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# Stree\n", "# Stree\n",
"stree = Stree(random_state=random_state, C=.01, max_iter=1e3, kernel=\"liblinear\", multiclass_strategy=\"ovr\")" "stree = Stree(random_state=random_state, C=.01, max_iter=1000, kernel=\"liblinear\", multiclass_strategy=\"ovr\")"
] ]
}, },
{ {
@@ -198,7 +198,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# SVC (linear)\n", "# SVC (linear)\n",
"svc = LinearSVC(random_state=random_state, C=.01, max_iter=1e3)" "svc = LinearSVC(random_state=random_state, C=.01, max_iter=1000)"
] ]
}, },
{ {


@@ -1,253 +1,253 @@
 {
 "cells": [
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "# Test Gridsearch\n",
 "with different kernels and different configurations"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "# Setup\n",
 "Uncomment the next cell if STree is not already installed"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
 "#\n",
 "# Google Colab setup\n",
 "#\n",
 "#!pip install git+https://github.com/doctorado-ml/stree\n",
 "!pip install pandas"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": null,
 "metadata": {
 "colab": {},
 "colab_type": "code",
 "id": "zIHKVxthDZEa"
 },
 "outputs": [],
 "source": [
 "import random\n",
 "import os\n",
 "import pandas as pd\n",
 "import numpy as np\n",
 "from sklearn.ensemble import AdaBoostClassifier\n",
 "from sklearn.svm import LinearSVC\n",
 "from sklearn.model_selection import GridSearchCV, train_test_split\n",
 "from stree import Stree"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": null,
 "metadata": {
 "colab": {},
 "colab_type": "code",
 "id": "IEmq50QgDZEi"
 },
 "outputs": [],
 "source": [
 "if not os.path.isfile('data/creditcard.csv'):\n",
 "    !wget --no-check-certificate --content-disposition http://nube.jccm.es/index.php/s/Zs7SYtZQJ3RQ2H2/download\n",
 "    !tar xzf creditcard.tgz"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": null,
 "metadata": {
 "colab": {},
 "colab_type": "code",
 "id": "z9Q-YUfBDZEq",
 "outputId": "afc822fb-f16a-4302-8a67-2b9e2880159b",
 "tags": []
 },
 "outputs": [],
 "source": [
 "random_state=1\n",
 "\n",
 "def load_creditcard(n_examples=0):\n",
 "    df = pd.read_csv('data/creditcard.csv')\n",
 "    print(\"Fraud: {0:.3f}% {1}\".format(df.Class[df.Class == 1].count()*100/df.shape[0], df.Class[df.Class == 1].count()))\n",
 "    print(\"Valid: {0:.3f}% {1}\".format(df.Class[df.Class == 0].count()*100/df.shape[0], df.Class[df.Class == 0].count()))\n",
 "    y = df.Class\n",
 "    X = df.drop(['Class', 'Time', 'Amount'], axis=1).values\n",
 "    if n_examples > 0:\n",
 "        # Take first n_examples samples\n",
 "        X = X[:n_examples, :]\n",
 "        y = y[:n_examples, :]\n",
 "    else:\n",
 "        # Take all the positive samples with a number of random negatives\n",
 "        if n_examples < 0:\n",
 "            Xt = X[(y == 1).ravel()]\n",
 "            yt = y[(y == 1).ravel()]\n",
 "            indices = random.sample(range(X.shape[0]), -1 * n_examples)\n",
 "            X = np.append(Xt, X[indices], axis=0)\n",
 "            y = np.append(yt, y[indices], axis=0)\n",
 "    print(\"X.shape\", X.shape, \" y.shape\", y.shape)\n",
 "    print(\"Fraud: {0:.3f}% {1}\".format(len(y[y == 1])*100/X.shape[0], len(y[y == 1])))\n",
 "    print(\"Valid: {0:.3f}% {1}\".format(len(y[y == 0]) * 100 / X.shape[0], len(y[y == 0])))\n",
 "    Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, train_size=0.7, shuffle=True, random_state=random_state, stratify=y)\n",
 "    return Xtrain, Xtest, ytrain, ytest\n",
 "\n",
 "data = load_creditcard(-1000) # Take all true samples + 1000 of the others\n",
 "# data = load_creditcard(5000) # Take the first 5000 samples\n",
 "# data = load_creditcard(0) # Take all the samples\n",
 "\n",
 "Xtrain = data[0]\n",
 "Xtest = data[1]\n",
 "ytrain = data[2]\n",
 "ytest = data[3]"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "# Tests"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": null,
 "metadata": {
 "colab": {},
 "colab_type": "code",
 "id": "HmX3kR4PDZEw"
 },
 "outputs": [],
 "source": [
 "parameters = [{\n",
 "    'base_estimator': [Stree(random_state=random_state)],\n",
 "    'n_estimators': [10, 25],\n",
 "    'learning_rate': [.5, 1],\n",
-"    'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
-"    'base_estimator__tol': [.1, 1e-02],\n",
-"    'base_estimator__max_depth': [3, 5, 7],\n",
-"    'base_estimator__C': [1, 7, 55],\n",
-"    'base_estimator__kernel': ['linear']\n",
+"    'estimator__split_criteria': ['max_samples', 'impurity'],\n",
+"    'estimator__tol': [.1, 1e-02],\n",
+"    'estimator__max_depth': [3, 5, 7],\n",
+"    'estimator__C': [1, 7, 55],\n",
+"    'estimator__kernel': ['linear']\n",
 "},\n",
 "{\n",
 "    'base_estimator': [Stree(random_state=random_state)],\n",
 "    'n_estimators': [10, 25],\n",
 "    'learning_rate': [.5, 1],\n",
-"    'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
-"    'base_estimator__tol': [.1, 1e-02],\n",
-"    'base_estimator__max_depth': [3, 5, 7],\n",
-"    'base_estimator__C': [1, 7, 55],\n",
-"    'base_estimator__degree': [3, 5, 7],\n",
-"    'base_estimator__kernel': ['poly']\n",
+"    'estimator__split_criteria': ['max_samples', 'impurity'],\n",
+"    'estimator__tol': [.1, 1e-02],\n",
+"    'estimator__max_depth': [3, 5, 7],\n",
+"    'estimator__C': [1, 7, 55],\n",
+"    'estimator__degree': [3, 5, 7],\n",
+"    'estimator__kernel': ['poly']\n",
 "},\n",
 "{\n",
 "    'base_estimator': [Stree(random_state=random_state)],\n",
 "    'n_estimators': [10, 25],\n",
 "    'learning_rate': [.5, 1],\n",
-"    'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
-"    'base_estimator__tol': [.1, 1e-02],\n",
-"    'base_estimator__max_depth': [3, 5, 7],\n",
-"    'base_estimator__C': [1, 7, 55],\n",
-"    'base_estimator__gamma': [.1, 1, 10],\n",
-"    'base_estimator__kernel': ['rbf']\n",
+"    'estimator__split_criteria': ['max_samples', 'impurity'],\n",
+"    'estimator__tol': [.1, 1e-02],\n",
+"    'estimator__max_depth': [3, 5, 7],\n",
+"    'estimator__C': [1, 7, 55],\n",
+"    'estimator__gamma': [.1, 1, 10],\n",
+"    'estimator__kernel': ['rbf']\n",
 "}]"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
 "Stree().get_params()"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": null,
 "metadata": {
 "colab": {},
 "colab_type": "code",
 "id": "CrcB8o6EDZE5",
 "outputId": "7703413a-d563-4289-a13b-532f38f82762",
 "tags": []
 },
 "outputs": [],
 "source": [
 "clf = AdaBoostClassifier(random_state=random_state, algorithm=\"SAMME\")\n",
 "grid = GridSearchCV(clf, parameters, verbose=5, n_jobs=-1, return_train_score=True)\n",
 "grid.fit(Xtrain, ytrain)"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": null,
 "metadata": {
 "colab": {},
 "colab_type": "code",
 "id": "ZjX88NoYDZE8",
 "outputId": "285163c8-fa33-4915-8ae7-61c4f7844344",
 "tags": []
 },
 "outputs": [],
 "source": [
 "print(\"Best estimator: \", grid.best_estimator_)\n",
 "print(\"Best hyperparameters: \", grid.best_params_)\n",
 "print(\"Best accuracy: \", grid.best_score_)"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "Best estimator: AdaBoostClassifier(algorithm='SAMME',\n",
 "                   base_estimator=Stree(C=55, max_depth=7, random_state=1,\n",
 "                                        split_criteria='max_samples', tol=0.1),\n",
 "                   learning_rate=0.5, n_estimators=25, random_state=1)\n",
-"Best hyperparameters: {'base_estimator': Stree(C=55, max_depth=7, random_state=1, split_criteria='max_samples', tol=0.1), 'base_estimator__C': 55, 'base_estimator__kernel': 'linear', 'base_estimator__max_depth': 7, 'base_estimator__split_criteria': 'max_samples', 'base_estimator__tol': 0.1, 'learning_rate': 0.5, 'n_estimators': 25}"
+"Best hyperparameters: {'base_estimator': Stree(C=55, max_depth=7, random_state=1, split_criteria='max_samples', tol=0.1), 'estimator__C': 55, 'estimator__kernel': 'linear', 'estimator__max_depth': 7, 'estimator__split_criteria': 'max_samples', 'estimator__tol': 0.1, 'learning_rate': 0.5, 'n_estimators': 25}"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "Best accuracy: 0.9511777695988222"
 ]
 }
 ],
 "metadata": {
 "colab": {
 "name": "gridsearch.ipynb",
 "provenance": []
 },
 "kernelspec": {
 "display_name": "Python 3",
 "language": "python",
 "name": "python3"
 },
 "language_info": {
 "codemirror_mode": {
 "name": "ipython",
 "version": 3
 },
 "file_extension": ".py",
 "mimetype": "text/x-python",
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
 "version": "3.8.2-final"
 }
 },
 "nbformat": 4,
 "nbformat_minor": 4
 }
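The substantive change in this notebook is the prefix of the nested grid-search keys: scikit-learn 1.2 renamed the sub-estimator parameter of AdaBoost from `base_estimator` to `estimator` (see the New_version_sklearn commit above), so keys such as `base_estimator__C` become `estimator__C`. A self-contained sketch of the same pattern, with arbitrary values and dataset:

```python
# Hedged sketch of nested parameter grids over AdaBoost + Stree (sklearn >= 1.2);
# `estimator__*` keys reach the Stree instance inside the ensemble.
from sklearn.datasets import load_iris
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import GridSearchCV

from stree import Stree

X, y = load_iris(return_X_y=True)
parameters = {
    "estimator": [Stree(random_state=1)],  # formerly "base_estimator"
    "n_estimators": [10, 25],
    "estimator__C": [1, 7, 55],            # formerly "base_estimator__C"
    "estimator__kernel": ["linear"],
}
clf = AdaBoostClassifier(random_state=1, algorithm="SAMME")
grid = GridSearchCV(clf, parameters, n_jobs=-1).fit(X, y)
print(grid.best_params_)
```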


@@ -1,5 +1,65 @@
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[project]
+name = "STree"
+dependencies = ["scikit-learn>1.0", "mufs"]
+license = { file = "LICENSE" }
+description = "Oblique decision tree with svm nodes."
+readme = "README.md"
+authors = [
+    { name = "Ricardo Montañana", email = "ricardo.montanana@alu.uclm.es" },
+]
+dynamic = ['version']
+requires-python = ">=3.11"
+keywords = [
+    "scikit-learn",
+    "oblique-classifier",
+    "oblique-decision-tree",
+    "decision-tree",
+    "svm",
+    "svc",
+]
+classifiers = [
+    "Development Status :: 5 - Production/Stable",
+    "Intended Audience :: Science/Research",
+    "Intended Audience :: Developers",
+    "Topic :: Software Development",
+    "Topic :: Scientific/Engineering",
+    "License :: OSI Approved :: MIT License",
+    "Natural Language :: English",
+    "Operating System :: OS Independent",
+    "Programming Language :: Python :: 3.11",
+    "Programming Language :: Python :: 3.12",
+]
+
+[project.optional-dependencies]
+dev = ["black", "flake8", "coverage", "hatch", "pip-audit"]
+doc = ["sphinx", "myst-parser", "sphinx_rtd_theme", "sphinx-autodoc-typehints"]
+
+[project.urls]
+Code = "https://github.com/Doctorado-ML/STree"
+Documentation = "https://stree.readthedocs.io/en/latest/index.html"
+
+[tool.hatch.version]
+path = "stree/_version.py"
+
+[tool.hatch.build.targets.sdist]
+include = ["/stree"]
+
+[tool.coverage.run]
+branch = true
+source = ["stree"]
+command_line = "-m unittest discover -s stree.tests"
+
+[tool.coverage.report]
+show_missing = true
+fail_under = 100
+
 [tool.black]
 line-length = 79
+target-version = ["py311"]
 include = '\.pyi?$'
 exclude = '''
 /(
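The `dynamic = ['version']` entry means hatch reads the version at build time from the file named under `[tool.hatch.version]`. A sketch of what `stree/_version.py` is assumed to contain (the number shown is illustrative, not the real release):

```python
# stree/_version.py -- single source of truth for the package version;
# hatch reads __version__ from here and Stree.py imports it (see the diff below).
__version__ = "1.4.0"
```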


@@ -1,2 +1,3 @@
-scikit-learn>0.24
+scikit-learn==1.5.2
+coverage
 mufs


@@ -1 +0,0 @@
-python-3.8


@@ -1,50 +0,0 @@
-import setuptools
-
-
-def readme():
-    with open("README.md") as f:
-        return f.read()
-
-
-def get_data(field):
-    item = ""
-    with open("stree/__init__.py") as f:
-        for line in f.readlines():
-            if line.startswith(f"__{field}__"):
-                delim = '"' if '"' in line else "'"
-                item = line.split(delim)[1]
-                break
-        else:
-            raise RuntimeError(f"Unable to find {field} string.")
-    return item
-
-
-setuptools.setup(
-    name="STree",
-    version=get_data("version"),
-    license=get_data("license"),
-    description="Oblique decision tree with svm nodes",
-    long_description=readme(),
-    long_description_content_type="text/markdown",
-    packages=setuptools.find_packages(),
-    url="https://github.com/Doctorado-ML/STree#stree",
-    project_urls={
-        "Code": "https://github.com/Doctorado-ML/STree",
-        "Documentation": "https://stree.readthedocs.io/en/latest/index.html",
-    },
-    author=get_data("author"),
-    author_email=get_data("author_email"),
-    keywords="scikit-learn oblique-classifier oblique-decision-tree decision-\
-tree svm svc",
-    classifiers=[
-        "Development Status :: 5 - Production/Stable",
-        "License :: OSI Approved :: " + get_data("license"),
-        "Programming Language :: Python :: 3.8",
-        "Natural Language :: English",
-        "Topic :: Scientific/Engineering :: Artificial Intelligence",
-        "Intended Audience :: Science/Research",
-    ],
-    install_requires=["scikit-learn", "mufs"],
-    test_suite="stree.tests",
-    zip_safe=False,
-)


@@ -68,6 +68,7 @@ class Snode:
         self._impurity = impurity
         self._partition_column: int = -1
         self._scaler = scaler
+        self._proba = None

     @classmethod
     def copy(cls, node: "Snode") -> "Snode":
@@ -127,23 +128,44 @@ class Snode:
     def get_up(self) -> "Snode":
         return self._up

-    def make_predictor(self):
+    def make_predictor(self, num_classes: int) -> None:
         """Compute the class of the predictor and its belief based on the
         subdataset of the node only if it is a leaf
         """
         if not self.is_leaf():
             return
         classes, card = np.unique(self._y, return_counts=True)
-        if len(classes) > 1:
+        self._proba = np.zeros((num_classes,), dtype=np.int64)
+        for c, n in zip(classes, card):
+            self._proba[c] = n
+        try:
             max_card = max(card)
             self._class = classes[card == max_card][0]
             self._belief = max_card / np.sum(card)
-        else:
-            self._belief = 1
-            try:
-                self._class = classes[0]
-            except IndexError:
-                self._class = None
+        except ValueError:
+            self._class = None
+
+    def graph(self):
+        """
+        Return a string representing the node in graphviz format
+        """
+        output = ""
+        count_values = np.unique(self._y, return_counts=True)
+        if self.is_leaf():
+            output += (
+                f'N{id(self)} [shape=box style=filled label="'
+                f"class={self._class} impurity={self._impurity:.3f} "
+                f'counts={self._proba}"];\n'
+            )
+        else:
+            output += (
+                f'N{id(self)} [label="#features={len(self._features)} '
+                f"classes={count_values[0]} samples={count_values[1]} "
+                f'({sum(count_values[1])})" fontcolor=black];\n'
+            )
+            output += f"N{id(self)} -> N{id(self.get_up())} [color=black];\n"
+            output += f"N{id(self)} -> N{id(self.get_down())} [color=black];\n"
+        return output

     def __str__(self) -> str:
         count_values = np.unique(self._y, return_counts=True)
def __str__(self) -> str: def __str__(self) -> str:
count_values = np.unique(self._y, return_counts=True) count_values = np.unique(self._y, return_counts=True)
@@ -202,7 +224,8 @@ class Splitter:
     max_features < num_features). Supported strategies are: “best”: sklearn
     SelectKBest algorithm is used in every node to choose the max_features
     best features. “random”: The algorithm generates 5 candidates and
-    choose the best (max. info. gain) of them. "mutual": Chooses the best
+    choose the best (max. info. gain) of them. “trandom”: The algorithm
+    generates only one random combination. "mutual": Chooses the best
     features w.r.t. their mutual info with the label. "cfs": Apply
     Correlation-based Feature Selection. "fcbf": Apply Fast Correlation-
     Based, by default None
@@ -244,7 +267,6 @@ class Splitter:
         random_state=None,
         normalize=False,
     ):
-
         self._clf = clf
         self._random_state = random_state
         if random_state is not None:
@@ -366,9 +388,8 @@ class Splitter:
             .get_support(indices=True)
         )

-    @staticmethod
     def _fs_mutual(
-        dataset: np.array, labels: np.array, max_features: int
+        self, dataset: np.array, labels: np.array, max_features: int
     ) -> tuple:
         """Return the best features with mutual information with labels
@@ -388,10 +409,13 @@
             indices of the features selected
         """
         # return best features with mutual info with the label
-        feature_list = mutual_info_classif(dataset, labels)
+        feature_list = mutual_info_classif(
+            dataset, labels, random_state=self._random_state
+        )
         return tuple(
             sorted(
-                range(len(feature_list)), key=lambda sub: feature_list[sub]
+                range(len(feature_list)),
+                key=lambda sub: feature_list[sub],
             )[-max_features:]
         )
@@ -506,7 +530,10 @@ class Splitter:
         return entropy

     def information_gain(
-        self, labels: np.array, labels_up: np.array, labels_dn: np.array
+        self,
+        labels: np.array,
+        labels_up: np.array,
+        labels_dn: np.array,
     ) -> float:
         """Compute information gain of a split candidate
@@ -719,7 +746,7 @@ class Splitter:
             Train time - True / Test time - False
         """
         # data contains the distances of every sample to every class hyperplane
-        # array of (m, nc) nc = # classes
+        # array of (m, nc) nc = k if ovr, nc = k*(k-1)/2 if ovo
         data = self._distances(node, samples)
         if data.shape[0] < self._min_samples_split:
             # there aren't enough samples to split
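The updated comment pins down the width of the distance matrix: one column per class under one-vs-rest, one per class pair under one-vs-one. The same shapes can be seen with plain scikit-learn SVC (digits chosen because k = 10 makes the difference visible):

```python
from sklearn.datasets import load_digits
from sklearn.svm import SVC

X, y = load_digits(return_X_y=True)  # k = 10 classes

ovr = SVC(decision_function_shape="ovr").fit(X, y)
ovo = SVC(decision_function_shape="ovo").fit(X, y)
print(ovr.decision_function(X).shape)  # (1797, 10) -> nc = k
print(ovo.decision_function(X).shape)  # (1797, 45) -> nc = k*(k-1)/2
```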

View File

@@ -17,6 +17,7 @@ from sklearn.utils.validation import (
     _check_sample_weight,
 )
 from .Splitter import Splitter, Snode, Siterator
+from ._version import __version__


 class Stree(BaseEstimator, ClassifierMixin):
@@ -82,7 +83,8 @@ class Stree(BaseEstimator, ClassifierMixin):
     max_features < num_features). Supported strategies are: “best”: sklearn
     SelectKBest algorithm is used in every node to choose the max_features
     best features. “random”: The algorithm generates 5 candidates and
-    choose the best (max. info. gain) of them. "mutual": Chooses the best
+    choose the best (max. info. gain) of them. “trandom”: The algorithm
+    generates only one random combination. "mutual": Chooses the best
     features w.r.t. their mutual info with the label. "cfs": Apply
     Correlation-based Feature Selection. "fcbf": Apply Fast Correlation-
     Based , by default "random"
@@ -128,7 +130,7 @@ class Stree(BaseEstimator, ClassifierMixin):
     References
     ----------
     R. Montañana, J. A. Gámez, J. M. Puerta, "STree: a single multi-class
-    oblique decision tree based on support vector machines.", 2021 LNAI...
+    oblique decision tree based on support vector machines.", 2021 LNAI 12882
     """
@@ -137,7 +139,7 @@ class Stree(BaseEstimator, ClassifierMixin):
         self,
         C: float = 1.0,
         kernel: str = "linear",
-        max_iter: int = 1e5,
+        max_iter: int = int(1e5),
         random_state: int = None,
         max_depth: int = None,
         tol: float = 1e-4,
@@ -151,7 +153,6 @@ class Stree(BaseEstimator, ClassifierMixin):
         multiclass_strategy: str = "ovo",
         normalize: bool = False,
     ):
         self.max_iter = max_iter
         self.C = C
         self.kernel = kernel
@@ -167,6 +168,17 @@ class Stree(BaseEstimator, ClassifierMixin):
         self.splitter = splitter
         self.normalize = normalize
         self.multiclass_strategy = multiclass_strategy
+        self.depth_ = 0
+
+    @staticmethod
+    def version() -> str:
+        """Return the version of the package."""
+        return __version__
+
+    def __call__(self) -> None:
+        """Only added to comply with scikit-learn base estimator for
+        ensembles"""
+        pass
     def _more_tags(self) -> dict:
         """Required by sklearn to supply features of the classifier
@@ -178,7 +190,10 @@ class Stree(BaseEstimator, ClassifierMixin):
         return {"requires_y": True}

     def fit(
-        self, X: np.ndarray, y: np.ndarray, sample_weight: np.array = None
+        self,
+        X: np.ndarray,
+        y: np.ndarray,
+        sample_weight: np.array = None,
     ) -> "Stree":
         """Build the tree based on the dataset of samples and its labels
@@ -307,7 +322,7 @@ class Stree(BaseEstimator, ClassifierMixin):
         if np.unique(y).shape[0] == 1:
             # only 1 class => pure dataset
             node.set_title(title + ", <pure>")
-            node.make_predictor()
+            node.make_predictor(self.n_classes_)
             return node
         # Train the model
         clf = self._build_clf()
@@ -326,14 +341,18 @@ class Stree(BaseEstimator, ClassifierMixin):
         if X_U is None or X_D is None:
             # didn't part anything
             node.set_title(title + ", <cgaf>")
-            node.make_predictor()
+            node.make_predictor(self.n_classes_)
             return node
         node.set_up(
             self._train(X_U, y_u, sw_u, depth + 1, title + f" - Up({depth+1})")
         )
         node.set_down(
             self._train(
-                X_D, y_d, sw_d, depth + 1, title + f" - Down({depth+1})"
+                X_D,
+                y_d,
+                sw_d,
+                depth + 1,
+                title + f" - Down({depth+1})",
             )
         )
         return node
@@ -360,28 +379,100 @@ class Stree(BaseEstimator, ClassifierMixin):
             )
         )

-    @staticmethod
-    def _reorder_results(y: np.array, indices: np.array) -> np.array:
-        """Reorder an array based on the array of indices passed
+    def __predict_class(self, X: np.array) -> np.array:
+        """Compute the predicted class for the samples in X. Returns the number
+        of samples of each class in the corresponding leaf node.

         Parameters
         ----------
-        y : np.array
-            data untidy
-        indices : np.array
-            indices used to set order
+        X : np.array
+            Array of samples

         Returns
         -------
         np.array
-            array y ordered
+            Array of shape (n_samples, n_classes) with the number of samples
+            of each class in the corresponding leaf node
         """
-        # return array of same type given in y
-        y_ordered = y.copy()
-        indices = indices.astype(int)
-        for i, index in enumerate(indices):
-            y_ordered[index] = y[i]
-        return y_ordered
+
+        def compute_prediction(xp, indices, node):
+            if xp is None:
+                return
+            if node.is_leaf():
+                # set a class for indices
+                result[indices] = node._proba
+                return
+            self.splitter_.partition(xp, node, train=False)
+            x_u, x_d = self.splitter_.part(xp)
+            i_u, i_d = self.splitter_.part(indices)
+            compute_prediction(x_u, i_u, node.get_up())
+            compute_prediction(x_d, i_d, node.get_down())
+
+        # setup prediction & make it happen
+        result = np.zeros((X.shape[0], self.n_classes_))
+        indices = np.arange(X.shape[0])
+        compute_prediction(X, indices, self.tree_)
+        return result
+
+    def check_predict(self, X) -> np.array:
+        """Checks predict and predict_proba preconditions. If input X is not an
+        np.array convert it to one.
+
+        Parameters
+        ----------
+        X : np.ndarray
+            Array of samples
+
+        Returns
+        -------
+        np.array
+            Array of samples
+
+        Raises
+        ------
+        ValueError
+            If number of features of X is different of the number of features
+            in training data
+        """
+        check_is_fitted(self, ["tree_"])
+        # Input validation
+        X = check_array(X)
+        if X.shape[1] != self.n_features_:
+            raise ValueError(
+                f"Expected {self.n_features_} features but got "
+                f"({X.shape[1]})"
+            )
+        return X
+
+    def predict_proba(self, X: np.array) -> np.array:
+        """Predict class probabilities of the input samples X.
+
+        The predicted class probability is the fraction of samples of the same
+        class in a leaf.
+
+        Parameters
+        ----------
+        X : dataset of samples.
+
+        Returns
+        -------
+        proba : array of shape (n_samples, n_classes)
+            The class probabilities of the input samples.
+
+        Raises
+        ------
+        ValueError
+            if dataset with inconsistent number of features
+        NotFittedError
+            if model is not fitted
+        """
+        X = self.check_predict(X)
+        # return # of samples of each class in leaf node
+        values = self.__predict_class(X)
+        normalizer = values.sum(axis=1)[:, np.newaxis]
+        normalizer[normalizer == 0.0] = 1.0
+        return values / normalizer

     def predict(self, X: np.array) -> np.array:
         """Predict labels for each sample in dataset passed
@@ -403,40 +494,45 @@ class Stree(BaseEstimator, ClassifierMixin):
         NotFittedError
             if model is not fitted
         """
-
-        def predict_class(
-            xp: np.array, indices: np.array, node: Snode
-        ) -> np.array:
-            if xp is None:
-                return [], []
-            if node.is_leaf():
-                # set a class for every sample in dataset
-                prediction = np.full((xp.shape[0], 1), node._class)
-                return prediction, indices
-            self.splitter_.partition(xp, node, train=False)
-            x_u, x_d = self.splitter_.part(xp)
-            i_u, i_d = self.splitter_.part(indices)
-            prx_u, prin_u = predict_class(x_u, i_u, node.get_up())
-            prx_d, prin_d = predict_class(x_d, i_d, node.get_down())
-            return np.append(prx_u, prx_d), np.append(prin_u, prin_d)
-
-        # sklearn check
-        check_is_fitted(self, ["tree_"])
-        # Input validation
-        X = check_array(X)
-        if X.shape[1] != self.n_features_:
-            raise ValueError(
-                f"Expected {self.n_features_} features but got "
-                f"({X.shape[1]})"
-            )
-        # setup prediction & make it happen
-        indices = np.arange(X.shape[0])
-        result = (
-            self._reorder_results(*predict_class(X, indices, self.tree_))
-            .astype(int)
-            .ravel()
-        )
-        return self.classes_[result]
+        X = self.check_predict(X)
+        return self.classes_[np.argmax(self.__predict_class(X), axis=1)]
+
+    def get_nodes(self) -> int:
+        """Return the number of nodes in the tree
+
+        Returns
+        -------
+        int
+            number of nodes
+        """
+        nodes = 0
+        for _ in self:
+            nodes += 1
+        return nodes
+
+    def get_leaves(self) -> int:
+        """Return the number of leaves in the tree
+
+        Returns
+        -------
+        int
+            number of leaves
+        """
+        leaves = 0
+        for node in self:
+            if node.is_leaf():
+                leaves += 1
+        return leaves
+
+    def get_depth(self) -> int:
+        """Return the depth of the tree
+
+        Returns
+        -------
+        int
+            depth of the tree
+        """
+        return self.depth_

     def nodes_leaves(self) -> tuple:
         """Compute the number of nodes and leaves in the built tree
@@ -469,6 +565,23 @@ class Stree(BaseEstimator, ClassifierMixin):
             tree = None
         return Siterator(tree)

+    def graph(self, title="") -> str:
+        """Graphviz code representing the tree
+
+        Returns
+        -------
+        str
+            graphviz code
+        """
+        output = (
+            "digraph STree {\nlabel=<STree "
+            f"{title}>\nfontsize=30\nfontcolor=blue\nlabelloc=t\n"
+        )
+        for node in self:
+            output += node.graph()
+        output += "}\n"
+        return output
+
     def __str__(self) -> str:
         """String representation of the tree

View File

@@ -1,10 +1,9 @@
 from .Strees import Stree, Siterator
+from ._version import __version__

-__version__ = "1.2.1"
 __author__ = "Ricardo Montañana Gómez"
 __copyright__ = "Copyright 2020-2021, Ricardo Montañana Gómez"
 __license__ = "MIT License"
 __author_email__ = "ricardo.montanana@alu.uclm.es"

-__all__ = ["Stree", "Siterator"]
+__all__ = ["__version__", "Stree", "Siterator"]

stree/_version.py Normal file
View File

@@ -0,0 +1 @@
+__version__ = "1.4.0"

View File

@@ -67,10 +67,28 @@ class Snode_test(unittest.TestCase):
     def test_make_predictor_on_leaf(self):
         test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
-        test.make_predictor()
+        test.make_predictor(2)
         self.assertEqual(1, test._class)
         self.assertEqual(0.75, test._belief)
         self.assertEqual(-1, test._partition_column)
+        self.assertListEqual([1, 3], test._proba.tolist())
+
+    def test_make_predictor_on_not_leaf(self):
+        test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
+        test.set_up(Snode(None, [1], [1], [], 0.0, "another_test"))
+        test.make_predictor(2)
+        self.assertIsNone(test._class)
+        self.assertEqual(0, test._belief)
+        self.assertEqual(-1, test._partition_column)
+        self.assertEqual(-1, test.get_up()._partition_column)
+        self.assertIsNone(test._proba)
+
+    def test_make_predictor_on_leaf_bogus_data(self):
+        test = Snode(None, [1, 2, 3, 4], [], [], 0.0, "test")
+        test.make_predictor(2)
+        self.assertIsNone(test._class)
+        self.assertEqual(-1, test._partition_column)
+        self.assertListEqual([0, 0], test._proba.tolist())

     def test_set_title(self):
         test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
@@ -97,21 +115,6 @@ class Snode_test(unittest.TestCase):
         test.set_features([1, 2])
         self.assertListEqual([1, 2], test.get_features())

-    def test_make_predictor_on_not_leaf(self):
-        test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
-        test.set_up(Snode(None, [1], [1], [], 0.0, "another_test"))
-        test.make_predictor()
-        self.assertIsNone(test._class)
-        self.assertEqual(0, test._belief)
-        self.assertEqual(-1, test._partition_column)
-        self.assertEqual(-1, test.get_up()._partition_column)
-
-    def test_make_predictor_on_leaf_bogus_data(self):
-        test = Snode(None, [1, 2, 3, 4], [], [], 0.0, "test")
-        test.make_predictor()
-        self.assertIsNone(test._class)
-        self.assertEqual(-1, test._partition_column)
-
     def test_copy_node(self):
         px = [1, 2, 3, 4]
         py = [1]

View File

@@ -10,6 +10,7 @@ from sklearn.svm import LinearSVC
 from stree import Stree
 from stree.Splitter import Snode
 from .utils import load_dataset
+from .._version import __version__


 class Stree_test(unittest.TestCase):
@@ -114,6 +115,38 @@ class Stree_test(unittest.TestCase):
         yp = clf.fit(X, y).predict(X[:num, :])
         self.assertListEqual(y[:num].tolist(), yp.tolist())

+    def test_multiple_predict_proba(self):
+        expected = {
+            "liblinear": {
+                0: [0.02401129943502825, 0.9759887005649718],
+                17: [0.9282970550576184, 0.07170294494238157],
+            },
+            "linear": {
+                0: [0.029329608938547486, 0.9706703910614525],
+                17: [0.9298469387755102, 0.07015306122448979],
+            },
+            "rbf": {
+                0: [0.023448275862068966, 0.976551724137931],
+                17: [0.9458064516129032, 0.05419354838709677],
+            },
+            "poly": {
+                0: [0.01601164483260553, 0.9839883551673945],
+                17: [0.9089790897908979, 0.0910209102091021],
+            },
+        }
+        indices = [0, 17]
+        X, y = load_dataset(self._random_state)
+        for kernel in ["liblinear", "linear", "rbf", "poly"]:
+            clf = Stree(
+                kernel=kernel,
+                multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
+                random_state=self._random_state,
+            )
+            yp = clf.fit(X, y).predict_proba(X)
+            for index in indices:
+                for exp, comp in zip(expected[kernel][index], yp[index]):
+                    self.assertAlmostEqual(exp, comp)

     def test_single_vs_multiple_prediction(self):
         """Check if predicting sample by sample gives the same result as
         predicting all samples at once
@@ -206,6 +239,7 @@ class Stree_test(unittest.TestCase):
         )
         tcl.fit(*load_dataset(self._random_state))
         self.assertEqual(depth, tcl.depth_)
+        self.assertEqual(depth, tcl.get_depth())

     def test_unfitted_tree_is_iterable(self):
         tcl = Stree()
@@ -255,12 +289,12 @@ class Stree_test(unittest.TestCase):
             "impurity sigmoid": 0.824,
         },
         "Iris": {
-            "max_samples liblinear": 0.9550561797752809,
+            "max_samples liblinear": 0.9887640449438202,
             "max_samples linear": 1.0,
             "max_samples rbf": 0.6685393258426966,
             "max_samples poly": 0.6853932584269663,
             "max_samples sigmoid": 0.6404494382022472,
-            "impurity liblinear": 0.9550561797752809,
+            "impurity liblinear": 0.9887640449438202,
             "impurity linear": 1.0,
             "impurity rbf": 0.6685393258426966,
             "impurity poly": 0.6853932584269663,
@@ -273,10 +307,10 @@ class Stree_test(unittest.TestCase):
         for criteria in ["max_samples", "impurity"]:
             for kernel in self._kernels:
                 clf = Stree(
-                    max_iter=1e4,
-                    multiclass_strategy="ovr"
-                    if kernel == "liblinear"
-                    else "ovo",
+                    max_iter=int(1e4),
+                    multiclass_strategy=(
+                        "ovr" if kernel == "liblinear" else "ovo"
+                    ),
                     kernel=kernel,
                     random_state=self._random_state,
                 )
@@ -357,6 +391,7 @@ class Stree_test(unittest.TestCase):
     # Tests of score
     def test_score_binary(self):
+        """Check score for binary classification."""
         X, y = load_dataset(self._random_state)
         accuracies = [
             0.9506666666666667,
@@ -379,6 +414,7 @@ class Stree_test(unittest.TestCase):
         self.assertAlmostEqual(accuracy_expected, accuracy_score)

     def test_score_max_features(self):
+        """Check score using max_features."""
         X, y = load_dataset(self._random_state)
         clf = Stree(
             kernel="liblinear",
@@ -390,6 +426,7 @@ class Stree_test(unittest.TestCase):
         self.assertAlmostEqual(0.9453333333333334, clf.score(X, y))

     def test_bogus_splitter_parameter(self):
+        """Check that bogus splitter parameter raises exception."""
         clf = Stree(splitter="duck")
         with self.assertRaises(ValueError):
             clf.fit(*load_dataset())
@@ -403,10 +440,10 @@ class Stree_test(unittest.TestCase):
         clf.fit(X, y)
         score = clf.score(X, y)
         # Check accuracy of the whole model
-        self.assertAlmostEquals(0.98, score, 5)
+        self.assertAlmostEqual(0.98, score, 5)
         svm = LinearSVC(random_state=0)
         svm.fit(X, y)
-        self.assertAlmostEquals(0.9666666666666667, svm.score(X, y), 5)
+        self.assertAlmostEqual(0.9666666666666667, svm.score(X, y), 5)
         data = svm.decision_function(X)
         expected = [
             0.4444444444444444,
@@ -418,7 +455,7 @@ class Stree_test(unittest.TestCase):
         ty[data > 0] = 1
         ty = ty.astype(int)
         for i in range(3):
-            self.assertAlmostEquals(
+            self.assertAlmostEqual(
                 expected[i],
                 clf.splitter_._gini(ty[:, i]),
             )
@@ -445,6 +482,7 @@ class Stree_test(unittest.TestCase):
         self.assertListEqual([47], resdn[1].tolist())

     def test_score_multiclass_rbf(self):
+        """Test score for multiclass classification with rbf kernel."""
         X, y = load_dataset(
             random_state=self._random_state,
             n_classes=3,
@@ -462,6 +500,7 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(1.0, clf2.fit(X, y).score(X, y))

     def test_score_multiclass_poly(self):
+        """Test score for multiclass classification with poly kernel."""
         X, y = load_dataset(
             random_state=self._random_state,
             n_classes=3,
@@ -483,6 +522,7 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(1.0, clf2.fit(X, y).score(X, y))

     def test_score_multiclass_liblinear(self):
+        """Test score for multiclass classification with liblinear kernel."""
         X, y = load_dataset(
             random_state=self._random_state,
             n_classes=3,
@@ -508,6 +548,7 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(1.0, clf2.fit(X, y).score(X, y))

     def test_score_multiclass_sigmoid(self):
+        """Test score for multiclass classification with sigmoid kernel."""
         X, y = load_dataset(
             random_state=self._random_state,
             n_classes=3,
@@ -528,6 +569,7 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(0.9662921348314607, clf2.fit(X, y).score(X, y))

     def test_score_multiclass_linear(self):
+        """Test score for multiclass classification with linear kernel."""
         warnings.filterwarnings("ignore", category=ConvergenceWarning)
         warnings.filterwarnings("ignore", category=RuntimeWarning)
         X, y = load_dataset(
@@ -551,15 +593,17 @@ class Stree_test(unittest.TestCase):
         )
         self.assertEqual(0.9526666666666667, clf2.fit(X, y).score(X, y))
         X, y = load_wine(return_X_y=True)
-        self.assertEqual(0.9831460674157303, clf.fit(X, y).score(X, y))
+        self.assertEqual(0.9887640449438202, clf.fit(X, y).score(X, y))
         self.assertEqual(1.0, clf2.fit(X, y).score(X, y))

     def test_zero_all_sample_weights(self):
+        """Test exception raises when all sample weights are zero."""
         X, y = load_dataset(self._random_state)
         with self.assertRaises(ValueError):
             Stree().fit(X, y, np.zeros(len(y)))

     def test_mask_samples_weighted_zero(self):
+        """Check that the weighted zero samples are masked."""
         X = np.array(
             [
                 [1, 1],
@@ -587,6 +631,7 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(model2.score(X, y, w), 1)

     def test_depth(self):
+        """Check depth of the tree."""
         X, y = load_dataset(
             random_state=self._random_state,
             n_classes=3,
@@ -596,12 +641,15 @@ class Stree_test(unittest.TestCase):
         clf = Stree(random_state=self._random_state)
         clf.fit(X, y)
         self.assertEqual(6, clf.depth_)
+        self.assertEqual(6, clf.get_depth())
         X, y = load_wine(return_X_y=True)
         clf = Stree(random_state=self._random_state)
         clf.fit(X, y)
         self.assertEqual(4, clf.depth_)
+        self.assertEqual(4, clf.get_depth())

     def test_nodes_leaves(self):
+        """Check number of nodes and leaves."""
         X, y = load_dataset(
             random_state=self._random_state,
             n_classes=3,
@@ -612,15 +660,20 @@ class Stree_test(unittest.TestCase):
         clf.fit(X, y)
         nodes, leaves = clf.nodes_leaves()
         self.assertEqual(31, nodes)
+        self.assertEqual(31, clf.get_nodes())
         self.assertEqual(16, leaves)
+        self.assertEqual(16, clf.get_leaves())
         X, y = load_wine(return_X_y=True)
         clf = Stree(random_state=self._random_state)
         clf.fit(X, y)
         nodes, leaves = clf.nodes_leaves()
         self.assertEqual(11, nodes)
+        self.assertEqual(11, clf.get_nodes())
         self.assertEqual(6, leaves)
+        self.assertEqual(6, clf.get_leaves())

     def test_nodes_leaves_artificial(self):
+        """Check leaves of artificial dataset."""
         n1 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test1")
         n2 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test2")
         n3 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test3")
@@ -636,15 +689,19 @@ class Stree_test(unittest.TestCase):
         clf.tree_ = n1
         nodes, leaves = clf.nodes_leaves()
         self.assertEqual(6, nodes)
+        self.assertEqual(6, clf.get_nodes())
         self.assertEqual(2, leaves)
+        self.assertEqual(2, clf.get_leaves())

     def test_bogus_multiclass_strategy(self):
+        """Check invalid multiclass strategy."""
         clf = Stree(multiclass_strategy="other")
         X, y = load_wine(return_X_y=True)
         with self.assertRaises(ValueError):
             clf.fit(X, y)

     def test_multiclass_strategy(self):
+        """Check multiclass strategy."""
         X, y = load_wine(return_X_y=True)
         clf_o = Stree(multiclass_strategy="ovo")
         clf_r = Stree(multiclass_strategy="ovr")
@@ -654,6 +711,7 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(0.9269662921348315, score_r)

     def test_incompatible_hyperparameters(self):
+        """Check incompatible hyperparameters."""
         X, y = load_wine(return_X_y=True)
         clf = Stree(kernel="liblinear", multiclass_strategy="ovo")
         with self.assertRaises(ValueError):
@@ -661,3 +719,55 @@ class Stree_test(unittest.TestCase):
         clf = Stree(multiclass_strategy="ovo", split_criteria="max_samples")
         with self.assertRaises(ValueError):
             clf.fit(X, y)
+
+    def test_version(self):
+        """Check STree version."""
+        clf = Stree()
+        self.assertEqual(__version__, clf.version())
+
+    def test_call(self) -> None:
+        """Check call method."""
+        clf = Stree()
+        self.assertIsNone(clf())
+
+    def test_graph(self):
+        """Check graphviz representation of the tree."""
+        X, y = load_wine(return_X_y=True)
+        clf = Stree(random_state=self._random_state)
+        expected_head = (
+            "digraph STree {\nlabel=<STree >\nfontsize=30\n"
+            "fontcolor=blue\nlabelloc=t\n"
+        )
+        expected_tail = (
+            ' [shape=box style=filled label="class=1 impurity=0.000 '
+            'counts=[0 1 0]"];\n}\n'
+        )
+        self.assertEqual(clf.graph(), expected_head + "}\n")
+        clf.fit(X, y)
+        computed = clf.graph()
+        computed_head = computed[: len(expected_head)]
+        num = -len(expected_tail)
+        computed_tail = computed[num:]
+        self.assertEqual(computed_head, expected_head)
+        self.assertEqual(computed_tail, expected_tail)
+
+    def test_graph_title(self):
+        """Check graphviz representation of the tree with a title."""
+        X, y = load_wine(return_X_y=True)
+        clf = Stree(random_state=self._random_state)
+        expected_head = (
+            "digraph STree {\nlabel=<STree Sample title>\nfontsize=30\n"
+            "fontcolor=blue\nlabelloc=t\n"
+        )
+        expected_tail = (
+            ' [shape=box style=filled label="class=1 impurity=0.000 '
+            'counts=[0 1 0]"];\n}\n'
+        )
+        self.assertEqual(clf.graph("Sample title"), expected_head + "}\n")
+        clf.fit(X, y)
+        computed = clf.graph("Sample title")
+        computed_head = computed[: len(expected_head)]
+        num = -len(expected_tail)
+        computed_tail = computed[num:]
+        self.assertEqual(computed_head, expected_head)
+        self.assertEqual(computed_tail, expected_tail)