Mirror of https://github.com/Doctorado-ML/STree.git
Synced 2025-08-17 16:36:01 +00:00

Compare commits (8 commits)
- acf10d8d7f
- 34bf539fa3
- 5a36c5d29b
- 5cef0f4875
- 28c7558f01
- e19d10f6a7
- 02de394c96
- a4aac9d310
.github/workflows/codeql-analysis.yml (vendored, new file)
@@ -0,0 +1,56 @@
name: "CodeQL"

on:
  push:
    branches: [ master ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ master ]
  schedule:
    - cron: '16 17 * * 3'

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        language: [ 'python' ]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
        # Learn more:
        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed

    steps:
    - name: Checkout repository
      uses: actions/checkout@v2

    # Initializes the CodeQL tools for scanning.
    - name: Initialize CodeQL
      uses: github/codeql-action/init@v1
      with:
        languages: ${{ matrix.language }}
        # If you wish to specify custom queries, you can do so here or in a config file.
        # By default, queries listed here will override any specified in a config file.
        # Prefix the list here with "+" to use these queries and those in the config file.
        # queries: ./path/to/local/query, your-org/your-repo/queries@main

    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
    # If this step fails, then you should remove it and run the build manually (see below)
    - name: Autobuild
      uses: github/codeql-action/autobuild@v1

    # ℹ️ Command-line programs to run using the OS shell.
    # 📚 https://git.io/JvXDl

    # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
    #    and modify them (or add more) to build your code if your project
    #    uses a compiled language

    #- run: |
    #   make bootstrap
    #   make release

    - name: Perform CodeQL Analysis
      uses: github/codeql-action/analyze@v1
LICENSE
@@ -1,6 +1,6 @@
 MIT License

-Copyright (c) 2020 Doctorado-ML
+Copyright (c) 2020-2021, Ricardo Montañana Gómez

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
Makefile (new file)
@@ -0,0 +1,42 @@
SHELL := /bin/bash
.DEFAULT_GOAL := help
.PHONY: coverage deps help lint push test

coverage: ## Run tests with coverage
	coverage erase
	coverage run -m unittest -v stree.tests
	coverage report -m

deps: ## Install dependencies
	pip install -r requirements.txt

lint: ## Lint and static-check
	black stree
	flake8 stree
	mypy stree

push: ## Push code with tags
	git push && git push --tags

test: ## Run tests
	python -m unittest -v stree.tests

doc: ## Update documentation
	make -C docs --makefile=Makefile html

help: ## Show help message
	@IFS=$$'\n' ; \
	help_lines=(`fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/##/:/'`); \
	printf "%s\n\n" "Usage: make [task]"; \
	printf "%-20s %s\n" "task" "help" ; \
	printf "%-20s %s\n" "------" "----" ; \
	for help_line in $${help_lines[@]}; do \
		IFS=$$':' ; \
		help_split=($$help_line) ; \
		help_command=`echo $${help_split[0]} | sed -e 's/^ *//' -e 's/ *$$//'` ; \
		help_info=`echo $${help_split[2]} | sed -e 's/^ *//' -e 's/ *$$//'` ; \
		printf '\033[36m'; \
		printf "%-20s %s" $$help_command ; \
		printf '\033[0m'; \
		printf "%s\n" $$help_info; \
	done
README.md
@@ -14,6 +14,10 @@ Oblique Tree classifier based on SVM nodes. The nodes are built and splitted wit
 pip install git+https://github.com/doctorado-ml/stree
 ```

+## Documentation
+
+Can be found in https://stree.readthedocs.io/en/latest/index.html
+
 ## Examples

 ### Jupyter notebooks
@@ -30,21 +34,23 @@ pip install git+https://github.com/doctorado-ml/stree

 ## Hyperparameters

 |     | **Hyperparameter**  | **Type/Values** | **Default** | **Meaning** |
 | --- | ------------------- | --------------- | ----------- | ----------- |
 | \* | C | \<float\> | 1.0 | Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. |
-| \* | kernel | {"linear", "poly", "rbf"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of ‘linear’, ‘poly’ or ‘rbf’. |
+| \* | kernel | {"liblinear", "linear", "poly", "rbf", "sigmoid"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of ‘liblinear’, ‘linear’, ‘poly’ or ‘rbf’. liblinear uses the [liblinear](https://www.csie.ntu.edu.tw/~cjlin/liblinear/) library and the rest use the [libsvm](https://www.csie.ntu.edu.tw/~cjlin/libsvm/) library through scikit-learn |
 | \* | max_iter | \<int\> | 1e5 | Hard limit on iterations within solver, or -1 for no limit. |
 | \* | random_state | \<int\> | None | Controls the pseudo random number generation for shuffling the data for probability estimates. Ignored when probability is False.<br>Pass an int for reproducible output across multiple function calls |
 | | max_depth | \<int\> | None | Specifies the maximum depth of the tree |
 | \* | tol | \<float\> | 1e-4 | Tolerance for stopping criterion. |
 | \* | degree | \<int\> | 3 | Degree of the polynomial kernel function (‘poly’). Ignored by all other kernels. |
 | \* | gamma | {"scale", "auto"} or \<float\> | scale | Kernel coefficient for ‘rbf’ and ‘poly’.<br>if gamma='scale' (default) is passed then it uses 1 / (n_features \* X.var()) as value of gamma,<br>if ‘auto’, uses 1 / n_features. |
-| | split_criteria | {"impurity", "max_samples"} | impurity | Decides (just in case of a multi class classification) which column (class) to use to split the dataset in a node\*\* |
+| | split_criteria | {"impurity", "max_samples"} | impurity | Decides (just in case of a multi class classification) which column (class) to use to split the dataset in a node\*\*. max_samples is incompatible with 'ovo' multiclass_strategy |
 | | criterion | {“gini”, “entropy”} | entropy | The function to measure the quality of a split (only used if max_features != num_features). <br>Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain. |
 | | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) for any |
 | | max_features | \<int\>, \<float\> <br><br>or {“auto”, “sqrt”, “log2”} | None | The number of features to consider when looking for the split:<br>If int, then consider max_features features at each split.<br>If float, then max_features is a fraction and int(max_features \* n_features) features are considered at each split.<br>If “auto”, then max_features=sqrt(n_features).<br>If “sqrt”, then max_features=sqrt(n_features).<br>If “log2”, then max_features=log2(n_features).<br>If None, then max_features=n_features. |
-| | splitter | {"best", "random"} | random | The strategy used to choose the feature set at each node (only used if max_features != num_features). <br>Supported strategies are “best” to choose the best feature set and “random” to choose a random combination. <br>The algorithm generates 5 candidates at most to choose from in both strategies. |
+| | splitter | {"best", "random", "mutual"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features). Supported strategies are: **“best”**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **“random”**: The algorithm generates 5 candidates and chooses one randomly. **"mutual"**: Chooses the best features w.r.t. their mutual info with the label |
+| | normalize | \<bool\> | False | If standardization of features should be applied on each node with the samples that reach it |
+| \* | multiclass_strategy | {"ovo", "ovr"} | "ovo" | Strategy to use with multiclass datasets, **"ovo"**: one versus one. **"ovr"**: one versus rest |

 \* Hyperparameter used by the support vector classifier of every node

@@ -61,3 +67,7 @@ Once we have the column to take into account for the split, the algorithm splits
 ```bash
 python -m unittest -v stree.tests
 ```
+
+## License
+
+STree is [MIT](https://github.com/doctorado-ml/stree/blob/master/LICENSE) licensed
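A short sketch exercising the hyperparameters this change introduces (the values are illustrative only):

```python
from sklearn.datasets import load_wine
from stree import Stree

X, y = load_wine(return_X_y=True)
# splitter="mutual", normalize and multiclass_strategy are new here;
# "ovr" is chosen because split_criteria="max_samples" is incompatible
# with the default "ovo" strategy (see the table above).
clf = Stree(
    kernel="liblinear",
    splitter="mutual",
    split_criteria="max_samples",
    max_features=5,
    multiclass_strategy="ovr",
    normalize=True,
    random_state=0,
).fit(X, y)
print(clf.score(X, y))
```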
@@ -1,8 +1,8 @@
 coverage:
   status:
     project:
       default:
-        target: 90%
+        target: 100%
 comment:
   layout: "reach, diff, flags, files"
   behavior: default
docs/Makefile (new file)
@@ -0,0 +1,20 @@
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS    ?=
SPHINXBUILD   ?= sphinx-build
SOURCEDIR     = source
BUILDDIR      = build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
docs/requirements.txt (new file)
@@ -0,0 +1,3 @@
sphinx
sphinx-rtd-theme
myst-parser
docs/source/api/Siterator.rst (new file)
@@ -0,0 +1,9 @@
Siterator
=========

.. automodule:: stree
.. autoclass:: Siterator
   :members:
   :undoc-members:
   :private-members:
   :show-inheritance:
docs/source/api/Snode.rst (new file)
@@ -0,0 +1,9 @@
Snode
=====

.. automodule:: stree
.. autoclass:: Snode
   :members:
   :undoc-members:
   :private-members:
   :show-inheritance:
docs/source/api/Splitter.rst (new file)
@@ -0,0 +1,9 @@
Splitter
========

.. automodule:: stree
.. autoclass:: Splitter
   :members:
   :undoc-members:
   :private-members:
   :show-inheritance:
docs/source/api/Stree.rst (new file)
@@ -0,0 +1,9 @@
Stree
=====

.. automodule:: stree
.. autoclass:: Stree
   :members:
   :undoc-members:
   :private-members:
   :show-inheritance:
docs/source/api/index.rst (new file)
@@ -0,0 +1,11 @@
API index
=========

.. toctree::
   :maxdepth: 2
   :caption: Contents:

   Stree
   Splitter
   Snode
   Siterator
docs/source/conf.py (new file)
@@ -0,0 +1,55 @@
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys

sys.path.insert(0, os.path.abspath("../../stree/"))


# -- Project information -----------------------------------------------------

project = "STree"
copyright = "2020 - 2021, Ricardo Montañana Gómez"
author = "Ricardo Montañana Gómez"

# The full version, including alpha/beta/rc tags
release = "1.0"


# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["myst_parser", "sphinx.ext.autodoc", "sphinx.ext.viewcode"]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
docs/source/example.md (new file)
@@ -0,0 +1,44 @@
# Examples

## Notebooks

- [](https://mybinder.org/v2/gh/Doctorado-ML/STree/master?urlpath=lab/tree/notebooks/benchmark.ipynb) Benchmark

- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/benchmark.ipynb) Benchmark

- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/features.ipynb) Some features

- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/gridsearch.ipynb) Gridsearch

- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/ensemble.ipynb) Ensembles

## Sample Code

```python
import time
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from stree import Stree

random_state = 1
X, y = load_iris(return_X_y=True)
Xtrain, Xtest, ytrain, ytest = train_test_split(
    X, y, test_size=0.2, random_state=random_state
)
now = time.time()
print("Predicting with max_features=sqrt(n_features)")
clf = Stree(random_state=random_state, max_features="auto")
clf.fit(Xtrain, ytrain)
print(f"Took {time.time() - now:.2f} seconds to train")
print(clf)
print(f"Classifier's accuracy (train): {clf.score(Xtrain, ytrain):.4f}")
print(f"Classifier's accuracy (test) : {clf.score(Xtest, ytest):.4f}")
print("=" * 40)
print("Predicting with max_features=n_features")
clf = Stree(random_state=random_state)
clf.fit(Xtrain, ytrain)
print(f"Took {time.time() - now:.2f} seconds to train")
print(clf)
print(f"Classifier's accuracy (train): {clf.score(Xtrain, ytrain):.4f}")
print(f"Classifier's accuracy (test) : {clf.score(Xtest, ytest):.4f}")
```
docs/source/example.png (new binary file, 3.1 MiB; not shown)
docs/source/hyperparameters.md (new file)
@@ -0,0 +1,29 @@
## Hyperparameters

|     | **Hyperparameter**  | **Type/Values** | **Default** | **Meaning** |
| --- | ------------------- | --------------- | ----------- | ----------- |
| \* | C | \<float\> | 1.0 | Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. |
| \* | kernel | {"liblinear", "linear", "poly", "rbf", "sigmoid"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of ‘liblinear’, ‘linear’, ‘poly’ or ‘rbf’. liblinear uses the [liblinear](https://www.csie.ntu.edu.tw/~cjlin/liblinear/) library and the rest use the [libsvm](https://www.csie.ntu.edu.tw/~cjlin/libsvm/) library through scikit-learn |
| \* | max_iter | \<int\> | 1e5 | Hard limit on iterations within solver, or -1 for no limit. |
| \* | random_state | \<int\> | None | Controls the pseudo random number generation for shuffling the data for probability estimates. Ignored when probability is False.<br>Pass an int for reproducible output across multiple function calls |
| | max_depth | \<int\> | None | Specifies the maximum depth of the tree |
| \* | tol | \<float\> | 1e-4 | Tolerance for stopping criterion. |
| \* | degree | \<int\> | 3 | Degree of the polynomial kernel function (‘poly’). Ignored by all other kernels. |
| \* | gamma | {"scale", "auto"} or \<float\> | scale | Kernel coefficient for ‘rbf’ and ‘poly’.<br>if gamma='scale' (default) is passed then it uses 1 / (n_features \* X.var()) as value of gamma,<br>if ‘auto’, uses 1 / n_features. |
| | split_criteria | {"impurity", "max_samples"} | impurity | Decides (just in case of a multi class classification) which column (class) to use to split the dataset in a node\*\*. max_samples is incompatible with 'ovo' multiclass_strategy |
| | criterion | {“gini”, “entropy”} | entropy | The function to measure the quality of a split (only used if max_features != num_features). <br>Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain. |
| | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) for any |
| | max_features | \<int\>, \<float\> <br><br>or {“auto”, “sqrt”, “log2”} | None | The number of features to consider when looking for the split:<br>If int, then consider max_features features at each split.<br>If float, then max_features is a fraction and int(max_features \* n_features) features are considered at each split.<br>If “auto”, then max_features=sqrt(n_features).<br>If “sqrt”, then max_features=sqrt(n_features).<br>If “log2”, then max_features=log2(n_features).<br>If None, then max_features=n_features. |
| | splitter | {"best", "random", "mutual"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features). Supported strategies are: **“best”**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **“random”**: The algorithm generates 5 candidates and chooses one randomly. **"mutual"**: Chooses the best features w.r.t. their mutual info with the label |
| | normalize | \<bool\> | False | If standardization of features should be applied on each node with the samples that reach it |
| \* | multiclass_strategy | {"ovo", "ovr"} | "ovo" | Strategy to use with multiclass datasets, **"ovo"**: one versus one. **"ovr"**: one versus rest |

\* Hyperparameter used by the support vector classifier of every node

\*\* **Splitting in a STree node**

The decision function is applied to the dataset and the distances from the samples to the hyperplanes are stored in a matrix. This matrix has as many columns as there are classes (in multiclass classification, i.e. more than two classes) or a single column for a binary dataset: in binary classification only one hyperplane is computed, so only one column is needed to store the distances of the samples to it. If three or more classes are present, one hyperplane per class is needed, and therefore one column per hyperplane.

In multiclass classification we have to decide which column to take into account to make the split; this depends on the _split_criteria_ hyperparameter. If "impurity" is chosen, STree computes the information gain of every split candidate using each column and chooses the one that maximizes it; otherwise ("max_samples") STree chooses the column with the most samples carrying a predicted class (the column with the most positive numbers in it).

Once we have the column to take into account for the split, the algorithm separates the samples with positive distance to the hyperplane from the rest.
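For intuition, a standalone sketch of the distance matrix and of the "max_samples" choice described above (this illustration uses scikit-learn's SVC directly, not STree's internal code):

```python
import numpy as np
from sklearn.datasets import load_iris
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)  # 3 classes -> one hyperplane per class
clf = SVC(kernel="linear", decision_function_shape="ovr").fit(X, y)

# One column of distances per class hyperplane: shape (n_samples, n_classes)
distances = clf.decision_function(X)

# "max_samples" keeps the column with the most positive entries;
# "impurity" would instead keep the column whose induced partition
# maximizes the information gain.
column = np.argmax((distances > 0).sum(axis=0))
up = distances[:, column] > 0  # samples split away from the rest
print(distances.shape, column, int(up.sum()))
```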
docs/source/index.rst (new file)
@@ -0,0 +1,15 @@
Welcome to STree's documentation!
=================================

.. toctree::
   :caption: Contents:
   :titlesonly:


   stree
   install
   hyperparameters
   example
   api/index

* :ref:`genindex`
docs/source/install.rst (new file)
@@ -0,0 +1,16 @@
Install
=======

The main stable release

``pip install stree``

or the last development branch

``pip install git+https://github.com/doctorado-ml/stree``

Tests
*****


``python -m unittest -v stree.tests``
docs/source/stree.md (new file)
@@ -0,0 +1,13 @@
# Stree

[](https://app.codeship.com/projects/399170)
[](https://codecov.io/gh/doctorado-ml/stree)
[](https://www.codacy.com/gh/Doctorado-ML/STree?utm_source=github.com&utm_medium=referral&utm_content=Doctorado-ML/STree&utm_campaign=Badge_Grade)

Oblique Tree classifier based on SVM nodes. The nodes are built and split with sklearn SVC models. Stree is a sklearn estimator and can be integrated in pipelines, grid searches, etc.


## License

STree is [MIT](https://github.com/doctorado-ml/stree/blob/master/LICENSE) licensed
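Since Stree is a sklearn estimator, the pipeline/grid-search claim above can be exercised directly; a minimal sketch (the parameter grid is illustrative, not a recommendation):

```python
from sklearn.datasets import load_wine
from sklearn.model_selection import GridSearchCV
from stree import Stree

X, y = load_wine(return_X_y=True)
search = GridSearchCV(
    Stree(random_state=0),
    param_grid={"C": [0.1, 1.0, 7.0], "max_depth": [None, 3]},
    cv=5,
)
search.fit(X, y)
print(search.best_params_, search.best_score_)
```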
main.py (deleted file)
@@ -1,29 +0,0 @@
import time
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from stree import Stree

random_state = 1

X, y = load_iris(return_X_y=True)

Xtrain, Xtest, ytrain, ytest = train_test_split(
    X, y, test_size=0.3, random_state=random_state
)

now = time.time()
print("Predicting with max_features=sqrt(n_features)")
clf = Stree(C=0.01, random_state=random_state, max_features="auto")
clf.fit(Xtrain, ytrain)
print(f"Took {time.time() - now:.2f} seconds to train")
print(clf)
print(f"Classifier's accuracy (train): {clf.score(Xtrain, ytrain):.4f}")
print(f"Classifier's accuracy (test) : {clf.score(Xtest, ytest):.4f}")
print("=" * 40)
print("Predicting with max_features=n_features")
clf = Stree(C=0.01, random_state=random_state)
clf.fit(Xtrain, ytrain)
print(f"Took {time.time() - now:.2f} seconds to train")
print(clf)
print(f"Classifier's accuracy (train): {clf.score(Xtrain, ytrain):.4f}")
print(f"Classifier's accuracy (test) : {clf.score(Xtest, ytest):.4f}")
setup.py
@@ -1,7 +1,5 @@
 import setuptools
+import stree

-__version__ = "1.0rc1"
-__author__ = "Ricardo Montañana Gómez"


 def readme():

@@ -9,22 +7,27 @@ def readme():
         return f.read()


+VERSION = stree.__version__
 setuptools.setup(
     name="STree",
-    version=__version__,
-    license="MIT License",
+    version=stree.__version__,
+    license=stree.__license__,
     description="Oblique decision tree with svm nodes",
     long_description=readme(),
     long_description_content_type="text/markdown",
     packages=setuptools.find_packages(),
-    url="https://github.com/doctorado-ml/stree",
-    author=__author__,
-    author_email="ricardo.montanana@alu.uclm.es",
+    url="https://github.com/Doctorado-ML/STree#stree",
+    project_urls={
+        "Code": "https://github.com/Doctorado-ML/STree",
+        "Documentation": "https://stree.readthedocs.io/en/latest/index.html",
+    },
+    author=stree.__author__,
+    author_email=stree.__author_email__,
     keywords="scikit-learn oblique-classifier oblique-decision-tree decision-\
     tree svm svc",
     classifiers=[
-        "Development Status :: 4 - Beta",
-        "License :: OSI Approved :: MIT License",
+        "Development Status :: 5 - Production/Stable",
+        "License :: OSI Approved :: " + stree.__license__,
         "Programming Language :: Python :: 3.8",
         "Natural Language :: English",
         "Topic :: Scientific/Engineering :: Artificial Intelligence",
stree/Strees.py
@@ -1,9 +1,5 @@
 """
-__author__ = "Ricardo Montañana Gómez"
-__copyright__ = "Copyright 2020, Ricardo Montañana Gómez"
-__license__ = "MIT"
-__version__ = "0.9"
-Build an oblique tree classifier based on SVM nodes
+Oblique decision tree classifier based on SVM nodes
 """

 import os
@@ -15,8 +11,8 @@ from typing import Optional
 import numpy as np
 from sklearn.base import BaseEstimator, ClassifierMixin
 from sklearn.svm import SVC, LinearSVC
+from sklearn.feature_selection import SelectKBest, mutual_info_classif
 from sklearn.preprocessing import StandardScaler
-from sklearn.utils import check_consistent_length
 from sklearn.utils.multiclass import check_classification_targets
 from sklearn.exceptions import ConvergenceWarning
 from sklearn.utils.validation import (
@@ -25,7 +21,6 @@ from sklearn.utils.validation import (
     check_is_fitted,
     _check_sample_weight,
 )
-from sklearn.metrics._classification import _weighted_sum, _check_targets


 class Snode:
@@ -146,12 +141,11 @@
                 f"{self._belief: .6f} impurity={self._impurity:.4f} "
                 f"counts={count_values}"
             )
-        else:
-            return (
-                f"{self._title} feaures={self._features} impurity="
-                f"{self._impurity:.4f} "
-                f"counts={count_values}"
-            )
+        return (
+            f"{self._title} feaures={self._features} impurity="
+            f"{self._impurity:.4f} "
+            f"counts={count_values}"
+        )


 class Siterator:
@@ -161,6 +155,10 @@
         self._stack = []
         self._push(tree)

+    def __iter__(self):
+        # To complete the iterator interface
+        return self
+
     def _push(self, node: Snode):
         if node is not None:
             self._stack.append(node)
@@ -179,7 +177,7 @@ class Splitter:
         self,
         clf: SVC = None,
         criterion: str = None,
-        splitter_type: str = None,
+        feature_select: str = None,
         criteria: str = None,
         min_samples_split: int = None,
         random_state=None,
@@ -192,7 +190,7 @@
         self._criterion = criterion
         self._min_samples_split = min_samples_split
         self._criteria = criteria
-        self._splitter_type = splitter_type
+        self._feature_select = feature_select
         self._normalize = normalize

         if clf is None:
@@ -211,9 +209,10 @@
                 f"criteria has to be max_samples or impurity; got ({criteria})"
             )

-        if splitter_type not in ["random", "best"]:
+        if feature_select not in ["random", "best", "mutual"]:
             raise ValueError(
-                f"splitter must be either random or best, got({splitter_type})"
+                "splitter must be in {random, best, mutual} got "
+                f"({feature_select})"
             )
         self.criterion_function = getattr(self, f"_{self._criterion}")
         self.decision_criteria = getattr(self, f"_{self._criteria}")
@@ -296,6 +295,23 @@
     def _select_best_set(
         self, dataset: np.array, labels: np.array, features_sets: list
     ) -> list:
+        """Return the best set of features among feature_sets, the criterion is
+        the information gain
+
+        Parameters
+        ----------
+        dataset : np.array
+            array of samples (# samples, # features)
+        labels : np.array
+            array of labels
+        features_sets : list
+            list of features sets to check
+
+        Returns
+        -------
+        list
+            best feature set
+        """
         max_gain = 0
         selected = None
         warnings.filterwarnings("ignore", category=ConvergenceWarning)
@@ -330,13 +346,10 @@
         """
         comb = set()
         # Generate at most 5 combinations
-        if max_features == features:
-            set_length = 1
-        else:
-            number = factorial(features) / (
-                factorial(max_features) * factorial(features - max_features)
-            )
-            set_length = min(5, number)
+        number = factorial(features) / (
+            factorial(max_features) * factorial(features - max_features)
+        )
+        set_length = min(5, number)
         while len(comb) < set_length:
             comb.add(
                 tuple(sorted(random.sample(range(features), max_features)))
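For intuition about the arithmetic in the rewritten `_generate_spaces`: with features=4 and max_features=2 there are C(4,2) = 6 possible subsets, so the candidate pool is capped at min(5, 6) = 5. A standalone check of that computation (the values are illustrative):

```python
from math import factorial

features, max_features = 4, 2
number = factorial(features) / (
    factorial(max_features) * factorial(features - max_features)
)
print(number, min(5, number))  # 6.0 5
```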
@@ -345,9 +358,9 @@

     def _get_subspaces_set(
         self, dataset: np.array, labels: np.array, max_features: int
-    ) -> np.array:
+    ) -> tuple:
         """Compute the indices of the features selected by splitter depending
-        on the self._splitter_type hyper parameter
+        on the self._feature_select hyper parameter

         Parameters
         ----------
@@ -361,23 +374,37 @@

         Returns
         -------
-        np.array
+        tuple
             indices of the features selected
         """
-        features_sets = self._generate_spaces(dataset.shape[1], max_features)
-        if len(features_sets) > 1:
-            if self._splitter_type == "random":
-                index = random.randint(0, len(features_sets) - 1)
-                return features_sets[index]
-            else:
-                return self._select_best_set(dataset, labels, features_sets)
-        else:
-            return features_sets[0]
+        # No feature reduction
+        if dataset.shape[1] == max_features:
+            return tuple(range(dataset.shape[1]))
+        # Random feature reduction
+        if self._feature_select == "random":
+            features_sets = self._generate_spaces(
+                dataset.shape[1], max_features
+            )
+            return self._select_best_set(dataset, labels, features_sets)
+        # return the KBest features
+        if self._feature_select == "best":
+            return (
+                SelectKBest(k=max_features)
+                .fit(dataset, labels)
+                .get_support(indices=True)
+            )
+        # return best features with mutual info with the label
+        feature_list = mutual_info_classif(dataset, labels)
+        return tuple(
+            sorted(
+                range(len(feature_list)), key=lambda sub: feature_list[sub]
+            )[-max_features:]
+        )

     def get_subspace(
         self, dataset: np.array, labels: np.array, max_features: int
     ) -> tuple:
-        """Return a subspace of the selected dataset of max_features length.
+        """Re3turn a subspace of the selected dataset of max_features length.
         Depending on hyperparmeter

         Parameters
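The new "mutual" branch above ranks features by mutual information with the label and keeps the max_features highest-scoring indices. The same selection can be reproduced standalone; a minimal sketch (the wine dataset mirrors the tests further below, and since mutual_info_classif has a random component the exact indices can vary):

```python
from sklearn.datasets import load_wine
from sklearn.feature_selection import mutual_info_classif

X, y = load_wine(return_X_y=True)
max_features = 4
scores = mutual_info_classif(X, y, random_state=1)
# indices of the max_features best scores, as in _get_subspaces_set above
top = sorted(range(len(scores)), key=lambda i: scores[i])[-max_features:]
print(sorted(top))  # the tests below expect [6, 9, 11, 12] on this dataset
```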
@@ -447,6 +474,15 @@
     def partition(self, samples: np.array, node: Snode, train: bool):
         """Set the criteria to split arrays. Compute the indices of the samples
         that should go to one side of the tree (up)
+
+        Parameters
+        ----------
+        samples : np.array
+            array of samples (# samples, # features)
+        node : Snode
+            Node of the tree where partition is going to be made
+        train : bool
+            Train time - True / Test time - False
         """
         # data contains the distances of every sample to every class hyperplane
         # array of (m, nc) nc = # classes
@@ -538,6 +574,7 @@ class Stree(BaseEstimator, ClassifierMixin):
         min_samples_split: int = 0,
         max_features=None,
         splitter: str = "random",
+        multiclass_strategy: str = "ovo",
         normalize: bool = False,
     ):
         self.max_iter = max_iter
@@ -554,6 +591,7 @@ class Stree(BaseEstimator, ClassifierMixin):
         self.criterion = criterion
         self.splitter = splitter
         self.normalize = normalize
+        self.multiclass_strategy = multiclass_strategy

     def _more_tags(self) -> dict:
         """Required by sklearn to supply features of the classifier
@@ -598,7 +636,25 @@ class Stree(BaseEstimator, ClassifierMixin):
                 f"Maximum depth has to be greater than 1... got (max_depth=\
                 {self.max_depth})"
             )
+        if self.multiclass_strategy not in ["ovr", "ovo"]:
+            raise ValueError(
+                "mutliclass_strategy has to be either ovr or ovo"
+                f" but got {self.multiclass_strategy}"
+            )
+        if self.multiclass_strategy == "ovo":
+            if self.kernel == "liblinear":
+                raise ValueError(
+                    "The kernel liblinear is incompatible with ovo "
+                    "multiclass_strategy"
+                )
+            if self.split_criteria == "max_samples":
+                raise ValueError(
+                    "The multiclass_strategy 'ovo' is incompatible with "
+                    "split_criteria 'max_samples'"
+                )
+        kernels = ["liblinear", "linear", "rbf", "poly", "sigmoid"]
+        if self.kernel not in kernels:
+            raise ValueError(f"Kernel {self.kernel} not in {kernels}")
         check_classification_targets(y)
         X, y = check_X_y(X, y)
         sample_weight = _check_sample_weight(
@@ -613,7 +669,7 @@ class Stree(BaseEstimator, ClassifierMixin):
         self.splitter_ = Splitter(
             clf=self._build_clf(),
             criterion=self.criterion,
-            splitter_type=self.splitter,
+            feature_select=self.splitter,
             criteria=self.split_criteria,
             random_state=self.random_state,
             min_samples_split=self.min_samples_split,
@@ -628,13 +684,12 @@ class Stree(BaseEstimator, ClassifierMixin):
         self.n_features_ = X.shape[1]
         self.n_features_in_ = X.shape[1]
         self.max_features_ = self._initialize_max_features()
-        self.tree_ = self.train(X, y, sample_weight, 1, "root")
-        self._build_predictor()
+        self.tree_ = self._train(X, y, sample_weight, 1, "root")
         self.X_ = X
         self.y_ = y
         return self

-    def train(
+    def _train(
         self,
         X: np.ndarray,
         y: np.ndarray,
@@ -677,6 +732,7 @@ class Stree(BaseEstimator, ClassifierMixin):
         if np.unique(y).shape[0] == 1:
             # only 1 class => pure dataset
             node.set_title(title + ", <pure>")
+            node.make_predictor()
             return node
         # Train the model
         clf = self._build_clf()
@@ -695,31 +751,20 @@ class Stree(BaseEstimator, ClassifierMixin):
         if X_U is None or X_D is None:
             # didn't part anything
             node.set_title(title + ", <cgaf>")
+            node.make_predictor()
             return node
         node.set_up(
-            self.train(X_U, y_u, sw_u, depth + 1, title + f" - Up({depth+1})")
+            self._train(X_U, y_u, sw_u, depth + 1, title + f" - Up({depth+1})")
         )
         node.set_down(
-            self.train(
+            self._train(
                 X_D, y_d, sw_d, depth + 1, title + f" - Down({depth+1})"
             )
         )
         return node

-    def _build_predictor(self):
-        """Process the leaves to make them predictors"""
-
-        def run_tree(node: Snode):
-            if node.is_leaf():
-                node.make_predictor()
-                return
-            run_tree(node.get_down())
-            run_tree(node.get_up())
-
-        run_tree(self.tree_)
-
     def _build_clf(self):
-        """Build the correct classifier for the node"""
+        """Build the right classifier for the node"""
         return (
             LinearSVC(
                 max_iter=self.max_iter,
@@ -727,7 +772,7 @@ class Stree(BaseEstimator, ClassifierMixin):
                 C=self.C,
                 tol=self.tol,
             )
-            if self.kernel == "linear"
+            if self.kernel == "liblinear"
             else SVC(
                 kernel=self.kernel,
                 max_iter=self.max_iter,
@@ -735,6 +780,8 @@ class Stree(BaseEstimator, ClassifierMixin):
                 C=self.C,
                 gamma=self.gamma,
                 degree=self.degree,
+                random_state=self.random_state,
+                decision_function_shape=self.multiclass_strategy,
             )
         )

@@ -816,36 +863,6 @@ class Stree(BaseEstimator, ClassifierMixin):
             )
         return self.classes_[result]

-    def score(
-        self, X: np.array, y: np.array, sample_weight: np.array = None
-    ) -> float:
-        """Compute accuracy of the prediction
-
-        Parameters
-        ----------
-        X : np.array
-            dataset of samples to make predictions
-        y : np.array
-            samples labels
-        sample_weight : np.array, optional
-            weights of the samples. Rescale C per sample, by default None
-
-        Returns
-        -------
-        float
-            accuracy of the prediction
-        """
-        # sklearn check
-        check_is_fitted(self)
-        check_classification_targets(y)
-        X, y = check_X_y(X, y)
-        y_pred = self.predict(X).reshape(y.shape)
-        # Compute accuracy for each possible representation
-        _, y_true, y_pred = _check_targets(y, y_pred)
-        check_consistent_length(y_true, y_pred, sample_weight)
-        score = y_true == y_pred
-        return _weighted_sum(score, sample_weight, normalize=True)
-
     def nodes_leaves(self) -> tuple:
         """Compute the number of nodes and leaves in the built tree

@@ -907,6 +924,12 @@ class Stree(BaseEstimator, ClassifierMixin):
         elif self.max_features is None:
             max_features = self.n_features_
         elif isinstance(self.max_features, numbers.Integral):
+            if self.max_features > self.n_features_:
+                raise ValueError(
+                    "Invalid value for max_features. "
+                    "It can not be greater than number of features "
+                    f"({self.n_features_})"
+                )
             max_features = self.max_features
         else:  # float
             if self.max_features > 0.0:
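The new max_features guard can be checked directly; a quick sketch (iris has 4 features, so requesting 10 should raise the error added above):

```python
from sklearn.datasets import load_iris
from stree import Stree

X, y = load_iris(return_X_y=True)  # 4 features
try:
    Stree(max_features=10).fit(X, y)
except ValueError as e:
    print(e)  # Invalid value for max_features. It can not be greater ...
```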
stree/__init__.py
@@ -1,3 +1,10 @@
 from .Strees import Stree, Snode, Siterator, Splitter

+__version__ = "1.1"
+
+__author__ = "Ricardo Montañana Gómez"
+__copyright__ = "Copyright 2020-2021, Ricardo Montañana Gómez"
+__license__ = "MIT License"
+__author_email__ = "ricardo.montanana@alu.uclm.es"
+
 __all__ = ["Stree", "Snode", "Siterator", "Splitter"]
stree/tests/Snode_test.py
@@ -8,7 +8,11 @@ from .utils import load_dataset
 class Snode_test(unittest.TestCase):
     def __init__(self, *args, **kwargs):
         self._random_state = 1
-        self._clf = Stree(random_state=self._random_state)
+        self._clf = Stree(
+            random_state=self._random_state,
+            kernel="liblinear",
+            multiclass_strategy="ovr",
+        )
         self._clf.fit(*load_dataset(self._random_state))
         super().__init__(*args, **kwargs)
stree/tests/Splitter_test.py
@@ -6,6 +6,7 @@ import numpy as np
 from sklearn.svm import SVC
 from sklearn.datasets import load_wine, load_iris
 from stree import Splitter
+from .utils import load_dataset


 class Splitter_test(unittest.TestCase):
@@ -17,7 +18,7 @@
     def build(
         clf=SVC,
         min_samples_split=0,
-        splitter_type="random",
+        feature_select="random",
         criterion="gini",
         criteria="max_samples",
         random_state=None,
@@ -25,7 +26,7 @@
         return Splitter(
             clf=clf(random_state=random_state, kernel="rbf"),
             min_samples_split=min_samples_split,
-            splitter_type=splitter_type,
+            feature_select=feature_select,
             criterion=criterion,
             criteria=criteria,
             random_state=random_state,
@@ -39,20 +40,20 @@
         with self.assertRaises(ValueError):
             self.build(criterion="duck")
         with self.assertRaises(ValueError):
-            self.build(splitter_type="duck")
+            self.build(feature_select="duck")
         with self.assertRaises(ValueError):
             self.build(criteria="duck")
         with self.assertRaises(ValueError):
             _ = Splitter(clf=None)
-        for splitter_type in ["best", "random"]:
+        for feature_select in ["best", "random"]:
             for criterion in ["gini", "entropy"]:
                 for criteria in ["max_samples", "impurity"]:
                     tcl = self.build(
-                        splitter_type=splitter_type,
+                        feature_select=feature_select,
                         criterion=criterion,
                         criteria=criteria,
                     )
-                    self.assertEqual(splitter_type, tcl._splitter_type)
+                    self.assertEqual(feature_select, tcl._feature_select)
                     self.assertEqual(criterion, tcl._criterion)
                     self.assertEqual(criteria, tcl._criteria)
@@ -177,32 +178,38 @@ class Splitter_test(unittest.TestCase):
     def test_best_splitter_few_sets(self):
         X, y = load_iris(return_X_y=True)
         X = np.delete(X, 3, 1)
-        tcl = self.build(splitter_type="best", random_state=self._random_state)
+        tcl = self.build(
+            feature_select="best", random_state=self._random_state
+        )
         dataset, computed = tcl.get_subspace(X, y, max_features=2)
         self.assertListEqual([0, 2], list(computed))
         self.assertListEqual(X[:, computed].tolist(), dataset.tolist())

     def test_splitter_parameter(self):
         expected_values = [
-            [1, 4, 9, 12],  # best entropy max_samples
-            [1, 3, 6, 10],  # best entropy impurity
-            [6, 8, 10, 12],  # best gini max_samples
-            [7, 8, 10, 11],  # best gini impurity
+            [0, 6, 11, 12],  # best entropy max_samples
+            [0, 6, 11, 12],  # best entropy impurity
+            [0, 6, 11, 12],  # best gini max_samples
+            [0, 6, 11, 12],  # best gini impurity
             [0, 3, 8, 12],  # random entropy max_samples
-            [0, 3, 9, 11],  # random entropy impurity
-            [0, 4, 7, 12],  # random gini max_samples
-            [0, 2, 5, 6],  # random gini impurity
+            [0, 3, 7, 12],  # random entropy impurity
+            [1, 7, 9, 12],  # random gini max_samples
+            [1, 5, 8, 12],  # random gini impurity
+            [6, 9, 11, 12],  # mutual entropy max_samples
+            [6, 9, 11, 12],  # mutual entropy impurity
+            [6, 9, 11, 12],  # mutual gini max_samples
+            [6, 9, 11, 12],  # mutual gini impurity
         ]
         X, y = load_wine(return_X_y=True)
         rn = 0
-        for splitter_type in ["best", "random"]:
+        for feature_select in ["best", "random", "mutual"]:
             for criterion in ["entropy", "gini"]:
                 for criteria in [
                     "max_samples",
                     "impurity",
                 ]:
                     tcl = self.build(
-                        splitter_type=splitter_type,
+                        feature_select=feature_select,
                         criterion=criterion,
                         criteria=criteria,
                     )
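The new "mutual" option presumably ranks features by mutual information with the labels; since such a ranking depends only on X and y, it would explain why all four mutual rows expect the same subspace regardless of criterion and criteria. A sketch under that assumption (function name hypothetical):

import numpy as np
from sklearn.datasets import load_wine
from sklearn.feature_selection import mutual_info_classif

# Assumption: "mutual" scores every feature once against y and keeps the
# max_features highest, independently of criterion/criteria.
def mutual_subspace(X, y, max_features, random_state=0):
    mi = mutual_info_classif(X, y, random_state=random_state)
    return np.sort(np.argsort(mi)[-max_features:])

X, y = load_wine(return_X_y=True)
print(mutual_subspace(X, y, 4))  # the tests above expect [6, 9, 11, 12]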
@@ -213,12 +220,27 @@ class Splitter_test(unittest.TestCase):
                     # print(
                     #     "{}, # {:7s}{:8s}{:15s}".format(
                     #         list(computed),
-                    #         splitter_type,
+                    #         feature_select,
                     #         criterion,
                     #         criteria,
                     #     )
                     # )
-                    self.assertListEqual(expected, list(computed))
+                    self.assertListEqual(expected, sorted(list(computed)))
                     self.assertListEqual(
                         X[:, computed].tolist(), dataset.tolist()
                     )

+    def test_get_best_subspaces(self):
+        results = [
+            (4, [3, 4, 11, 13]),
+            (7, [1, 3, 4, 5, 11, 13, 16]),
+            (9, [1, 3, 4, 5, 7, 10, 11, 13, 16]),
+        ]
+        X, y = load_dataset(n_features=20)
+        for k, expected in results:
+            tcl = self.build(
+                feature_select="best",
+            )
+            Xs, computed = tcl.get_subspace(X, y, k)
+            self.assertListEqual(expected, list(computed))
+            self.assertListEqual(X[:, expected].tolist(), Xs.tolist())
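The expected subspaces in the new test_get_best_subspaces are nested as k grows (4 → 7 → 9), which fits a univariate scoring that ranks features once and keeps the top k. A hedged sketch of that reading, not the verified implementation:

import numpy as np
from sklearn.feature_selection import SelectKBest, mutual_info_classif

# One plausible reading of feature_select="best" (an assumption): a fixed
# per-feature score against y, keeping the k best, so the selected sets
# nest as k grows.
def best_subspace(X, y, k):
    selector = SelectKBest(score_func=mutual_info_classif, k=k).fit(X, y)
    return selector.get_support(indices=True)  # ascending feature indices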
@@ -14,13 +14,27 @@ from .utils import load_dataset
 class Stree_test(unittest.TestCase):
     def __init__(self, *args, **kwargs):
         self._random_state = 1
-        self._kernels = ["linear", "rbf", "poly"]
+        self._kernels = ["liblinear", "linear", "rbf", "poly", "sigmoid"]
         super().__init__(*args, **kwargs)

     @classmethod
     def setUp(cls):
         os.environ["TESTING"] = "1"

+    def test_valid_kernels(self):
+        X, y = load_dataset()
+        for kernel in self._kernels:
+            clf = Stree(kernel=kernel, multiclass_strategy="ovr")
+            clf.fit(X, y)
+            self.assertIsNotNone(clf.tree_)
+
+    def test_bogus_kernel(self):
+        kernel = "other"
+        X, y = load_dataset()
+        clf = Stree(kernel=kernel)
+        with self.assertRaises(ValueError):
+            clf.fit(X, y)
+
     def _check_tree(self, node: Snode):
         """Check recursively that the nodes that are not leaves have the
         correct number of labels and its sons have the right number of elements
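The kernel list gains "liblinear" and "sigmoid". Judging by how the tests pair "liblinear" with multiclass_strategy="ovr" everywhere, the name plausibly selects scikit-learn's LinearSVC (the liblinear solver), while the other names are passed to SVC. A sketch of that assumed mapping:

from sklearn.svm import SVC, LinearSVC

# Hypothetical mapping suggested by these tests, not confirmed source:
# "liblinear" -> LinearSVC (one-vs-rest only), other names -> SVC kernels.
def build_estimator(kernel, random_state=None):
    if kernel == "liblinear":
        return LinearSVC(random_state=random_state)
    if kernel in ("linear", "rbf", "poly", "sigmoid"):
        return SVC(kernel=kernel, random_state=random_state)
    raise ValueError(f"unsupported kernel: {kernel}")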
@@ -40,14 +54,19 @@ class Stree_test(unittest.TestCase):
         # i.e. The partition algorithm didn't forget any sample
         self.assertEqual(node._y.shape[0], y_down.shape[0] + y_up.shape[0])
         unique_y, count_y = np.unique(node._y, return_counts=True)
-        _, count_d = np.unique(y_down, return_counts=True)
-        _, count_u = np.unique(y_up, return_counts=True)
+        labels_d, count_d = np.unique(y_down, return_counts=True)
+        labels_u, count_u = np.unique(y_up, return_counts=True)
+        dict_d = {label: count_d[i] for i, label in enumerate(labels_d)}
+        dict_u = {label: count_u[i] for i, label in enumerate(labels_u)}
         #
         for i in unique_y:
-            number_up = count_u[i]
             try:
-                number_down = count_d[i]
-            except IndexError:
+                number_up = dict_u[i]
+            except KeyError:
+                number_up = 0
+            try:
+                number_down = dict_d[i]
+            except KeyError:
                 number_down = 0
             self.assertEqual(count_y[i], number_down + number_up)
         # Is the partition made the same as the prediction?
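The fix above replaces positional lookups into np.unique counts with label-to-count dictionaries. Indexing count_u by the class label is only correct when every class is present and the labels run 0..n-1; when a class is missing on one side of the split, the old code either raised IndexError or silently read another class's count. A small demonstration:

import numpy as np

y_up = np.array([0, 0, 2, 2, 2])  # class 1 is absent on this side
labels_u, count_u = np.unique(y_up, return_counts=True)
# labels_u == [0, 2], count_u == [2, 3]: indexing count_u by the class
# label (count_u[1]) silently returns the count of class 2, and
# count_u[2] raises IndexError. Mapping label -> count avoids both.
dict_u = {label: count_u[i] for i, label in enumerate(labels_u)}
print(dict_u.get(1, 0))  # 0, the fallback the fixed test relies on
print(dict_u.get(2, 0))  # 3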
@@ -62,14 +81,22 @@ class Stree_test(unittest.TestCase):
         """Check if the tree is built the same way as predictions of models"""
         warnings.filterwarnings("ignore")
         for kernel in self._kernels:
-            clf = Stree(kernel=kernel, random_state=self._random_state)
+            clf = Stree(
+                kernel="sigmoid",
+                multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
+                random_state=self._random_state,
+            )
             clf.fit(*load_dataset(self._random_state))
             self._check_tree(clf.tree_)

     def test_single_prediction(self):
         X, y = load_dataset(self._random_state)
         for kernel in self._kernels:
-            clf = Stree(kernel=kernel, random_state=self._random_state)
+            clf = Stree(
+                kernel=kernel,
+                multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
+                random_state=self._random_state,
+            )
             yp = clf.fit(X, y).predict((X[0, :].reshape(-1, X.shape[1])))
             self.assertEqual(yp[0], y[0])
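From here on, every Stree construction passes multiclass_strategy="ovr" if kernel == "liblinear" else "ovo", matching the incompatibility tested at the end of this diff (liblinear with "ovo" raises ValueError). The repeated conditional amounts to a helper like this (hypothetical, for illustration only):

# liblinear only works one-vs-rest; every other kernel is run one-vs-one.
def strategy_for(kernel: str) -> str:
    return "ovr" if kernel == "liblinear" else "ovo"

# usage: Stree(kernel=kernel, multiclass_strategy=strategy_for(kernel))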
@@ -77,8 +104,12 @@ class Stree_test(unittest.TestCase):
         # First 27 elements the predictions are the same as the truth
         num = 27
         X, y = load_dataset(self._random_state)
-        for kernel in self._kernels:
-            clf = Stree(kernel=kernel, random_state=self._random_state)
+        for kernel in ["liblinear", "linear", "rbf", "poly"]:
+            clf = Stree(
+                kernel=kernel,
+                multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
+                random_state=self._random_state,
+            )
             yp = clf.fit(X, y).predict(X[:num, :])
             self.assertListEqual(y[:num].tolist(), yp.tolist())
@@ -88,7 +119,11 @@ class Stree_test(unittest.TestCase):
         """
         X, y = load_dataset(self._random_state)
         for kernel in self._kernels:
-            clf = Stree(kernel=kernel, random_state=self._random_state)
+            clf = Stree(
+                kernel=kernel,
+                multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
+                random_state=self._random_state,
+            )
             clf.fit(X, y)
             # Compute prediction line by line
             yp_line = np.array([], dtype=int)
@@ -120,9 +155,13 @@ class Stree_test(unittest.TestCase):
         ]
         computed = []
         expected_string = ""
-        clf = Stree(kernel="linear", random_state=self._random_state)
+        clf = Stree(
+            kernel="liblinear",
+            multiclass_strategy="ovr",
+            random_state=self._random_state,
+        )
         clf.fit(*load_dataset(self._random_state))
-        for node in clf:
+        for node in iter(clf):
             computed.append(str(node))
             expected_string += str(node) + "\n"
         self.assertListEqual(expected, computed)
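The change from `for node in clf` to `for node in iter(clf)` is behaviour-preserving: iter(obj) simply calls obj.__iter__(), which the loop protocol already does implicitly. A toy equivalent:

# iter(t) and t behave identically in a for loop once __iter__ is defined.
class Tree:
    def __init__(self, nodes):
        self._nodes = nodes

    def __iter__(self):
        return iter(self._nodes)

t = Tree(["root", "leaf - Down", "leaf - Up"])
assert [str(n) for n in t] == [str(n) for n in iter(t)]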
@@ -158,7 +197,12 @@ class Stree_test(unittest.TestCase):
     def test_check_max_depth(self):
         depths = (3, 4)
         for depth in depths:
-            tcl = Stree(random_state=self._random_state, max_depth=depth)
+            tcl = Stree(
+                kernel="liblinear",
+                multiclass_strategy="ovr",
+                random_state=self._random_state,
+                max_depth=depth,
+            )
             tcl.fit(*load_dataset(self._random_state))
             self.assertEqual(depth, tcl.depth_)
@@ -179,7 +223,7 @@ class Stree_test(unittest.TestCase):
         for kernel in self._kernels:
             clf = Stree(
                 kernel=kernel,
-                split_criteria="max_samples",
+                multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
                 random_state=self._random_state,
             )
             px = [[1, 2], [5, 6], [9, 10]]
@@ -190,26 +234,36 @@ class Stree_test(unittest.TestCase):
         self.assertListEqual(py, clf.classes_.tolist())

     def test_muticlass_dataset(self):
+        warnings.filterwarnings("ignore", category=ConvergenceWarning)
+        warnings.filterwarnings("ignore", category=RuntimeWarning)
         datasets = {
             "Synt": load_dataset(random_state=self._random_state, n_classes=3),
             "Iris": load_wine(return_X_y=True),
         }
         outcomes = {
             "Synt": {
-                "max_samples linear": 0.9606666666666667,
-                "max_samples rbf": 0.7133333333333334,
-                "max_samples poly": 0.618,
-                "impurity linear": 0.9606666666666667,
-                "impurity rbf": 0.7133333333333334,
-                "impurity poly": 0.618,
+                "max_samples liblinear": 0.9493333333333334,
+                "max_samples linear": 0.9426666666666667,
+                "max_samples rbf": 0.9606666666666667,
+                "max_samples poly": 0.9373333333333334,
+                "max_samples sigmoid": 0.824,
+                "impurity liblinear": 0.9493333333333334,
+                "impurity linear": 0.9426666666666667,
+                "impurity rbf": 0.9606666666666667,
+                "impurity poly": 0.9373333333333334,
+                "impurity sigmoid": 0.824,
             },
             "Iris": {
+                "max_samples liblinear": 0.9550561797752809,
                 "max_samples linear": 1.0,
-                "max_samples rbf": 0.6910112359550562,
-                "max_samples poly": 0.6966292134831461,
-                "impurity linear": 1,
-                "impurity rbf": 0.6910112359550562,
-                "impurity poly": 0.6966292134831461,
+                "max_samples rbf": 0.6685393258426966,
+                "max_samples poly": 0.6853932584269663,
+                "max_samples sigmoid": 0.6404494382022472,
+                "impurity liblinear": 0.9550561797752809,
+                "impurity linear": 1.0,
+                "impurity rbf": 0.6685393258426966,
+                "impurity poly": 0.6853932584269663,
+                "impurity sigmoid": 0.6404494382022472,
             },
         }
@@ -218,18 +272,22 @@ class Stree_test(unittest.TestCase):
             for criteria in ["max_samples", "impurity"]:
                 for kernel in self._kernels:
                     clf = Stree(
-                        C=55,
-                        max_iter=1e5,
+                        max_iter=1e4,
+                        multiclass_strategy="ovr"
+                        if kernel == "liblinear"
+                        else "ovo",
                         kernel=kernel,
                         random_state=self._random_state,
                     )
                     clf.fit(px, py)
                     outcome = outcomes[name][f"{criteria} {kernel}"]
-                    # print(
-                    #     f"{name} {criteria} {kernel} {outcome} {clf.score(px"
-                    #     ", py)}"
-                    # )
-                    self.assertAlmostEqual(outcome, clf.score(px, py))
+                    # print(f'"{criteria} {kernel}": {clf.score(px, py)},')
+                    self.assertAlmostEqual(
+                        outcome,
+                        clf.score(px, py),
+                        5,
+                        f"{name} - {criteria} - {kernel}",
+                    )

     def test_max_features(self):
         n_features = 16
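The rewritten assertion uses unittest's positional places and msg arguments of assertAlmostEqual, so a failing combination now reports which dataset, criteria and kernel diverged instead of two bare floats:

import unittest

class AlmostEqualDemo(unittest.TestCase):
    def test_places_and_msg(self):
        # assertAlmostEqual(first, second, places, msg): the difference is
        # rounded to `places` decimals before comparing, and `msg` prefixes
        # the failure report, e.g. "Synt - impurity - rbf".
        self.assertAlmostEqual(0.96066, 0.960661, 5, "Synt - impurity - rbf")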
@@ -254,6 +312,12 @@ class Stree_test(unittest.TestCase):
         with self.assertRaises(ValueError):
             _ = clf._initialize_max_features()

+    def test_wrong_max_features(self):
+        X, y = load_dataset(n_features=15)
+        clf = Stree(max_features=16)
+        with self.assertRaises(ValueError):
+            clf.fit(X, y)
+
     def test_get_subspaces(self):
         dataset = np.random.random((10, 16))
         y = np.random.randint(0, 2, 10)
@@ -291,17 +355,19 @@ class Stree_test(unittest.TestCase):
             clf.predict(X[:, :3])

     # Tests of score

     def test_score_binary(self):
         X, y = load_dataset(self._random_state)
         accuracies = [
             0.9506666666666667,
+            0.9493333333333334,
             0.9606666666666667,
             0.9433333333333334,
+            0.9153333333333333,
         ]
         for kernel, accuracy_expected in zip(self._kernels, accuracies):
             clf = Stree(
                 random_state=self._random_state,
+                multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
                 kernel=kernel,
             )
             clf.fit(X, y)
@@ -313,9 +379,14 @@ class Stree_test(unittest.TestCase):

     def test_score_max_features(self):
         X, y = load_dataset(self._random_state)
-        clf = Stree(random_state=self._random_state, max_features=2)
+        clf = Stree(
+            kernel="liblinear",
+            multiclass_strategy="ovr",
+            random_state=self._random_state,
+            max_features=2,
+        )
         clf.fit(X, y)
-        self.assertAlmostEqual(0.9246666666666666, clf.score(X, y))
+        self.assertAlmostEqual(0.9453333333333334, clf.score(X, y))

     def test_bogus_splitter_parameter(self):
         clf = Stree(splitter="duck")
@@ -325,7 +396,9 @@ class Stree_test(unittest.TestCase):
     def test_multiclass_classifier_integrity(self):
         """Checks if the multiclass operation is done right"""
         X, y = load_iris(return_X_y=True)
-        clf = Stree(random_state=0)
+        clf = Stree(
+            kernel="liblinear", multiclass_strategy="ovr", random_state=0
+        )
         clf.fit(X, y)
         score = clf.score(X, y)
         # Check accuracy of the whole model
@@ -381,10 +454,10 @@ class Stree_test(unittest.TestCase):
         clf2 = Stree(
             kernel="rbf", random_state=self._random_state, normalize=True
         )
-        self.assertEqual(0.768, clf.fit(X, y).score(X, y))
-        self.assertEqual(0.814, clf2.fit(X, y).score(X, y))
+        self.assertEqual(0.966, clf.fit(X, y).score(X, y))
+        self.assertEqual(0.964, clf2.fit(X, y).score(X, y))
         X, y = load_wine(return_X_y=True)
-        self.assertEqual(0.6741573033707865, clf.fit(X, y).score(X, y))
+        self.assertEqual(0.6685393258426966, clf.fit(X, y).score(X, y))
         self.assertEqual(1.0, clf2.fit(X, y).score(X, y))

     def test_score_multiclass_poly(self):
@@ -402,24 +475,78 @@ class Stree_test(unittest.TestCase):
             random_state=self._random_state,
             normalize=True,
         )
-        self.assertEqual(0.786, clf.fit(X, y).score(X, y))
-        self.assertEqual(0.818, clf2.fit(X, y).score(X, y))
+        self.assertEqual(0.946, clf.fit(X, y).score(X, y))
+        self.assertEqual(0.972, clf2.fit(X, y).score(X, y))
         X, y = load_wine(return_X_y=True)
-        self.assertEqual(0.702247191011236, clf.fit(X, y).score(X, y))
-        self.assertEqual(0.6067415730337079, clf2.fit(X, y).score(X, y))
+        self.assertEqual(0.7808988764044944, clf.fit(X, y).score(X, y))
+        self.assertEqual(1.0, clf2.fit(X, y).score(X, y))
+
+    def test_score_multiclass_liblinear(self):
+        X, y = load_dataset(
+            random_state=self._random_state,
+            n_classes=3,
+            n_features=5,
+            n_samples=500,
+        )
+        clf = Stree(
+            kernel="liblinear",
+            multiclass_strategy="ovr",
+            random_state=self._random_state,
+            C=10,
+        )
+        clf2 = Stree(
+            kernel="liblinear",
+            multiclass_strategy="ovr",
+            random_state=self._random_state,
+            normalize=True,
+        )
+        self.assertEqual(0.968, clf.fit(X, y).score(X, y))
+        self.assertEqual(0.97, clf2.fit(X, y).score(X, y))
+        X, y = load_wine(return_X_y=True)
+        self.assertEqual(1.0, clf.fit(X, y).score(X, y))
+        self.assertEqual(1.0, clf2.fit(X, y).score(X, y))
+
+    def test_score_multiclass_sigmoid(self):
+        X, y = load_dataset(
+            random_state=self._random_state,
+            n_classes=3,
+            n_features=5,
+            n_samples=500,
+        )
+        clf = Stree(kernel="sigmoid", random_state=self._random_state, C=10)
+        clf2 = Stree(
+            kernel="sigmoid",
+            random_state=self._random_state,
+            normalize=True,
+            C=10,
+        )
+        self.assertEqual(0.796, clf.fit(X, y).score(X, y))
+        self.assertEqual(0.952, clf2.fit(X, y).score(X, y))
+        X, y = load_wine(return_X_y=True)
+        self.assertEqual(0.6910112359550562, clf.fit(X, y).score(X, y))
+        self.assertEqual(0.9662921348314607, clf2.fit(X, y).score(X, y))

     def test_score_multiclass_linear(self):
+        warnings.filterwarnings("ignore", category=ConvergenceWarning)
+        warnings.filterwarnings("ignore", category=RuntimeWarning)
         X, y = load_dataset(
             random_state=self._random_state,
             n_classes=3,
             n_features=5,
             n_samples=1500,
         )
-        clf = Stree(kernel="linear", random_state=self._random_state)
+        clf = Stree(
+            kernel="liblinear",
+            multiclass_strategy="ovr",
+            random_state=self._random_state,
+        )
         self.assertEqual(0.9533333333333334, clf.fit(X, y).score(X, y))
         # Check with context based standardization
         clf2 = Stree(
-            kernel="linear", random_state=self._random_state, normalize=True
+            kernel="liblinear",
+            multiclass_strategy="ovr",
+            random_state=self._random_state,
+            normalize=True,
         )
         self.assertEqual(0.9526666666666667, clf2.fit(X, y).score(X, y))
         X, y = load_wine(return_X_y=True)
@@ -446,7 +573,7 @@ class Stree_test(unittest.TestCase):
             ]
         )
         y = np.array([1, 1, 1, 2, 2, 2, 5, 5, 5])
-        yw = np.array([1, 1, 1, 5, 5, 5, 5, 5, 5])
+        yw = np.array([1, 1, 1, 1, 1, 1, 5, 5, 5])
         w = [1, 1, 1, 0, 0, 0, 1, 1, 1]
         model1 = Stree().fit(X, y)
         model2 = Stree().fit(X, y, w)
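The corrected yw reflects what zero sample weights actually do: the three class-2 rows contribute nothing to training, so the model behaves as if they were absent and their region is absorbed by a neighbouring class. A standalone illustration with a plain scikit-learn estimator, not Stree:

import numpy as np
from sklearn.tree import DecisionTreeClassifier

X = np.arange(1, 10, dtype=float).reshape(-1, 1)
y = np.array([1, 1, 1, 2, 2, 2, 5, 5, 5])
w = np.array([1, 1, 1, 0, 0, 0, 1, 1, 1])
# Fitting with zero weights is equivalent to dropping those rows: neither
# model below ever predicts class 2.
weighted = DecisionTreeClassifier(random_state=0).fit(X, y, sample_weight=w)
filtered = DecisionTreeClassifier(random_state=0).fit(X[w > 0], y[w > 0])
print(weighted.predict(X))  # only 1s and 5s
print(filtered.predict(X))  # only 1s and 5s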
@@ -483,14 +610,14 @@ class Stree_test(unittest.TestCase):
         clf = Stree(random_state=self._random_state)
         clf.fit(X, y)
         nodes, leaves = clf.nodes_leaves()
-        self.assertEqual(25, nodes)
-        self.assertEquals(13, leaves)
+        self.assertEqual(31, nodes)
+        self.assertEqual(16, leaves)
         X, y = load_wine(return_X_y=True)
         clf = Stree(random_state=self._random_state)
         clf.fit(X, y)
         nodes, leaves = clf.nodes_leaves()
-        self.assertEqual(9, nodes)
-        self.assertEquals(5, leaves)
+        self.assertEqual(11, nodes)
+        self.assertEqual(6, leaves)

     def test_nodes_leaves_artificial(self):
         n1 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test1")
@@ -509,3 +636,27 @@ class Stree_test(unittest.TestCase):
         nodes, leaves = clf.nodes_leaves()
         self.assertEqual(6, nodes)
         self.assertEqual(2, leaves)
+
+    def test_bogus_multiclass_strategy(self):
+        clf = Stree(multiclass_strategy="other")
+        X, y = load_wine(return_X_y=True)
+        with self.assertRaises(ValueError):
+            clf.fit(X, y)
+
+    def test_multiclass_strategy(self):
+        X, y = load_wine(return_X_y=True)
+        clf_o = Stree(multiclass_strategy="ovo")
+        clf_r = Stree(multiclass_strategy="ovr")
+        score_o = clf_o.fit(X, y).score(X, y)
+        score_r = clf_r.fit(X, y).score(X, y)
+        self.assertEqual(1.0, score_o)
+        self.assertEqual(0.9269662921348315, score_r)
+
+    def test_incompatible_hyperparameters(self):
+        X, y = load_wine(return_X_y=True)
+        clf = Stree(kernel="liblinear", multiclass_strategy="ovo")
+        with self.assertRaises(ValueError):
+            clf.fit(X, y)
+        clf = Stree(multiclass_strategy="ovo", split_criteria="max_samples")
+        with self.assertRaises(ValueError):
+            clf.fit(X, y)