mirror of
https://github.com/Doctorado-ML/STree.git
synced 2025-08-17 16:36:01 +00:00
Compare commits
22 Commits
new_predic
...
master
Author | SHA1 | Date | |
---|---|---|---|
cb80e8606b
|
|||
c93d3fbcc7
|
|||
f4ca4bbd5b
|
|||
e676ddbfcc
|
|||
|
dc637018e8 | ||
517013be09
|
|||
941c2ff5e0
|
|||
2ebf48145d
|
|||
7fbfd3622e
|
|||
bc839a80d6
|
|||
ba15ea2cc0
|
|||
85b56785c8
|
|||
b627bb7531
|
|||
5f8ca8f3bb
|
|||
|
fb8b9b344f | ||
036d1ba2a7
|
|||
4de74973b8
|
|||
|
28dd04b95a | ||
|
542bbce7db
|
||
|
5b791bc5bf | ||
|
c37f044e3a | ||
|
2f6ae648a1 |
58
.github/workflows/codeql-analysis.yml
vendored
58
.github/workflows/codeql-analysis.yml
vendored
@@ -2,12 +2,12 @@ name: "CodeQL"
|
|||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
branches: [ master ]
|
branches: [master]
|
||||||
pull_request:
|
pull_request:
|
||||||
# The branches below must be a subset of the branches above
|
# The branches below must be a subset of the branches above
|
||||||
branches: [ master ]
|
branches: [master]
|
||||||
schedule:
|
schedule:
|
||||||
- cron: '16 17 * * 3'
|
- cron: "16 17 * * 3"
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
analyze:
|
analyze:
|
||||||
@@ -17,40 +17,40 @@ jobs:
|
|||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
language: [ 'python' ]
|
language: ["python"]
|
||||||
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
|
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
|
||||||
# Learn more:
|
# Learn more:
|
||||||
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
|
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v2
|
||||||
|
|
||||||
# Initializes the CodeQL tools for scanning.
|
# Initializes the CodeQL tools for scanning.
|
||||||
- name: Initialize CodeQL
|
- name: Initialize CodeQL
|
||||||
uses: github/codeql-action/init@v1
|
uses: github/codeql-action/init@v2
|
||||||
with:
|
with:
|
||||||
languages: ${{ matrix.language }}
|
languages: ${{ matrix.language }}
|
||||||
# If you wish to specify custom queries, you can do so here or in a config file.
|
# If you wish to specify custom queries, you can do so here or in a config file.
|
||||||
# By default, queries listed here will override any specified in a config file.
|
# By default, queries listed here will override any specified in a config file.
|
||||||
# Prefix the list here with "+" to use these queries and those in the config file.
|
# Prefix the list here with "+" to use these queries and those in the config file.
|
||||||
# queries: ./path/to/local/query, your-org/your-repo/queries@main
|
# queries: ./path/to/local/query, your-org/your-repo/queries@main
|
||||||
|
|
||||||
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
|
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
|
||||||
# If this step fails, then you should remove it and run the build manually (see below)
|
# If this step fails, then you should remove it and run the build manually (see below)
|
||||||
- name: Autobuild
|
- name: Autobuild
|
||||||
uses: github/codeql-action/autobuild@v1
|
uses: github/codeql-action/autobuild@v2
|
||||||
|
|
||||||
# ℹ️ Command-line programs to run using the OS shell.
|
# ℹ️ Command-line programs to run using the OS shell.
|
||||||
# 📚 https://git.io/JvXDl
|
# 📚 https://git.io/JvXDl
|
||||||
|
|
||||||
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
|
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
|
||||||
# and modify them (or add more) to build your code if your project
|
# and modify them (or add more) to build your code if your project
|
||||||
# uses a compiled language
|
# uses a compiled language
|
||||||
|
|
||||||
#- run: |
|
#- run: |
|
||||||
# make bootstrap
|
# make bootstrap
|
||||||
# make release
|
# make release
|
||||||
|
|
||||||
- name: Perform CodeQL Analysis
|
- name: Perform CodeQL Analysis
|
||||||
uses: github/codeql-action/analyze@v1
|
uses: github/codeql-action/analyze@v2
|
||||||
|
10
.github/workflows/main.yml
vendored
10
.github/workflows/main.yml
vendored
@@ -13,12 +13,12 @@ jobs:
|
|||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
os: [macos-latest, ubuntu-latest, windows-latest]
|
os: [macos-latest, ubuntu-latest, windows-latest]
|
||||||
python: [3.8]
|
python: [3.11, 3.12]
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v2
|
- uses: actions/checkout@v4
|
||||||
- name: Set up Python ${{ matrix.python }}
|
- name: Set up Python ${{ matrix.python }}
|
||||||
uses: actions/setup-python@v2
|
uses: actions/setup-python@v5
|
||||||
with:
|
with:
|
||||||
python-version: ${{ matrix.python }}
|
python-version: ${{ matrix.python }}
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
@@ -28,14 +28,14 @@ jobs:
|
|||||||
pip install -q --upgrade codecov coverage black flake8 codacy-coverage
|
pip install -q --upgrade codecov coverage black flake8 codacy-coverage
|
||||||
- name: Lint
|
- name: Lint
|
||||||
run: |
|
run: |
|
||||||
black --check --diff stree
|
# black --check --diff stree
|
||||||
flake8 --count stree
|
flake8 --count stree
|
||||||
- name: Tests
|
- name: Tests
|
||||||
run: |
|
run: |
|
||||||
coverage run -m unittest -v stree.tests
|
coverage run -m unittest -v stree.tests
|
||||||
coverage xml
|
coverage xml
|
||||||
- name: Upload coverage to Codecov
|
- name: Upload coverage to Codecov
|
||||||
uses: codecov/codecov-action@v1
|
uses: codecov/codecov-action@v4
|
||||||
with:
|
with:
|
||||||
token: ${{ secrets.CODECOV_TOKEN }}
|
token: ${{ secrets.CODECOV_TOKEN }}
|
||||||
files: ./coverage.xml
|
files: ./coverage.xml
|
||||||
|
@@ -3,8 +3,12 @@ version: 2
|
|||||||
sphinx:
|
sphinx:
|
||||||
configuration: docs/source/conf.py
|
configuration: docs/source/conf.py
|
||||||
|
|
||||||
|
build:
|
||||||
|
os: ubuntu-22.04
|
||||||
|
tools:
|
||||||
|
python: "3.12"
|
||||||
|
|
||||||
python:
|
python:
|
||||||
version: 3.8
|
|
||||||
install:
|
install:
|
||||||
- requirements: requirements.txt
|
- requirements: requirements.txt
|
||||||
- requirements: docs/requirements.txt
|
- requirements: docs/requirements.txt
|
1
MANIFEST.in
Normal file
1
MANIFEST.in
Normal file
@@ -0,0 +1 @@
|
|||||||
|
include README.md LICENSE
|
44
Makefile
44
Makefile
@@ -1,46 +1,36 @@
|
|||||||
SHELL := /bin/bash
|
SHELL := /bin/bash
|
||||||
.DEFAULT_GOAL := help
|
.DEFAULT_GOAL := help
|
||||||
.PHONY: coverage deps help lint push test doc build
|
.PHONY: audit coverage help lint test doc doc-clean build
|
||||||
|
|
||||||
coverage: ## Run tests with coverage
|
coverage: ## Run tests with coverage
|
||||||
coverage erase
|
@coverage erase
|
||||||
coverage run -m unittest -v stree.tests
|
@coverage run -m unittest -v stree.tests
|
||||||
coverage report -m
|
@coverage report -m
|
||||||
|
|
||||||
deps: ## Install dependencies
|
lint: ## Lint source files
|
||||||
pip install -r requirements.txt
|
@black stree
|
||||||
|
@flake8 stree
|
||||||
devdeps: ## Install development dependencies
|
|
||||||
pip install black pip-audit flake8 mypy coverage
|
|
||||||
|
|
||||||
lint: ## Lint and static-check
|
|
||||||
black stree
|
|
||||||
flake8 stree
|
|
||||||
mypy stree
|
|
||||||
|
|
||||||
push: ## Push code with tags
|
|
||||||
git push && git push --tags
|
|
||||||
|
|
||||||
test: ## Run tests
|
test: ## Run tests
|
||||||
python -m unittest -v stree.tests
|
@python -m unittest -v stree.tests
|
||||||
|
|
||||||
doc: ## Update documentation
|
doc: ## Update documentation
|
||||||
make -C docs --makefile=Makefile html
|
@make -C docs --makefile=Makefile html
|
||||||
|
|
||||||
build: ## Build package
|
build: ## Build package
|
||||||
rm -fr dist/*
|
@rm -fr dist/*
|
||||||
rm -fr build/*
|
@rm -fr build/*
|
||||||
python setup.py sdist bdist_wheel
|
@hatch build
|
||||||
|
|
||||||
doc-clean: ## Update documentation
|
doc-clean: ## Clean documentation folders
|
||||||
make -C docs --makefile=Makefile clean
|
@make -C docs --makefile=Makefile clean
|
||||||
|
|
||||||
audit: ## Audit pip
|
audit: ## Audit pip
|
||||||
pip-audit
|
@pip-audit
|
||||||
|
|
||||||
help: ## Show help message
|
help: ## Show this help message
|
||||||
@IFS=$$'\n' ; \
|
@IFS=$$'\n' ; \
|
||||||
help_lines=(`fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/##/:/'`); \
|
help_lines=(`grep -Fh "##" $(MAKEFILE_LIST) | grep -Fv fgrep | sed -e 's/\\$$//' | sed -e 's/##/:/'`); \
|
||||||
printf "%s\n\n" "Usage: make [task]"; \
|
printf "%s\n\n" "Usage: make [task]"; \
|
||||||
printf "%-20s %s\n" "task" "help" ; \
|
printf "%-20s %s\n" "task" "help" ; \
|
||||||
printf "%-20s %s\n" "------" "----" ; \
|
printf "%-20s %s\n" "------" "----" ; \
|
||||||
|
15
README.md
15
README.md
@@ -1,21 +1,23 @@
|
|||||||
|
# STree
|
||||||
|
|
||||||

|

|
||||||
|
[](https://github.com/Doctorado-ML/STree/actions/workflows/codeql-analysis.yml)
|
||||||
[](https://codecov.io/gh/doctorado-ml/stree)
|
[](https://codecov.io/gh/doctorado-ml/stree)
|
||||||
[](https://www.codacy.com/gh/Doctorado-ML/STree?utm_source=github.com&utm_medium=referral&utm_content=Doctorado-ML/STree&utm_campaign=Badge_Grade)
|
[](https://www.codacy.com/gh/Doctorado-ML/STree?utm_source=github.com&utm_medium=referral&utm_content=Doctorado-ML/STree&utm_campaign=Badge_Grade)
|
||||||
[](https://lgtm.com/projects/g/Doctorado-ML/STree/context:python)
|
|
||||||
[](https://badge.fury.io/py/STree)
|
[](https://badge.fury.io/py/STree)
|
||||||

|

|
||||||
|
[](https://deepwiki.com/Doctorado-ML/STree)
|
||||||
[](https://zenodo.org/badge/latestdoi/262658230)
|
[](https://zenodo.org/badge/latestdoi/262658230)
|
||||||
|
|
||||||
# STree
|

|
||||||
|
|
||||||
Oblique Tree classifier based on SVM nodes. The nodes are built and splitted with sklearn SVC models. Stree is a sklearn estimator and can be integrated in pipelines, grid searches, etc.
|
Oblique Tree classifier based on SVM nodes. The nodes are built and splitted with sklearn SVC models. Stree is a sklearn estimator and can be integrated in pipelines, grid searches, etc.
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
pip install git+https://github.com/doctorado-ml/stree
|
pip install Stree
|
||||||
```
|
```
|
||||||
|
|
||||||
## Documentation
|
## Documentation
|
||||||
@@ -50,7 +52,8 @@ Can be found in [stree.readthedocs.io](https://stree.readthedocs.io/en/stable/)
|
|||||||
| | criterion | {“gini”, “entropy”} | entropy | The function to measure the quality of a split (only used if max_features != num_features). <br>Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain. |
|
| | criterion | {“gini”, “entropy”} | entropy | The function to measure the quality of a split (only used if max_features != num_features). <br>Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain. |
|
||||||
| | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) for any |
|
| | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) for any |
|
||||||
| | max_features | \<int\>, \<float\> <br><br>or {“auto”, “sqrt”, “log2”} | None | The number of features to consider when looking for the split:<br>If int, then consider max_features features at each split.<br>If float, then max_features is a fraction and int(max_features \* n_features) features are considered at each split.<br>If “auto”, then max_features=sqrt(n_features).<br>If “sqrt”, then max_features=sqrt(n_features).<br>If “log2”, then max_features=log2(n_features).<br>If None, then max_features=n_features. |
|
| | max_features | \<int\>, \<float\> <br><br>or {“auto”, “sqrt”, “log2”} | None | The number of features to consider when looking for the split:<br>If int, then consider max_features features at each split.<br>If float, then max_features is a fraction and int(max_features \* n_features) features are considered at each split.<br>If “auto”, then max_features=sqrt(n_features).<br>If “sqrt”, then max_features=sqrt(n_features).<br>If “log2”, then max_features=log2(n_features).<br>If None, then max_features=n_features. |
|
||||||
| | splitter | {"best", "random", "trandom", "mutual", "cfs", "fcbf", "iwss"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features). Supported strategies are: **“best”**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **“random”**: The algorithm generates 5 candidates and choose the best (max. info. gain) of them. **“trandom”**: The algorithm generates only one random combination. **"mutual"**: Chooses the best features w.r.t. their mutual info with the label. **"cfs"**: Apply Correlation-based Feature Selection. **"fcbf"**: Apply Fast Correlation-Based Filter. **"iwss"**: IWSS based algorithm |
|
| | splitter | {"best", "random", "trandom", "mutual", "cfs", "fcbf", "iwss"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features).
|
||||||
|
Supported strategies are: **“best”**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **“random”**: The algorithm generates 5 candidates and choose the best (max. info. gain) of them. **“trandom”**: The algorithm generates only one random combination. **"mutual"**: Chooses the best features w.r.t. their mutual info with the label. **"cfs"**: Apply Correlation-based Feature Selection. **"fcbf"**: Apply Fast Correlation-Based Filter. **"iwss"**: IWSS based algorithm |
|
||||||
| | normalize | \<bool\> | False | If standardization of features should be applied on each node with the samples that reach it |
|
| | normalize | \<bool\> | False | If standardization of features should be applied on each node with the samples that reach it |
|
||||||
| \* | multiclass_strategy | {"ovo", "ovr"} | "ovo" | Strategy to use with multiclass datasets, **"ovo"**: one versus one. **"ovr"**: one versus rest |
|
| \* | multiclass_strategy | {"ovo", "ovr"} | "ovo" | Strategy to use with multiclass datasets, **"ovo"**: one versus one. **"ovr"**: one versus rest |
|
||||||
|
|
||||||
|
@@ -1,9 +1,10 @@
|
|||||||
Siterator
|
Siterator
|
||||||
=========
|
=========
|
||||||
|
|
||||||
.. automodule:: Splitter
|
.. automodule:: stree
|
||||||
.. autoclass:: Siterator
|
.. autoclass:: Siterator
|
||||||
:members:
|
:members:
|
||||||
:undoc-members:
|
:undoc-members:
|
||||||
:private-members:
|
:private-members:
|
||||||
:show-inheritance:
|
:show-inheritance:
|
||||||
|
:noindex:
|
@@ -1,9 +1,9 @@
|
|||||||
Snode
|
Snode
|
||||||
=====
|
=====
|
||||||
|
|
||||||
.. automodule:: Splitter
|
.. autoclass:: stree.Splitter.Snode
|
||||||
.. autoclass:: Snode
|
|
||||||
:members:
|
:members:
|
||||||
:undoc-members:
|
:undoc-members:
|
||||||
:private-members:
|
:private-members:
|
||||||
:show-inheritance:
|
:show-inheritance:
|
||||||
|
:noindex:
|
@@ -1,9 +1,10 @@
|
|||||||
Splitter
|
Splitter
|
||||||
========
|
========
|
||||||
|
|
||||||
.. automodule:: Splitter
|
.. automodule:: stree.Splitter
|
||||||
.. autoclass:: Splitter
|
.. autoclass:: Splitter
|
||||||
:members:
|
:members:
|
||||||
:undoc-members:
|
:undoc-members:
|
||||||
:private-members:
|
:private-members:
|
||||||
:show-inheritance:
|
:show-inheritance:
|
||||||
|
:noindex:
|
@@ -6,4 +6,5 @@ Stree
|
|||||||
:members:
|
:members:
|
||||||
:undoc-members:
|
:undoc-members:
|
||||||
:private-members:
|
:private-members:
|
||||||
:show-inheritance:
|
:show-inheritance:
|
||||||
|
:noindex:
|
@@ -6,27 +6,21 @@
|
|||||||
|
|
||||||
# -- Path setup --------------------------------------------------------------
|
# -- Path setup --------------------------------------------------------------
|
||||||
|
|
||||||
# If extensions (or modules to document with autodoc) are in another directory,
|
|
||||||
# add these directories to sys.path here. If the directory is relative to the
|
|
||||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
|
||||||
#
|
|
||||||
import os
|
|
||||||
import sys
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
|
||||||
|
|
||||||
import stree
|
import stree
|
||||||
|
|
||||||
sys.path.insert(0, os.path.abspath("../../stree/"))
|
|
||||||
|
|
||||||
|
|
||||||
# -- Project information -----------------------------------------------------
|
# -- Project information -----------------------------------------------------
|
||||||
|
|
||||||
project = "STree"
|
project = "STree"
|
||||||
copyright = "2020 - 2021, Ricardo Montañana Gómez"
|
copyright = "2020 - 2024, Ricardo Montañana Gómez"
|
||||||
author = "Ricardo Montañana Gómez"
|
author = "Ricardo Montañana Gómez"
|
||||||
|
|
||||||
# The full version, including alpha/beta/rc tags
|
# The full version, including alpha/beta/rc tags
|
||||||
version = stree.__version__
|
version = release = stree.__version__
|
||||||
release = version
|
|
||||||
|
|
||||||
|
|
||||||
# -- General configuration ---------------------------------------------------
|
# -- General configuration ---------------------------------------------------
|
||||||
|
|
||||||
|
@@ -3,20 +3,20 @@
|
|||||||
| | **Hyperparameter** | **Type/Values** | **Default** | **Meaning** |
|
| | **Hyperparameter** | **Type/Values** | **Default** | **Meaning** |
|
||||||
| --- | ------------------- | -------------------------------------------------------------- | ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
| --- | ------------------- | -------------------------------------------------------------- | ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| \* | C | \<float\> | 1.0 | Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. |
|
| \* | C | \<float\> | 1.0 | Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. |
|
||||||
| \* | kernel | {"liblinear", "linear", "poly", "rbf", "sigmoid"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of ‘liblinear’, ‘linear’, ‘poly’ or ‘rbf’. liblinear uses [liblinear](https://www.csie.ntu.edu.tw/~cjlin/liblinear/) library and the rest uses [libsvm](https://www.csie.ntu.edu.tw/~cjlin/libsvm/) library through scikit-learn library |
|
| \* | kernel | {"liblinear", "linear", "poly", "rbf", "sigmoid"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of ‘liblinear’, ‘linear’, ‘poly’ or ‘rbf’.<br>liblinear uses [liblinear](https://www.csie.ntu.edu.tw/~cjlin/liblinear/) library and the rest uses [libsvm](https://www.csie.ntu.edu.tw/~cjlin/libsvm/) library through scikit-learn library |
|
||||||
| \* | max_iter | \<int\> | 1e5 | Hard limit on iterations within solver, or -1 for no limit. |
|
| \* | max_iter | \<int\> | 1e5 | Hard limit on iterations within solver, or -1 for no limit. |
|
||||||
| \* | random_state | \<int\> | None | Controls the pseudo random number generation for shuffling the data for probability estimates. Ignored when probability is False.<br>Pass an int for reproducible output across multiple function calls |
|
| \* | random_state | \<int\> | None | Controls the pseudo random number generation for shuffling the data for probability estimates. Ignored when probability is False.<br>Pass an int for reproducible output across multiple function calls |
|
||||||
| | max_depth | \<int\> | None | Specifies the maximum depth of the tree |
|
| | max_depth | \<int\> | None | Specifies the maximum depth of the tree |
|
||||||
| \* | tol | \<float\> | 1e-4 | Tolerance for stopping criterion. |
|
| \* | tol | \<float\> | 1e-4 | Tolerance for stopping criterion. |
|
||||||
| \* | degree | \<int\> | 3 | Degree of the polynomial kernel function (‘poly’). Ignored by all other kernels. |
|
| \* | degree | \<int\> | 3 | Degree of the polynomial kernel function (‘poly’). Ignored by all other kernels. |
|
||||||
| \* | gamma | {"scale", "auto"} or \<float\> | scale | Kernel coefficient for ‘rbf’, ‘poly’ and ‘sigmoid’.<br>if gamma='scale' (default) is passed then it uses 1 / (n_features \* X.var()) as value of gamma,<br>if ‘auto’, uses 1 / n_features. |
|
| \* | gamma | {"scale", "auto"} or \<float\> | scale | Kernel coefficient for ‘rbf’, ‘poly’ and ‘sigmoid’.<br>if gamma='scale' (default) is passed then it uses 1 / (n_features \* X.var()) as value of gamma,<br>if ‘auto’, uses 1 / n_features. |
|
||||||
| | split_criteria | {"impurity", "max_samples"} | impurity | Decides (just in case of a multi class classification) which column (class) use to split the dataset in a node\*\*. max_samples is incompatible with 'ovo' multiclass_strategy |
|
| | split_criteria | {"impurity", "max_samples"} | impurity | Decides (just in case of a multi class classification) which column (class) use to split the dataset in a node\*\*.<br>max_samples is incompatible with 'ovo' multiclass_strategy |
|
||||||
| | criterion | {“gini”, “entropy”} | entropy | The function to measure the quality of a split (only used if max_features != num_features). <br>Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain. |
|
| | criterion | {“gini”, “entropy”} | entropy | The function to measure the quality of a split (only used if max_features != num_features).<br>Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain. |
|
||||||
| | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) for any |
|
| | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) for any |
|
||||||
| | max_features | \<int\>, \<float\> <br><br>or {“auto”, “sqrt”, “log2”} | None | The number of features to consider when looking for the split:<br>If int, then consider max_features features at each split.<br>If float, then max_features is a fraction and int(max_features \* n_features) features are considered at each split.<br>If “auto”, then max_features=sqrt(n_features).<br>If “sqrt”, then max_features=sqrt(n_features).<br>If “log2”, then max_features=log2(n_features).<br>If None, then max_features=n_features. |
|
| | max_features | \<int\>, \<float\> <br><br>or {“auto”, “sqrt”, “log2”} | None | The number of features to consider when looking for the split:<br>If int, then consider max_features features at each split.<br>If float, then max_features is a fraction and int(max_features \* n_features) features are considered at each split.<br>If “auto”, then max_features=sqrt(n_features).<br>If “sqrt”, then max_features=sqrt(n_features).<br>If “log2”, then max_features=log2(n_features).<br>If None, then max_features=n_features. |
|
||||||
| | splitter | {"best", "random", "trandom", "mutual", "cfs", "fcbf", "iwss"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features). Supported strategies are: **“best”**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **“random”**: The algorithm generates 5 candidates and choose the best (max. info. gain) of them. **“trandom”**: The algorithm generates only one random combination. **"mutual"**: Chooses the best features w.r.t. their mutual info with the label. **"cfs"**: Apply Correlation-based Feature Selection. **"fcbf"**: Apply Fast Correlation-Based Filter. **"iwss"**: IWSS based algorithm |
|
| | splitter | {"best", "random", "trandom", "mutual", "cfs", "fcbf", "iwss"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features).<br>Supported strategies are:<br>**“best”**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features.<br>**“random”**: The algorithm generates 5 candidates and choose the best (max. info. gain) of them.<br>**“trandom”**: The algorithm generates only one random combination.<br>**"mutual"**: Chooses the best features w.r.t. their mutual info with the label.<br>**"cfs"**: Apply Correlation-based Feature Selection.<br>**"fcbf"**: Apply Fast Correlation-Based Filter.<br>**"iwss"**: IWSS based algorithm |
|
||||||
| | normalize | \<bool\> | False | If standardization of features should be applied on each node with the samples that reach it |
|
| | normalize | \<bool\> | False | If standardization of features should be applied on each node with the samples that reach it |
|
||||||
| \* | multiclass_strategy | {"ovo", "ovr"} | "ovo" | Strategy to use with multiclass datasets, **"ovo"**: one versus one. **"ovr"**: one versus rest |
|
| \* | multiclass_strategy | {"ovo", "ovr"} | "ovo" | Strategy to use with multiclass datasets:<br>**"ovo"**: one versus one.<br>**"ovr"**: one versus rest |
|
||||||
|
|
||||||
\* Hyperparameter used by the support vector classifier of every node
|
\* Hyperparameter used by the support vector classifier of every node
|
||||||
|
|
||||||
|
@@ -5,7 +5,6 @@ Welcome to STree's documentation!
|
|||||||
:caption: Contents:
|
:caption: Contents:
|
||||||
:titlesonly:
|
:titlesonly:
|
||||||
|
|
||||||
|
|
||||||
stree
|
stree
|
||||||
install
|
install
|
||||||
hyperparameters
|
hyperparameters
|
||||||
|
@@ -178,7 +178,7 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Stree\n",
|
"# Stree\n",
|
||||||
"stree = Stree(random_state=random_state, C=.01, max_iter=1e3, kernel=\"liblinear\", multiclass_strategy=\"ovr\")"
|
"stree = Stree(random_state=random_state, C=.01, max_iter=1000, kernel=\"liblinear\", multiclass_strategy=\"ovr\")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -198,7 +198,7 @@
|
|||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# SVC (linear)\n",
|
"# SVC (linear)\n",
|
||||||
"svc = LinearSVC(random_state=random_state, C=.01, max_iter=1e3)"
|
"svc = LinearSVC(random_state=random_state, C=.01, max_iter=1000)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@@ -1,253 +1,253 @@
|
|||||||
{
|
{
|
||||||
"cells": [
|
"cells": [
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"# Test Gridsearch\n",
|
"# Test Gridsearch\n",
|
||||||
"with different kernels and different configurations"
|
"with different kernels and different configurations"
|
||||||
]
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Setup\n",
|
||||||
|
"Uncomment the next cell if STree is not already installed"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"#\n",
|
||||||
|
"# Google Colab setup\n",
|
||||||
|
"#\n",
|
||||||
|
"#!pip install git+https://github.com/doctorado-ml/stree\n",
|
||||||
|
"!pip install pandas"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"colab": {},
|
||||||
|
"colab_type": "code",
|
||||||
|
"id": "zIHKVxthDZEa"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import random\n",
|
||||||
|
"import os\n",
|
||||||
|
"import pandas as pd\n",
|
||||||
|
"import numpy as np\n",
|
||||||
|
"from sklearn.ensemble import AdaBoostClassifier\n",
|
||||||
|
"from sklearn.svm import LinearSVC\n",
|
||||||
|
"from sklearn.model_selection import GridSearchCV, train_test_split\n",
|
||||||
|
"from stree import Stree"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"colab": {},
|
||||||
|
"colab_type": "code",
|
||||||
|
"id": "IEmq50QgDZEi"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"if not os.path.isfile('data/creditcard.csv'):\n",
|
||||||
|
" !wget --no-check-certificate --content-disposition http://nube.jccm.es/index.php/s/Zs7SYtZQJ3RQ2H2/download\n",
|
||||||
|
" !tar xzf creditcard.tgz"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"colab": {},
|
||||||
|
"colab_type": "code",
|
||||||
|
"id": "z9Q-YUfBDZEq",
|
||||||
|
"outputId": "afc822fb-f16a-4302-8a67-2b9e2880159b",
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"random_state=1\n",
|
||||||
|
"\n",
|
||||||
|
"def load_creditcard(n_examples=0):\n",
|
||||||
|
" df = pd.read_csv('data/creditcard.csv')\n",
|
||||||
|
" print(\"Fraud: {0:.3f}% {1}\".format(df.Class[df.Class == 1].count()*100/df.shape[0], df.Class[df.Class == 1].count()))\n",
|
||||||
|
" print(\"Valid: {0:.3f}% {1}\".format(df.Class[df.Class == 0].count()*100/df.shape[0], df.Class[df.Class == 0].count()))\n",
|
||||||
|
" y = df.Class\n",
|
||||||
|
" X = df.drop(['Class', 'Time', 'Amount'], axis=1).values\n",
|
||||||
|
" if n_examples > 0:\n",
|
||||||
|
" # Take first n_examples samples\n",
|
||||||
|
" X = X[:n_examples, :]\n",
|
||||||
|
" y = y[:n_examples, :]\n",
|
||||||
|
" else:\n",
|
||||||
|
" # Take all the positive samples with a number of random negatives\n",
|
||||||
|
" if n_examples < 0:\n",
|
||||||
|
" Xt = X[(y == 1).ravel()]\n",
|
||||||
|
" yt = y[(y == 1).ravel()]\n",
|
||||||
|
" indices = random.sample(range(X.shape[0]), -1 * n_examples)\n",
|
||||||
|
" X = np.append(Xt, X[indices], axis=0)\n",
|
||||||
|
" y = np.append(yt, y[indices], axis=0)\n",
|
||||||
|
" print(\"X.shape\", X.shape, \" y.shape\", y.shape)\n",
|
||||||
|
" print(\"Fraud: {0:.3f}% {1}\".format(len(y[y == 1])*100/X.shape[0], len(y[y == 1])))\n",
|
||||||
|
" print(\"Valid: {0:.3f}% {1}\".format(len(y[y == 0]) * 100 / X.shape[0], len(y[y == 0])))\n",
|
||||||
|
" Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, train_size=0.7, shuffle=True, random_state=random_state, stratify=y)\n",
|
||||||
|
" return Xtrain, Xtest, ytrain, ytest\n",
|
||||||
|
"\n",
|
||||||
|
"data = load_creditcard(-1000) # Take all true samples + 1000 of the others\n",
|
||||||
|
"# data = load_creditcard(5000) # Take the first 5000 samples\n",
|
||||||
|
"# data = load_creditcard(0) # Take all the samples\n",
|
||||||
|
"\n",
|
||||||
|
"Xtrain = data[0]\n",
|
||||||
|
"Xtest = data[1]\n",
|
||||||
|
"ytrain = data[2]\n",
|
||||||
|
"ytest = data[3]"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Tests"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"colab": {},
|
||||||
|
"colab_type": "code",
|
||||||
|
"id": "HmX3kR4PDZEw"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"parameters = [{\n",
|
||||||
|
" 'base_estimator': [Stree(random_state=random_state)],\n",
|
||||||
|
" 'n_estimators': [10, 25],\n",
|
||||||
|
" 'learning_rate': [.5, 1],\n",
|
||||||
|
" 'estimator__split_criteria': ['max_samples', 'impurity'],\n",
|
||||||
|
" 'estimator__tol': [.1, 1e-02],\n",
|
||||||
|
" 'estimator__max_depth': [3, 5, 7],\n",
|
||||||
|
" 'estimator__C': [1, 7, 55],\n",
|
||||||
|
" 'estimator__kernel': ['linear']\n",
|
||||||
|
"},\n",
|
||||||
|
"{\n",
|
||||||
|
" 'base_estimator': [Stree(random_state=random_state)],\n",
|
||||||
|
" 'n_estimators': [10, 25],\n",
|
||||||
|
" 'learning_rate': [.5, 1],\n",
|
||||||
|
" 'estimator__split_criteria': ['max_samples', 'impurity'],\n",
|
||||||
|
" 'estimator__tol': [.1, 1e-02],\n",
|
||||||
|
" 'estimator__max_depth': [3, 5, 7],\n",
|
||||||
|
" 'estimator__C': [1, 7, 55],\n",
|
||||||
|
" 'estimator__degree': [3, 5, 7],\n",
|
||||||
|
" 'estimator__kernel': ['poly']\n",
|
||||||
|
"},\n",
|
||||||
|
"{\n",
|
||||||
|
" 'base_estimator': [Stree(random_state=random_state)],\n",
|
||||||
|
" 'n_estimators': [10, 25],\n",
|
||||||
|
" 'learning_rate': [.5, 1],\n",
|
||||||
|
" 'estimator__split_criteria': ['max_samples', 'impurity'],\n",
|
||||||
|
" 'estimator__tol': [.1, 1e-02],\n",
|
||||||
|
" 'estimator__max_depth': [3, 5, 7],\n",
|
||||||
|
" 'estimator__C': [1, 7, 55],\n",
|
||||||
|
" 'estimator__gamma': [.1, 1, 10],\n",
|
||||||
|
" 'estimator__kernel': ['rbf']\n",
|
||||||
|
"}]"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"Stree().get_params()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"colab": {},
|
||||||
|
"colab_type": "code",
|
||||||
|
"id": "CrcB8o6EDZE5",
|
||||||
|
"outputId": "7703413a-d563-4289-a13b-532f38f82762",
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"clf = AdaBoostClassifier(random_state=random_state, algorithm=\"SAMME\")\n",
|
||||||
|
"grid = GridSearchCV(clf, parameters, verbose=5, n_jobs=-1, return_train_score=True)\n",
|
||||||
|
"grid.fit(Xtrain, ytrain)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"colab": {},
|
||||||
|
"colab_type": "code",
|
||||||
|
"id": "ZjX88NoYDZE8",
|
||||||
|
"outputId": "285163c8-fa33-4915-8ae7-61c4f7844344",
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"print(\"Best estimator: \", grid.best_estimator_)\n",
|
||||||
|
"print(\"Best hyperparameters: \", grid.best_params_)\n",
|
||||||
|
"print(\"Best accuracy: \", grid.best_score_)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Best estimator: AdaBoostClassifier(algorithm='SAMME',\n",
|
||||||
|
" base_estimator=Stree(C=55, max_depth=7, random_state=1,\n",
|
||||||
|
" split_criteria='max_samples', tol=0.1),\n",
|
||||||
|
" learning_rate=0.5, n_estimators=25, random_state=1)\n",
|
||||||
|
"Best hyperparameters: {'base_estimator': Stree(C=55, max_depth=7, random_state=1, split_criteria='max_samples', tol=0.1), 'estimator__C': 55, 'estimator__kernel': 'linear', 'estimator__max_depth': 7, 'estimator__split_criteria': 'max_samples', 'estimator__tol': 0.1, 'learning_rate': 0.5, 'n_estimators': 25}"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Best accuracy: 0.9511777695988222"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"colab": {
|
||||||
|
"name": "gridsearch.ipynb",
|
||||||
|
"provenance": []
|
||||||
|
},
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.8.2-final"
|
||||||
|
}
|
||||||
},
|
},
|
||||||
{
|
"nbformat": 4,
|
||||||
"cell_type": "markdown",
|
"nbformat_minor": 4
|
||||||
"metadata": {},
|
}
|
||||||
"source": [
|
|
||||||
"# Setup\n",
|
|
||||||
"Uncomment the next cell if STree is not already installed"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#\n",
|
|
||||||
"# Google Colab setup\n",
|
|
||||||
"#\n",
|
|
||||||
"#!pip install git+https://github.com/doctorado-ml/stree\n",
|
|
||||||
"!pip install pandas"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {
|
|
||||||
"colab": {},
|
|
||||||
"colab_type": "code",
|
|
||||||
"id": "zIHKVxthDZEa"
|
|
||||||
},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import random\n",
|
|
||||||
"import os\n",
|
|
||||||
"import pandas as pd\n",
|
|
||||||
"import numpy as np\n",
|
|
||||||
"from sklearn.ensemble import AdaBoostClassifier\n",
|
|
||||||
"from sklearn.svm import LinearSVC\n",
|
|
||||||
"from sklearn.model_selection import GridSearchCV, train_test_split\n",
|
|
||||||
"from stree import Stree"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {
|
|
||||||
"colab": {},
|
|
||||||
"colab_type": "code",
|
|
||||||
"id": "IEmq50QgDZEi"
|
|
||||||
},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"if not os.path.isfile('data/creditcard.csv'):\n",
|
|
||||||
" !wget --no-check-certificate --content-disposition http://nube.jccm.es/index.php/s/Zs7SYtZQJ3RQ2H2/download\n",
|
|
||||||
" !tar xzf creditcard.tgz"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {
|
|
||||||
"colab": {},
|
|
||||||
"colab_type": "code",
|
|
||||||
"id": "z9Q-YUfBDZEq",
|
|
||||||
"outputId": "afc822fb-f16a-4302-8a67-2b9e2880159b",
|
|
||||||
"tags": []
|
|
||||||
},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"random_state=1\n",
|
|
||||||
"\n",
|
|
||||||
"def load_creditcard(n_examples=0):\n",
|
|
||||||
" df = pd.read_csv('data/creditcard.csv')\n",
|
|
||||||
" print(\"Fraud: {0:.3f}% {1}\".format(df.Class[df.Class == 1].count()*100/df.shape[0], df.Class[df.Class == 1].count()))\n",
|
|
||||||
" print(\"Valid: {0:.3f}% {1}\".format(df.Class[df.Class == 0].count()*100/df.shape[0], df.Class[df.Class == 0].count()))\n",
|
|
||||||
" y = df.Class\n",
|
|
||||||
" X = df.drop(['Class', 'Time', 'Amount'], axis=1).values\n",
|
|
||||||
" if n_examples > 0:\n",
|
|
||||||
" # Take first n_examples samples\n",
|
|
||||||
" X = X[:n_examples, :]\n",
|
|
||||||
" y = y[:n_examples, :]\n",
|
|
||||||
" else:\n",
|
|
||||||
" # Take all the positive samples with a number of random negatives\n",
|
|
||||||
" if n_examples < 0:\n",
|
|
||||||
" Xt = X[(y == 1).ravel()]\n",
|
|
||||||
" yt = y[(y == 1).ravel()]\n",
|
|
||||||
" indices = random.sample(range(X.shape[0]), -1 * n_examples)\n",
|
|
||||||
" X = np.append(Xt, X[indices], axis=0)\n",
|
|
||||||
" y = np.append(yt, y[indices], axis=0)\n",
|
|
||||||
" print(\"X.shape\", X.shape, \" y.shape\", y.shape)\n",
|
|
||||||
" print(\"Fraud: {0:.3f}% {1}\".format(len(y[y == 1])*100/X.shape[0], len(y[y == 1])))\n",
|
|
||||||
" print(\"Valid: {0:.3f}% {1}\".format(len(y[y == 0]) * 100 / X.shape[0], len(y[y == 0])))\n",
|
|
||||||
" Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, train_size=0.7, shuffle=True, random_state=random_state, stratify=y)\n",
|
|
||||||
" return Xtrain, Xtest, ytrain, ytest\n",
|
|
||||||
"\n",
|
|
||||||
"data = load_creditcard(-1000) # Take all true samples + 1000 of the others\n",
|
|
||||||
"# data = load_creditcard(5000) # Take the first 5000 samples\n",
|
|
||||||
"# data = load_creditcard(0) # Take all the samples\n",
|
|
||||||
"\n",
|
|
||||||
"Xtrain = data[0]\n",
|
|
||||||
"Xtest = data[1]\n",
|
|
||||||
"ytrain = data[2]\n",
|
|
||||||
"ytest = data[3]"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"# Tests"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {
|
|
||||||
"colab": {},
|
|
||||||
"colab_type": "code",
|
|
||||||
"id": "HmX3kR4PDZEw"
|
|
||||||
},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"parameters = [{\n",
|
|
||||||
" 'base_estimator': [Stree(random_state=random_state)],\n",
|
|
||||||
" 'n_estimators': [10, 25],\n",
|
|
||||||
" 'learning_rate': [.5, 1],\n",
|
|
||||||
" 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
|
|
||||||
" 'base_estimator__tol': [.1, 1e-02],\n",
|
|
||||||
" 'base_estimator__max_depth': [3, 5, 7],\n",
|
|
||||||
" 'base_estimator__C': [1, 7, 55],\n",
|
|
||||||
" 'base_estimator__kernel': ['linear']\n",
|
|
||||||
"},\n",
|
|
||||||
"{\n",
|
|
||||||
" 'base_estimator': [Stree(random_state=random_state)],\n",
|
|
||||||
" 'n_estimators': [10, 25],\n",
|
|
||||||
" 'learning_rate': [.5, 1],\n",
|
|
||||||
" 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
|
|
||||||
" 'base_estimator__tol': [.1, 1e-02],\n",
|
|
||||||
" 'base_estimator__max_depth': [3, 5, 7],\n",
|
|
||||||
" 'base_estimator__C': [1, 7, 55],\n",
|
|
||||||
" 'base_estimator__degree': [3, 5, 7],\n",
|
|
||||||
" 'base_estimator__kernel': ['poly']\n",
|
|
||||||
"},\n",
|
|
||||||
"{\n",
|
|
||||||
" 'base_estimator': [Stree(random_state=random_state)],\n",
|
|
||||||
" 'n_estimators': [10, 25],\n",
|
|
||||||
" 'learning_rate': [.5, 1],\n",
|
|
||||||
" 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
|
|
||||||
" 'base_estimator__tol': [.1, 1e-02],\n",
|
|
||||||
" 'base_estimator__max_depth': [3, 5, 7],\n",
|
|
||||||
" 'base_estimator__C': [1, 7, 55],\n",
|
|
||||||
" 'base_estimator__gamma': [.1, 1, 10],\n",
|
|
||||||
" 'base_estimator__kernel': ['rbf']\n",
|
|
||||||
"}]"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"Stree().get_params()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {
|
|
||||||
"colab": {},
|
|
||||||
"colab_type": "code",
|
|
||||||
"id": "CrcB8o6EDZE5",
|
|
||||||
"outputId": "7703413a-d563-4289-a13b-532f38f82762",
|
|
||||||
"tags": []
|
|
||||||
},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"clf = AdaBoostClassifier(random_state=random_state, algorithm=\"SAMME\")\n",
|
|
||||||
"grid = GridSearchCV(clf, parameters, verbose=5, n_jobs=-1, return_train_score=True)\n",
|
|
||||||
"grid.fit(Xtrain, ytrain)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {
|
|
||||||
"colab": {},
|
|
||||||
"colab_type": "code",
|
|
||||||
"id": "ZjX88NoYDZE8",
|
|
||||||
"outputId": "285163c8-fa33-4915-8ae7-61c4f7844344",
|
|
||||||
"tags": []
|
|
||||||
},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(\"Best estimator: \", grid.best_estimator_)\n",
|
|
||||||
"print(\"Best hyperparameters: \", grid.best_params_)\n",
|
|
||||||
"print(\"Best accuracy: \", grid.best_score_)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"Best estimator: AdaBoostClassifier(algorithm='SAMME',\n",
|
|
||||||
" base_estimator=Stree(C=55, max_depth=7, random_state=1,\n",
|
|
||||||
" split_criteria='max_samples', tol=0.1),\n",
|
|
||||||
" learning_rate=0.5, n_estimators=25, random_state=1)\n",
|
|
||||||
"Best hyperparameters: {'base_estimator': Stree(C=55, max_depth=7, random_state=1, split_criteria='max_samples', tol=0.1), 'base_estimator__C': 55, 'base_estimator__kernel': 'linear', 'base_estimator__max_depth': 7, 'base_estimator__split_criteria': 'max_samples', 'base_estimator__tol': 0.1, 'learning_rate': 0.5, 'n_estimators': 25}"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"Best accuracy: 0.9511777695988222"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"metadata": {
|
|
||||||
"colab": {
|
|
||||||
"name": "gridsearch.ipynb",
|
|
||||||
"provenance": []
|
|
||||||
},
|
|
||||||
"kernelspec": {
|
|
||||||
"display_name": "Python 3",
|
|
||||||
"language": "python",
|
|
||||||
"name": "python3"
|
|
||||||
},
|
|
||||||
"language_info": {
|
|
||||||
"codemirror_mode": {
|
|
||||||
"name": "ipython",
|
|
||||||
"version": 3
|
|
||||||
},
|
|
||||||
"file_extension": ".py",
|
|
||||||
"mimetype": "text/x-python",
|
|
||||||
"name": "python",
|
|
||||||
"nbconvert_exporter": "python",
|
|
||||||
"pygments_lexer": "ipython3",
|
|
||||||
"version": "3.8.2-final"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"nbformat": 4,
|
|
||||||
"nbformat_minor": 4
|
|
||||||
}
|
|
||||||
|
@@ -1,5 +1,65 @@
|
|||||||
|
[build-system]
|
||||||
|
requires = ["hatchling"]
|
||||||
|
build-backend = "hatchling.build"
|
||||||
|
|
||||||
|
[project]
|
||||||
|
name = "STree"
|
||||||
|
dependencies = ["scikit-learn>1.0", "mufs"]
|
||||||
|
license = { file = "LICENSE" }
|
||||||
|
description = "Oblique decision tree with svm nodes."
|
||||||
|
readme = "README.md"
|
||||||
|
authors = [
|
||||||
|
{ name = "Ricardo Montañana", email = "ricardo.montanana@alu.uclm.es" },
|
||||||
|
]
|
||||||
|
dynamic = ['version']
|
||||||
|
requires-python = ">=3.11"
|
||||||
|
keywords = [
|
||||||
|
"scikit-learn",
|
||||||
|
"oblique-classifier",
|
||||||
|
"oblique-decision-tree",
|
||||||
|
"decision-tree",
|
||||||
|
"svm",
|
||||||
|
"svc",
|
||||||
|
]
|
||||||
|
classifiers = [
|
||||||
|
"Development Status :: 5 - Production/Stable",
|
||||||
|
"Intended Audience :: Science/Research",
|
||||||
|
"Intended Audience :: Developers",
|
||||||
|
"Topic :: Software Development",
|
||||||
|
"Topic :: Scientific/Engineering",
|
||||||
|
"License :: OSI Approved :: MIT License",
|
||||||
|
"Natural Language :: English",
|
||||||
|
"Operating System :: OS Independent",
|
||||||
|
"Programming Language :: Python :: 3.11",
|
||||||
|
"Programming Language :: Python :: 3.12",
|
||||||
|
]
|
||||||
|
|
||||||
|
[project.optional-dependencies]
|
||||||
|
dev = ["black", "flake8", "coverage", "hatch", "pip-audit"]
|
||||||
|
doc = ["sphinx", "myst-parser", "sphinx_rtd_theme", "sphinx-autodoc-typehints"]
|
||||||
|
|
||||||
|
[project.urls]
|
||||||
|
Code = "https://github.com/Doctorado-ML/STree"
|
||||||
|
Documentation = "https://stree.readthedocs.io/en/latest/index.html"
|
||||||
|
|
||||||
|
[tool.hatch.version]
|
||||||
|
path = "stree/_version.py"
|
||||||
|
|
||||||
|
[tool.hatch.build.targets.sdist]
|
||||||
|
include = ["/stree"]
|
||||||
|
|
||||||
|
[tool.coverage.run]
|
||||||
|
branch = true
|
||||||
|
source = ["stree"]
|
||||||
|
command_line = "-m unittest discover -s stree.tests"
|
||||||
|
|
||||||
|
[tool.coverage.report]
|
||||||
|
show_missing = true
|
||||||
|
fail_under = 100
|
||||||
|
|
||||||
[tool.black]
|
[tool.black]
|
||||||
line-length = 79
|
line-length = 79
|
||||||
|
target-version = ["py311"]
|
||||||
include = '\.pyi?$'
|
include = '\.pyi?$'
|
||||||
exclude = '''
|
exclude = '''
|
||||||
/(
|
/(
|
||||||
@@ -13,4 +73,4 @@ exclude = '''
|
|||||||
| build
|
| build
|
||||||
| dist
|
| dist
|
||||||
)/
|
)/
|
||||||
'''
|
'''
|
||||||
|
@@ -1,2 +1,3 @@
|
|||||||
scikit-learn>0.24
|
scikit-learn==1.5.2
|
||||||
mufs
|
coverage
|
||||||
|
mufs
|
||||||
|
@@ -1 +0,0 @@
|
|||||||
python-3.8
|
|
52
setup.py
52
setup.py
@@ -1,52 +0,0 @@
|
|||||||
import setuptools
|
|
||||||
import os
|
|
||||||
|
|
||||||
|
|
||||||
def readme():
|
|
||||||
with open("README.md") as f:
|
|
||||||
return f.read()
|
|
||||||
|
|
||||||
|
|
||||||
def get_data(field):
|
|
||||||
item = ""
|
|
||||||
file_name = "_version.py" if field == "version" else "__init__.py"
|
|
||||||
with open(os.path.join("stree", file_name)) as f:
|
|
||||||
for line in f.readlines():
|
|
||||||
if line.startswith(f"__{field}__"):
|
|
||||||
delim = '"' if '"' in line else "'"
|
|
||||||
item = line.split(delim)[1]
|
|
||||||
break
|
|
||||||
else:
|
|
||||||
raise RuntimeError(f"Unable to find {field} string.")
|
|
||||||
return item
|
|
||||||
|
|
||||||
|
|
||||||
setuptools.setup(
|
|
||||||
name="STree",
|
|
||||||
version=get_data("version"),
|
|
||||||
license=get_data("license"),
|
|
||||||
description="Oblique decision tree with svm nodes",
|
|
||||||
long_description=readme(),
|
|
||||||
long_description_content_type="text/markdown",
|
|
||||||
packages=setuptools.find_packages(),
|
|
||||||
url="https://github.com/Doctorado-ML/STree#stree",
|
|
||||||
project_urls={
|
|
||||||
"Code": "https://github.com/Doctorado-ML/STree",
|
|
||||||
"Documentation": "https://stree.readthedocs.io/en/latest/index.html",
|
|
||||||
},
|
|
||||||
author=get_data("author"),
|
|
||||||
author_email=get_data("author_email"),
|
|
||||||
keywords="scikit-learn oblique-classifier oblique-decision-tree decision-\
|
|
||||||
tree svm svc",
|
|
||||||
classifiers=[
|
|
||||||
"Development Status :: 5 - Production/Stable",
|
|
||||||
"License :: OSI Approved :: " + get_data("license"),
|
|
||||||
"Programming Language :: Python :: 3.8",
|
|
||||||
"Natural Language :: English",
|
|
||||||
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
|
||||||
"Intended Audience :: Science/Research",
|
|
||||||
],
|
|
||||||
install_requires=["scikit-learn", "mufs"],
|
|
||||||
test_suite="stree.tests",
|
|
||||||
zip_safe=False,
|
|
||||||
)
|
|
@@ -68,6 +68,7 @@ class Snode:
|
|||||||
self._impurity = impurity
|
self._impurity = impurity
|
||||||
self._partition_column: int = -1
|
self._partition_column: int = -1
|
||||||
self._scaler = scaler
|
self._scaler = scaler
|
||||||
|
self._proba = None
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def copy(cls, node: "Snode") -> "Snode":
|
def copy(cls, node: "Snode") -> "Snode":
|
||||||
@@ -127,23 +128,22 @@ class Snode:
|
|||||||
def get_up(self) -> "Snode":
|
def get_up(self) -> "Snode":
|
||||||
return self._up
|
return self._up
|
||||||
|
|
||||||
def make_predictor(self):
|
def make_predictor(self, num_classes: int) -> None:
|
||||||
"""Compute the class of the predictor and its belief based on the
|
"""Compute the class of the predictor and its belief based on the
|
||||||
subdataset of the node only if it is a leaf
|
subdataset of the node only if it is a leaf
|
||||||
"""
|
"""
|
||||||
if not self.is_leaf():
|
if not self.is_leaf():
|
||||||
return
|
return
|
||||||
classes, card = np.unique(self._y, return_counts=True)
|
classes, card = np.unique(self._y, return_counts=True)
|
||||||
if len(classes) > 1:
|
self._proba = np.zeros((num_classes,), dtype=np.int64)
|
||||||
|
for c, n in zip(classes, card):
|
||||||
|
self._proba[c] = n
|
||||||
|
try:
|
||||||
max_card = max(card)
|
max_card = max(card)
|
||||||
self._class = classes[card == max_card][0]
|
self._class = classes[card == max_card][0]
|
||||||
self._belief = max_card / np.sum(card)
|
self._belief = max_card / np.sum(card)
|
||||||
else:
|
except ValueError:
|
||||||
self._belief = 1
|
self._class = None
|
||||||
try:
|
|
||||||
self._class = classes[0]
|
|
||||||
except IndexError:
|
|
||||||
self._class = None
|
|
||||||
|
|
||||||
def graph(self):
|
def graph(self):
|
||||||
"""
|
"""
|
||||||
@@ -155,7 +155,7 @@ class Snode:
|
|||||||
output += (
|
output += (
|
||||||
f'N{id(self)} [shape=box style=filled label="'
|
f'N{id(self)} [shape=box style=filled label="'
|
||||||
f"class={self._class} impurity={self._impurity:.3f} "
|
f"class={self._class} impurity={self._impurity:.3f} "
|
||||||
f'classes={count_values[0]} samples={count_values[1]}"];\n'
|
f'counts={self._proba}"];\n'
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
output += (
|
output += (
|
||||||
@@ -267,7 +267,6 @@ class Splitter:
|
|||||||
random_state=None,
|
random_state=None,
|
||||||
normalize=False,
|
normalize=False,
|
||||||
):
|
):
|
||||||
|
|
||||||
self._clf = clf
|
self._clf = clf
|
||||||
self._random_state = random_state
|
self._random_state = random_state
|
||||||
if random_state is not None:
|
if random_state is not None:
|
||||||
@@ -415,7 +414,8 @@ class Splitter:
|
|||||||
)
|
)
|
||||||
return tuple(
|
return tuple(
|
||||||
sorted(
|
sorted(
|
||||||
range(len(feature_list)), key=lambda sub: feature_list[sub]
|
range(len(feature_list)),
|
||||||
|
key=lambda sub: feature_list[sub],
|
||||||
)[-max_features:]
|
)[-max_features:]
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -530,7 +530,10 @@ class Splitter:
|
|||||||
return entropy
|
return entropy
|
||||||
|
|
||||||
def information_gain(
|
def information_gain(
|
||||||
self, labels: np.array, labels_up: np.array, labels_dn: np.array
|
self,
|
||||||
|
labels: np.array,
|
||||||
|
labels_up: np.array,
|
||||||
|
labels_dn: np.array,
|
||||||
) -> float:
|
) -> float:
|
||||||
"""Compute information gain of a split candidate
|
"""Compute information gain of a split candidate
|
||||||
|
|
||||||
@@ -743,7 +746,7 @@ class Splitter:
|
|||||||
Train time - True / Test time - False
|
Train time - True / Test time - False
|
||||||
"""
|
"""
|
||||||
# data contains the distances of every sample to every class hyperplane
|
# data contains the distances of every sample to every class hyperplane
|
||||||
# array of (m, nc) nc = # classes
|
# array of (m, nc) nc = k if ovr, nc = k*(k-1)/2 if ovo
|
||||||
data = self._distances(node, samples)
|
data = self._distances(node, samples)
|
||||||
if data.shape[0] < self._min_samples_split:
|
if data.shape[0] < self._min_samples_split:
|
||||||
# there aren't enough samples to split
|
# there aren't enough samples to split
|
||||||
|
191
stree/Strees.py
191
stree/Strees.py
@@ -139,7 +139,7 @@ class Stree(BaseEstimator, ClassifierMixin):
|
|||||||
self,
|
self,
|
||||||
C: float = 1.0,
|
C: float = 1.0,
|
||||||
kernel: str = "linear",
|
kernel: str = "linear",
|
||||||
max_iter: int = 1e5,
|
max_iter: int = int(1e5),
|
||||||
random_state: int = None,
|
random_state: int = None,
|
||||||
max_depth: int = None,
|
max_depth: int = None,
|
||||||
tol: float = 1e-4,
|
tol: float = 1e-4,
|
||||||
@@ -153,7 +153,6 @@ class Stree(BaseEstimator, ClassifierMixin):
|
|||||||
multiclass_strategy: str = "ovo",
|
multiclass_strategy: str = "ovo",
|
||||||
normalize: bool = False,
|
normalize: bool = False,
|
||||||
):
|
):
|
||||||
|
|
||||||
self.max_iter = max_iter
|
self.max_iter = max_iter
|
||||||
self.C = C
|
self.C = C
|
||||||
self.kernel = kernel
|
self.kernel = kernel
|
||||||
@@ -169,12 +168,18 @@ class Stree(BaseEstimator, ClassifierMixin):
         self.splitter = splitter
         self.normalize = normalize
         self.multiclass_strategy = multiclass_strategy
+        self.depth_ = 0
 
     @staticmethod
     def version() -> str:
         """Return the version of the package."""
         return __version__
 
+    def __call__(self) -> None:
+        """Only added to comply with scikit-learn base estimator for
+        ensembles"""
+        pass
+
     def _more_tags(self) -> dict:
         """Required by sklearn to supply features of the classifier
         make mandatory the labels array
@@ -185,7 +190,10 @@ class Stree(BaseEstimator, ClassifierMixin):
         return {"requires_y": True}
 
     def fit(
-        self, X: np.ndarray, y: np.ndarray, sample_weight: np.array = None
+        self,
+        X: np.ndarray,
+        y: np.ndarray,
+        sample_weight: np.array = None,
     ) -> "Stree":
         """Build the tree based on the dataset of samples and its labels
 
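
Note: the reformatting does not change the fit contract; sample_weight stays optional. A hedged usage sketch against the public API:

import numpy as np
from sklearn.datasets import load_wine
from stree import Stree

X, y = load_wine(return_X_y=True)
weights = np.ones(y.shape[0])  # uniform weights, equivalent to the default None
clf = Stree(random_state=0).fit(X, y, sample_weight=weights)
print(clf.score(X, y))
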
@@ -314,7 +322,7 @@ class Stree(BaseEstimator, ClassifierMixin):
         if np.unique(y).shape[0] == 1:
             # only 1 class => pure dataset
             node.set_title(title + ", <pure>")
-            node.make_predictor()
+            node.make_predictor(self.n_classes_)
             return node
         # Train the model
         clf = self._build_clf()
@@ -333,14 +341,18 @@ class Stree(BaseEstimator, ClassifierMixin):
         if X_U is None or X_D is None:
             # didn't part anything
             node.set_title(title + ", <cgaf>")
-            node.make_predictor()
+            node.make_predictor(self.n_classes_)
             return node
         node.set_up(
             self._train(X_U, y_u, sw_u, depth + 1, title + f" - Up({depth+1})")
         )
         node.set_down(
             self._train(
-                X_D, y_d, sw_d, depth + 1, title + f" - Down({depth+1})"
+                X_D,
+                y_d,
+                sw_d,
+                depth + 1,
+                title + f" - Down({depth+1})",
             )
         )
         return node
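
Note: make_predictor now receives self.n_classes_ so every leaf can hold a count vector of fixed length, including classes that never reach that leaf. The Snode.make_predictor body is not shown in this diff; a sketch of the counting it implies, with a hypothetical leaf_counts helper:

import numpy as np

def leaf_counts(labels: np.ndarray, n_classes: int) -> np.ndarray:
    # minlength pads absent classes with 0, keeping the vector aligned
    # with clf.classes_; an empty (bogus) leaf yields all zeros.
    if labels.shape[0] == 0:
        return np.zeros(n_classes, dtype=int)
    return np.bincount(labels.astype(int), minlength=n_classes)

print(leaf_counts(np.array([1, 0, 1, 1]), 2))  # [1 3], as Snode_test expects
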
@@ -367,28 +379,100 @@ class Stree(BaseEstimator, ClassifierMixin):
             )
         )
 
-    @staticmethod
-    def _reorder_results(y: np.array, indices: np.array) -> np.array:
-        """Reorder an array based on the array of indices passed
+    def __predict_class(self, X: np.array) -> np.array:
+        """Compute the predicted class for the samples in X. Returns the number
+        of samples of each class in the corresponding leaf node.
 
         Parameters
         ----------
-        y : np.array
-            data untidy
-        indices : np.array
-            indices used to set order
+        X : np.array
+            Array of samples
 
         Returns
         -------
         np.array
-            array y ordered
+            Array of shape (n_samples, n_classes) with the number of samples
+            of each class in the corresponding leaf node
         """
-        # return array of same type given in y
-        y_ordered = y.copy()
-        indices = indices.astype(int)
-        for i, index in enumerate(indices):
-            y_ordered[index] = y[i]
-        return y_ordered
+
+        def compute_prediction(xp, indices, node):
+            if xp is None:
+                return
+            if node.is_leaf():
+                # set a class for indices
+                result[indices] = node._proba
+                return
+            self.splitter_.partition(xp, node, train=False)
+            x_u, x_d = self.splitter_.part(xp)
+            i_u, i_d = self.splitter_.part(indices)
+            compute_prediction(x_u, i_u, node.get_up())
+            compute_prediction(x_d, i_d, node.get_down())
+
+        # setup prediction & make it happen
+        result = np.zeros((X.shape[0], self.n_classes_))
+        indices = np.arange(X.shape[0])
+        compute_prediction(X, indices, self.tree_)
+        return result
+
+    def check_predict(self, X) -> np.array:
+        """Checks predict and predict_proba preconditions. If input X is not an
+        np.array convert it to one.
+
+        Parameters
+        ----------
+        X : np.ndarray
+            Array of samples
+
+        Returns
+        -------
+        np.array
+            Array of samples
+
+        Raises
+        ------
+        ValueError
+            If number of features of X is different of the number of features
+            in training data
+        """
+        check_is_fitted(self, ["tree_"])
+        # Input validation
+        X = check_array(X)
+        if X.shape[1] != self.n_features_:
+            raise ValueError(
+                f"Expected {self.n_features_} features but got "
+                f"({X.shape[1]})"
+            )
+        return X
+
+    def predict_proba(self, X: np.array) -> np.array:
+        """Predict class probabilities of the input samples X.
+
+        The predicted class probability is the fraction of samples of the same
+        class in a leaf.
+
+        Parameters
+        ----------
+        X : dataset of samples.
+
+        Returns
+        -------
+        proba : array of shape (n_samples, n_classes)
+            The class probabilities of the input samples.
+
+        Raises
+        ------
+        ValueError
+            if dataset with inconsistent number of features
+        NotFittedError
+            if model is not fitted
+        """
+
+        X = self.check_predict(X)
+        # return # of samples of each class in leaf node
+        values = self.__predict_class(X)
+        normalizer = values.sum(axis=1)[:, np.newaxis]
+        normalizer[normalizer == 0.0] = 1.0
+        return values / normalizer
+
     def predict(self, X: np.array) -> np.array:
         """Predict labels for each sample in dataset passed
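
Note: __predict_class routes every sample down to a leaf and collects that leaf's raw per-class counts; predict_proba then row-normalizes them, guarding against all-zero rows. The normalization step in isolation:

import numpy as np

values = np.array([[1.0, 3.0],   # leaf with 4 samples -> [0.25, 0.75]
                   [0.0, 0.0]])  # degenerate all-zero row is left untouched
normalizer = values.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0  # avoid division by zero
print(values / normalizer)           # [[0.25 0.75] [0.   0.  ]]
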
@@ -410,40 +494,45 @@ class Stree(BaseEstimator, ClassifierMixin):
         NotFittedError
             if model is not fitted
         """
+        X = self.check_predict(X)
+        return self.classes_[np.argmax(self.__predict_class(X), axis=1)]
 
-        def predict_class(
-            xp: np.array, indices: np.array, node: Snode
-        ) -> np.array:
-            if xp is None:
-                return [], []
+    def get_nodes(self) -> int:
+        """Return the number of nodes in the tree
+
+        Returns
+        -------
+        int
+            number of nodes
+        """
+        nodes = 0
+        for _ in self:
+            nodes += 1
+        return nodes
+
+    def get_leaves(self) -> int:
+        """Return the number of leaves in the tree
+
+        Returns
+        -------
+        int
+            number of leaves
+        """
+        leaves = 0
+        for node in self:
             if node.is_leaf():
-                # set a class for every sample in dataset
-                prediction = np.full((xp.shape[0], 1), node._class)
-                return prediction, indices
-            self.splitter_.partition(xp, node, train=False)
-            x_u, x_d = self.splitter_.part(xp)
-            i_u, i_d = self.splitter_.part(indices)
-            prx_u, prin_u = predict_class(x_u, i_u, node.get_up())
-            prx_d, prin_d = predict_class(x_d, i_d, node.get_down())
-            return np.append(prx_u, prx_d), np.append(prin_u, prin_d)
+                leaves += 1
+        return leaves
 
-        # sklearn check
-        check_is_fitted(self, ["tree_"])
-        # Input validation
-        X = check_array(X)
-        if X.shape[1] != self.n_features_:
-            raise ValueError(
-                f"Expected {self.n_features_} features but got "
-                f"({X.shape[1]})"
-            )
-        # setup prediction & make it happen
-        indices = np.arange(X.shape[0])
-        result = (
-            self._reorder_results(*predict_class(X, indices, self.tree_))
-            .astype(int)
-            .ravel()
-        )
-        return self.classes_[result]
+    def get_depth(self) -> int:
+        """Return the depth of the tree
+
+        Returns
+        -------
+        int
+            depth of the tree
+        """
+        return self.depth_
 
     def nodes_leaves(self) -> tuple:
         """Compute the number of nodes and leaves in the built tree
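
Note: the new accessors lean on state the estimator already tracks: Stree is iterable (Siterator yields every Snode), so counting is a single pass, and get_depth just returns the depth_ set during fit. For a fitted clf they boil down to:

nodes = sum(1 for _ in clf)                        # == clf.get_nodes()
leaves = sum(1 for node in clf if node.is_leaf())  # == clf.get_leaves()
depth = clf.depth_                                 # == clf.get_depth()
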

stree/__init__.py
@@ -1,8 +1,9 @@
 from .Strees import Stree, Siterator
+from ._version import __version__
 
 __author__ = "Ricardo Montañana Gómez"
 __copyright__ = "Copyright 2020-2021, Ricardo Montañana Gómez"
 __license__ = "MIT License"
 __author_email__ = "ricardo.montanana@alu.uclm.es"
 
-__all__ = ["Stree", "Siterator"]
+__all__ = ["__version__", "Stree", "Siterator"]
stree/_version.py
@@ -1 +1 @@
-__version__ = "1.2.4"
+__version__ = "1.4.0"
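
Note: re-exporting __version__ lets callers read the release from the package as well as from the estimator (assuming the package is installed):

import stree
from stree import Stree

print(stree.__version__)  # "1.4.0"
print(Stree().version())  # same string via the static method
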

stree/tests/Snode_test.py
@@ -67,10 +67,28 @@ class Snode_test(unittest.TestCase):
 
     def test_make_predictor_on_leaf(self):
         test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
-        test.make_predictor()
+        test.make_predictor(2)
         self.assertEqual(1, test._class)
         self.assertEqual(0.75, test._belief)
         self.assertEqual(-1, test._partition_column)
+        self.assertListEqual([1, 3], test._proba.tolist())
+
+    def test_make_predictor_on_not_leaf(self):
+        test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
+        test.set_up(Snode(None, [1], [1], [], 0.0, "another_test"))
+        test.make_predictor(2)
+        self.assertIsNone(test._class)
+        self.assertEqual(0, test._belief)
+        self.assertEqual(-1, test._partition_column)
+        self.assertEqual(-1, test.get_up()._partition_column)
+        self.assertIsNone(test._proba)
+
+    def test_make_predictor_on_leaf_bogus_data(self):
+        test = Snode(None, [1, 2, 3, 4], [], [], 0.0, "test")
+        test.make_predictor(2)
+        self.assertIsNone(test._class)
+        self.assertEqual(-1, test._partition_column)
+        self.assertListEqual([0, 0], test._proba.tolist())
 
     def test_set_title(self):
         test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
@@ -97,21 +115,6 @@ class Snode_test(unittest.TestCase):
         test.set_features([1, 2])
         self.assertListEqual([1, 2], test.get_features())
 
-    def test_make_predictor_on_not_leaf(self):
-        test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
-        test.set_up(Snode(None, [1], [1], [], 0.0, "another_test"))
-        test.make_predictor()
-        self.assertIsNone(test._class)
-        self.assertEqual(0, test._belief)
-        self.assertEqual(-1, test._partition_column)
-        self.assertEqual(-1, test.get_up()._partition_column)
-
-    def test_make_predictor_on_leaf_bogus_data(self):
-        test = Snode(None, [1, 2, 3, 4], [], [], 0.0, "test")
-        test.make_predictor()
-        self.assertIsNone(test._class)
-        self.assertEqual(-1, test._partition_column)
-
     def test_copy_node(self):
         px = [1, 2, 3, 4]
         py = [1]

stree/tests/Stree_test.py
@@ -115,6 +115,38 @@ class Stree_test(unittest.TestCase):
         yp = clf.fit(X, y).predict(X[:num, :])
         self.assertListEqual(y[:num].tolist(), yp.tolist())
 
+    def test_multiple_predict_proba(self):
+        expected = {
+            "liblinear": {
+                0: [0.02401129943502825, 0.9759887005649718],
+                17: [0.9282970550576184, 0.07170294494238157],
+            },
+            "linear": {
+                0: [0.029329608938547486, 0.9706703910614525],
+                17: [0.9298469387755102, 0.07015306122448979],
+            },
+            "rbf": {
+                0: [0.023448275862068966, 0.976551724137931],
+                17: [0.9458064516129032, 0.05419354838709677],
+            },
+            "poly": {
+                0: [0.01601164483260553, 0.9839883551673945],
+                17: [0.9089790897908979, 0.0910209102091021],
+            },
+        }
+        indices = [0, 17]
+        X, y = load_dataset(self._random_state)
+        for kernel in ["liblinear", "linear", "rbf", "poly"]:
+            clf = Stree(
+                kernel=kernel,
+                multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
+                random_state=self._random_state,
+            )
+            yp = clf.fit(X, y).predict_proba(X)
+            for index in indices:
+                for exp, comp in zip(expected[kernel][index], yp[index]):
+                    self.assertAlmostEqual(exp, comp)
 
     def test_single_vs_multiple_prediction(self):
         """Check if predicting sample by sample gives the same result as
         predicting all samples at once
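
Note: because the probabilities are normalized leaf counts, every row of predict_proba sums to 1 and its argmax agrees with predict. A hedged follow-up check, reusing clf, X and y from the loop above:

import numpy as np

yp = clf.fit(X, y).predict_proba(X)
assert np.allclose(yp.sum(axis=1), 1.0)
assert (clf.predict(X) == clf.classes_[yp.argmax(axis=1)]).all()
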
@@ -207,6 +239,7 @@ class Stree_test(unittest.TestCase):
         )
         tcl.fit(*load_dataset(self._random_state))
         self.assertEqual(depth, tcl.depth_)
+        self.assertEqual(depth, tcl.get_depth())
 
     def test_unfitted_tree_is_iterable(self):
         tcl = Stree()
@@ -256,12 +289,12 @@ class Stree_test(unittest.TestCase):
                 "impurity sigmoid": 0.824,
             },
             "Iris": {
-                "max_samples liblinear": 0.9550561797752809,
+                "max_samples liblinear": 0.9887640449438202,
                 "max_samples linear": 1.0,
                 "max_samples rbf": 0.6685393258426966,
                 "max_samples poly": 0.6853932584269663,
                 "max_samples sigmoid": 0.6404494382022472,
-                "impurity liblinear": 0.9550561797752809,
+                "impurity liblinear": 0.9887640449438202,
                 "impurity linear": 1.0,
                 "impurity rbf": 0.6685393258426966,
                 "impurity poly": 0.6853932584269663,
@@ -274,10 +307,10 @@ class Stree_test(unittest.TestCase):
         for criteria in ["max_samples", "impurity"]:
             for kernel in self._kernels:
                 clf = Stree(
-                    max_iter=1e4,
-                    multiclass_strategy="ovr"
-                    if kernel == "liblinear"
-                    else "ovo",
+                    max_iter=int(1e4),
+                    multiclass_strategy=(
+                        "ovr" if kernel == "liblinear" else "ovo"
+                    ),
                     kernel=kernel,
                     random_state=self._random_state,
                 )
@@ -407,10 +440,10 @@ class Stree_test(unittest.TestCase):
         clf.fit(X, y)
         score = clf.score(X, y)
         # Check accuracy of the whole model
-        self.assertAlmostEquals(0.98, score, 5)
+        self.assertAlmostEqual(0.98, score, 5)
         svm = LinearSVC(random_state=0)
         svm.fit(X, y)
-        self.assertAlmostEquals(0.9666666666666667, svm.score(X, y), 5)
+        self.assertAlmostEqual(0.9666666666666667, svm.score(X, y), 5)
         data = svm.decision_function(X)
         expected = [
             0.4444444444444444,
@@ -422,7 +455,7 @@ class Stree_test(unittest.TestCase):
         ty[data > 0] = 1
         ty = ty.astype(int)
         for i in range(3):
-            self.assertAlmostEquals(
+            self.assertAlmostEqual(
                 expected[i],
                 clf.splitter_._gini(ty[:, i]),
             )
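
Note: assertAlmostEquals is a long-deprecated alias that Python 3.12 removes; the canonical spelling takes the same arguments, with the third positional argument being places:

import unittest

class Example(unittest.TestCase):
    def test_places(self):
        # passes because round(0.98 - 0.9800000004, 5) == 0
        self.assertAlmostEqual(0.98, 0.9800000004, 5)

if __name__ == "__main__":
    unittest.main()
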
@@ -560,7 +593,7 @@ class Stree_test(unittest.TestCase):
         )
         self.assertEqual(0.9526666666666667, clf2.fit(X, y).score(X, y))
         X, y = load_wine(return_X_y=True)
-        self.assertEqual(0.9831460674157303, clf.fit(X, y).score(X, y))
+        self.assertEqual(0.9887640449438202, clf.fit(X, y).score(X, y))
         self.assertEqual(1.0, clf2.fit(X, y).score(X, y))
 
     def test_zero_all_sample_weights(self):
@@ -608,10 +641,12 @@ class Stree_test(unittest.TestCase):
         clf = Stree(random_state=self._random_state)
         clf.fit(X, y)
         self.assertEqual(6, clf.depth_)
+        self.assertEqual(6, clf.get_depth())
         X, y = load_wine(return_X_y=True)
         clf = Stree(random_state=self._random_state)
         clf.fit(X, y)
         self.assertEqual(4, clf.depth_)
+        self.assertEqual(4, clf.get_depth())
 
     def test_nodes_leaves(self):
         """Check number of nodes and leaves."""
@@ -625,13 +660,17 @@ class Stree_test(unittest.TestCase):
         clf.fit(X, y)
         nodes, leaves = clf.nodes_leaves()
         self.assertEqual(31, nodes)
+        self.assertEqual(31, clf.get_nodes())
         self.assertEqual(16, leaves)
+        self.assertEqual(16, clf.get_leaves())
         X, y = load_wine(return_X_y=True)
         clf = Stree(random_state=self._random_state)
         clf.fit(X, y)
         nodes, leaves = clf.nodes_leaves()
         self.assertEqual(11, nodes)
+        self.assertEqual(11, clf.get_nodes())
         self.assertEqual(6, leaves)
+        self.assertEqual(6, clf.get_leaves())
 
     def test_nodes_leaves_artificial(self):
         """Check leaves of artificial dataset."""
@@ -650,7 +689,9 @@ class Stree_test(unittest.TestCase):
         clf.tree_ = n1
         nodes, leaves = clf.nodes_leaves()
         self.assertEqual(6, nodes)
+        self.assertEqual(6, clf.get_nodes())
         self.assertEqual(2, leaves)
+        self.assertEqual(2, clf.get_leaves())
 
     def test_bogus_multiclass_strategy(self):
         """Check invalid multiclass strategy."""
@@ -684,6 +725,11 @@ class Stree_test(unittest.TestCase):
         clf = Stree()
         self.assertEqual(__version__, clf.version())
 
+    def test_call(self) -> None:
+        """Check call method."""
+        clf = Stree()
+        self.assertIsNone(clf())
+
     def test_graph(self):
         """Check graphviz representation of the tree."""
         X, y = load_wine(return_X_y=True)
@@ -695,7 +741,7 @@ class Stree_test(unittest.TestCase):
         )
         expected_tail = (
             ' [shape=box style=filled label="class=1 impurity=0.000 '
-            'classes=[1] samples=[1]"];\n}\n'
+            'counts=[0 1 0]"];\n}\n'
         )
         self.assertEqual(clf.graph(), expected_head + "}\n")
         clf.fit(X, y)
@@ -715,7 +761,7 @@ class Stree_test(unittest.TestCase):
         )
         expected_tail = (
             ' [shape=box style=filled label="class=1 impurity=0.000 '
-            'classes=[1] samples=[1]"];\n}\n'
+            'counts=[0 1 0]"];\n}\n'
         )
         self.assertEqual(clf.graph("Sample title"), expected_head + "}\n")
         clf.fit(X, y)
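
Note: clf.graph() returns Graphviz DOT source, and leaf labels now carry the per-class counts vector instead of the classes/samples lists. A rendering sketch, assuming the optional graphviz Python package and the dot binary are installed:

import graphviz

dot = clf.graph("Sample title")  # DOT source; leaves read counts=[...]
graphviz.Source(dot).render("stree", format="png", cleanup=True)
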