Mirror of https://github.com/Doctorado-ML/STree.git (synced 2025-08-17 16:36:01 +00:00)

Compare commits: enhance-pa ... v1.3.2 (55 commits)
SHA1:

5f8ca8f3bb, fb8b9b344f, 036d1ba2a7, 4de74973b8, 28dd04b95a,
542bbce7db, 5b791bc5bf, c37f044e3a, 2f6ae648a1, 93be8a89a8,
82838fa3e0, f0b2ce3c7b, 00ed57c015, 08222f109e, cc931d8547,
b044a057df, fc48bc8ba4, 8251f07674, 0b15a5af11, 28d905368b,
e5d49132ec, 8daecc4726, bf678df159, 36b08b1bcf, 36ff3da26d,
6b281ebcc8, 3aaddd096f, 15a5a4c407, 0afe14a447, fc9b7b5c92,
3f79d2877f, ecc2800705, 0524d47d64, d46f544466, 79190ef2e1,
4f04e72670, 5cef0f4875, 28c7558f01, e19d10f6a7, 02de394c96,
a4aac9d310, 8a18c998df, b55f59a3ec, 783d105099, c36f685263,
0f89b044f1, 6ba973dfe1, 460c63a6d0, f438124057, 147dad684c,
3bdac9bd60, e4ac5075e5, 36816074ff, 475ad7e752, 1c869e154e
56  .github/workflows/codeql-analysis.yml  (vendored, new file)
@@ -0,0 +1,56 @@
name: "CodeQL"

on:
  push:
    branches: [master]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [master]
  schedule:
    - cron: "16 17 * * 3"

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        language: ["python"]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
        # Learn more:
        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed

    steps:
      - name: Checkout repository
        uses: actions/checkout@v2

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v2
        with:
          languages: ${{ matrix.language }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.
          # queries: ./path/to/local/query, your-org/your-repo/queries@main

      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
      # If this step fails, then you should remove it and run the build manually (see below)
      - name: Autobuild
        uses: github/codeql-action/autobuild@v2

      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 https://git.io/JvXDl

      # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
      # and modify them (or add more) to build your code if your project
      # uses a compiled language

      #- run: |
      #   make bootstrap
      #   make release

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v2
47  .github/workflows/main.yml  (vendored, new file)
@@ -0,0 +1,47 @@
name: CI

on:
  push:
    branches: [master]
  pull_request:
    branches: [master]
  workflow_dispatch:

jobs:
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [macos-latest, ubuntu-latest, windows-latest]
        python: [3.8, "3.10"]

    steps:
      - uses: actions/checkout@v3
      - name: Set up Python ${{ matrix.python }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python }}
      - name: Install dependencies
        run: |
          pip install -q --upgrade pip
          pip install -q -r requirements.txt
          pip install -q --upgrade codecov coverage black flake8 codacy-coverage
      - name: Lint
        run: |
          black --check --diff stree
          flake8 --count stree
      - name: Tests
        run: |
          coverage run -m unittest -v stree.tests
          coverage xml
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v3
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./coverage.xml
      - name: Run codacy-coverage-reporter
        if: runner.os == 'Linux'
        uses: codacy/codacy-coverage-reporter-action@master
        with:
          project-token: ${{ secrets.CODACY_PROJECT_TOKEN }}
          coverage-reports: coverage.xml
3  .gitignore  (vendored)
@@ -132,4 +132,5 @@ dmypy.json
 .vscode
 .pre-commit-config.yaml
 
 **.csv
+.virtual_documents
37  CITATION.cff  (new file)
@@ -0,0 +1,37 @@
cff-version: 1.2.0
message: "If you use this software, please cite it as below."
authors:
  - family-names: "Montañana"
    given-names: "Ricardo"
    orcid: "https://orcid.org/0000-0003-3242-5452"
  - family-names: "Gámez"
    given-names: "José A."
    orcid: "https://orcid.org/0000-0003-1188-1117"
  - family-names: "Puerta"
    given-names: "José M."
    orcid: "https://orcid.org/0000-0002-9164-5191"
title: "STree"
version: 1.2.3
doi: 10.5281/zenodo.5504083
date-released: 2021-11-02
url: "https://github.com/Doctorado-ML/STree"
preferred-citation:
  type: article
  authors:
    - family-names: "Montañana"
      given-names: "Ricardo"
      orcid: "https://orcid.org/0000-0003-3242-5452"
    - family-names: "Gámez"
      given-names: "José A."
      orcid: "https://orcid.org/0000-0003-1188-1117"
    - family-names: "Puerta"
      given-names: "José M."
      orcid: "https://orcid.org/0000-0002-9164-5191"
  doi: "10.1007/978-3-030-85713-4_6"
  journal: "Lecture Notes in Computer Science"
  month: 9
  start: 54
  end: 64
  title: "STree: A Single Multi-class Oblique Decision Tree Based on Support Vector Machines"
  volume: 12882
  year: 2021
2  LICENSE
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2020 Doctorado-ML
+Copyright (c) 2020-2021, Ricardo Montañana Gómez
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
56  Makefile  (new file)
@@ -0,0 +1,56 @@
SHELL := /bin/bash
.DEFAULT_GOAL := help
.PHONY: coverage deps help lint push test doc build

coverage:  ## Run tests with coverage
	coverage erase
	coverage run -m unittest -v stree.tests
	coverage report -m

deps:  ## Install dependencies
	pip install -r requirements.txt

devdeps:  ## Install development dependencies
	pip install black pip-audit flake8 mypy coverage

lint:  ## Lint and static-check
	black stree
	flake8 stree
	mypy stree

push:  ## Push code with tags
	git push && git push --tags

test:  ## Run tests
	python -m unittest -v stree.tests

doc:  ## Update documentation
	make -C docs --makefile=Makefile html

build:  ## Build package
	rm -fr dist/*
	rm -fr build/*
	python setup.py sdist bdist_wheel

doc-clean:  ## Clean the documentation build
	make -C docs --makefile=Makefile clean

audit:  ## Audit pip
	pip-audit

help:  ## Show help message
	@IFS=$$'\n' ; \
	help_lines=(`fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/##/:/'`); \
	printf "%s\n\n" "Usage: make [task]"; \
	printf "%-20s %s\n" "task" "help" ; \
	printf "%-20s %s\n" "------" "----" ; \
	for help_line in $${help_lines[@]}; do \
		IFS=$$':' ; \
		help_split=($$help_line) ; \
		help_command=`echo $${help_split[0]} | sed -e 's/^ *//' -e 's/ *$$//'` ; \
		help_info=`echo $${help_split[2]} | sed -e 's/^ *//' -e 's/ *$$//'` ; \
		printf '\033[36m'; \
		printf "%-20s %s" $$help_command ; \
		printf '\033[0m'; \
		printf "%s\n" $$help_info; \
	done
65  README.md
@@ -1,8 +1,12 @@
-[](https://app.codeship.com/projects/399170)
+
+[](https://github.com/Doctorado-ML/STree/actions/workflows/codeql-analysis.yml)
 [](https://codecov.io/gh/doctorado-ml/stree)
 [](https://www.codacy.com/gh/Doctorado-ML/STree?utm_source=github.com&utm_medium=referral&utm_content=Doctorado-ML/STree&utm_campaign=Badge_Grade)
+[](https://badge.fury.io/py/STree)
+
+[](https://zenodo.org/badge/latestdoi/262658230)
 
-# Stree
+# STree
 
 Oblique Tree classifier based on SVM nodes. The nodes are built and split with sklearn SVC models. Stree is a sklearn estimator and can be integrated in pipelines, grid searches, etc.
 
@@ -11,33 +15,66 @@ Oblique Tree classifier based on SVM nodes. The nodes are built and splitted wit
 ## Installation
 
 ```bash
-pip install git+https://github.com/doctorado-ml/stree
+pip install Stree
 ```
 
+## Documentation
+
+Can be found in [stree.readthedocs.io](https://stree.readthedocs.io/en/stable/)
+
 ## Examples
 
 ### Jupyter notebooks
 
-* [](https://mybinder.org/v2/gh/Doctorado-ML/STree/master?urlpath=lab/tree/notebooks/benchmark.ipynb) Benchmark
-* [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/benchmark.ipynb) Benchmark
-* [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/features.ipynb) Test features
-* [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/adaboost.ipynb) Adaboost
-* [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/gridsearch.ipynb) Gridsearch
-* [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/test_graphs.ipynb) Test Graphics
-
-### Command line
-
-```bash
-python main.py
-```
+- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/benchmark.ipynb) Benchmark
+- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/features.ipynb) Some features
+- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/gridsearch.ipynb) Gridsearch
+- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/ensemble.ipynb) Ensembles
+
+## Hyperparameters
+
+| | **Hyperparameter** | **Type/Values** | **Default** | **Meaning** |
+| --- | --- | --- | --- | --- |
+| \* | C | \<float\> | 1.0 | Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. |
+| \* | kernel | {"liblinear", "linear", "poly", "rbf", "sigmoid"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of "liblinear", "linear", "poly", "rbf" or "sigmoid". liblinear uses the [liblinear](https://www.csie.ntu.edu.tw/~cjlin/liblinear/) library; the rest use the [libsvm](https://www.csie.ntu.edu.tw/~cjlin/libsvm/) library through scikit-learn. |
+| \* | max_iter | \<int\> | 1e5 | Hard limit on iterations within solver, or -1 for no limit. |
+| \* | random_state | \<int\> | None | Controls the pseudo random number generation for shuffling the data for probability estimates. Ignored when probability is False. Pass an int for reproducible output across multiple function calls. |
+| | max_depth | \<int\> | None | Specifies the maximum depth of the tree. |
+| \* | tol | \<float\> | 1e-4 | Tolerance for stopping criterion. |
+| \* | degree | \<int\> | 3 | Degree of the polynomial kernel function ("poly"). Ignored by all other kernels. |
+| \* | gamma | {"scale", "auto"} or \<float\> | scale | Kernel coefficient for "rbf", "poly" and "sigmoid". If gamma="scale" (default), 1 / (n_features \* X.var()) is used as the value of gamma; if "auto", 1 / n_features. |
+| | split_criteria | {"impurity", "max_samples"} | impurity | Decides (only in multiclass classification) which column (class) to use to split the dataset in a node\*\*. max_samples is incompatible with the "ovo" multiclass_strategy. |
+| | criterion | {"gini", "entropy"} | entropy | The function to measure the quality of a split (only used if max_features != num_features). Supported criteria are "gini" for the Gini impurity and "entropy" for the information gain. |
+| | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) for any. |
+| | max_features | \<int\>, \<float\> or {"auto", "sqrt", "log2"} | None | The number of features to consider when looking for the split: if int, consider max_features features at each split; if float, max_features is a fraction and int(max_features \* n_features) features are considered at each split; if "auto" or "sqrt", max_features=sqrt(n_features); if "log2", max_features=log2(n_features); if None, max_features=n_features. |
+| | splitter | {"best", "random", "trandom", "mutual", "cfs", "fcbf", "iwss"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features). Supported strategies are: **"best"**: the sklearn SelectKBest algorithm is used in every node to choose the max_features best features; **"random"**: the algorithm generates 5 candidates and chooses the best (max. info. gain) of them; **"trandom"**: the algorithm generates only one random combination; **"mutual"**: chooses the best features w.r.t. their mutual info with the label; **"cfs"**: applies Correlation-based Feature Selection; **"fcbf"**: applies the Fast Correlation-Based Filter; **"iwss"**: IWSS-based algorithm. |
+| | normalize | \<bool\> | False | Whether standardization of features should be applied on each node with the samples that reach it. |
+| \* | multiclass_strategy | {"ovo", "ovr"} | "ovo" | Strategy to use with multiclass datasets. **"ovo"**: one versus one; **"ovr"**: one versus rest. |
+
+\* Hyperparameter used by the support vector classifier of every node
+
+\*\* **Splitting in a STree node**
+
+The decision function is applied to the dataset and the distances from the samples to the hyperplanes are stored in a matrix. This matrix has as many columns as there are classes in the multiclass case, or a single column for a binary dataset: in binary classification only one hyperplane is computed, so one column suffices to store the distances of the samples to it, whereas with three or more classes one hyperplane, and therefore one column, is needed per class.
+
+In multiclass classification we have to decide which column to take into account to make the split; that depends on the hyperparameter _split_criteria_. If "impurity" is chosen, STree computes the information gain of every split candidate using each column and chooses the one that maximizes it; otherwise STree chooses the column with the most samples predicted as its class (the column with the most positive entries).
+
+Once we have the column to take into account for the split, the algorithm separates the samples with positive distances to the hyperplane from the rest.
 
 ## Tests
 
 ```bash
 python -m unittest -v stree.tests
 ```
+
+## License
+
+STree is [MIT](https://github.com/doctorado-ml/stree/blob/master/LICENSE) licensed
+
+## Reference
+
+R. Montañana, J. A. Gámez, J. M. Puerta, "STree: A Single Multi-class Oblique Decision Tree Based on Support Vector Machines", 2021, LNAI 12882, pp. 54-64
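To make the node-splitting procedure described above concrete, here is a minimal NumPy sketch; it is not STree's actual implementation. It assumes `distances` is the matrix of decision-function values described in the README (one column per hyperplane) and uses the entropy-based information gain for the "impurity" criterion:

```python
import numpy as np

def entropy(y):
    # Shannon entropy of a label vector
    _, counts = np.unique(y, return_counts=True)
    p = counts / counts.sum()
    return float(-(p * np.log2(p)).sum())

def information_gain(y, mask):
    # Entropy reduction obtained by partitioning y according to a boolean mask
    n, n_up = len(y), int(mask.sum())
    if n_up in (0, n):  # degenerate split: nothing gained
        return 0.0
    return (entropy(y) - (n_up / n) * entropy(y[mask])
            - ((n - n_up) / n) * entropy(y[~mask]))

def choose_split_column(distances, y, split_criteria="impurity"):
    # Pick the column of the distance matrix used to split the node.
    if distances.ndim == 1 or distances.shape[1] == 1:
        return 0  # binary problem: one hyperplane, one column
    if split_criteria == "max_samples":
        # column with the most samples on its positive side
        return int(np.argmax((distances > 0).sum(axis=0)))
    # "impurity": column whose induced partition maximizes information gain
    gains = [information_gain(y, distances[:, c] > 0)
             for c in range(distances.shape[1])]
    return int(np.argmax(gains))

def split_samples(X, distances, column):
    # Separate samples with positive distance to the chosen hyperplane
    up = distances[:, column] > 0
    return X[up], X[~up]
```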
10  codecov.yml
@@ -1,12 +1,12 @@
-overage:
+coverage:
   status:
     project:
       default:
-        target: 90%
+        target: 100%
 comment:
   layout: "reach, diff, flags, files"
   behavior: default
   require_changes: false
   require_base: yes
   require_head: yes
   branches: null
20  docs/Makefile  (new file)
@@ -0,0 +1,20 @@
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS    ?=
SPHINXBUILD   ?= sphinx-build
SOURCEDIR     = source
BUILDDIR      = build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
4  docs/requirements.txt  (new file)
@@ -0,0 +1,4 @@
sphinx
sphinx-rtd-theme
myst-parser
mufs
9  docs/source/api/Siterator.rst  (new file)
@@ -0,0 +1,9 @@
Siterator
=========

.. automodule:: Splitter
.. autoclass:: Siterator
   :members:
   :undoc-members:
   :private-members:
   :show-inheritance:
9  docs/source/api/Snode.rst  (new file)
@@ -0,0 +1,9 @@
Snode
=====

.. automodule:: Splitter
.. autoclass:: Snode
   :members:
   :undoc-members:
   :private-members:
   :show-inheritance:
9  docs/source/api/Splitter.rst  (new file)
@@ -0,0 +1,9 @@
Splitter
========

.. automodule:: Splitter
.. autoclass:: Splitter
   :members:
   :undoc-members:
   :private-members:
   :show-inheritance:
9  docs/source/api/Stree.rst  (new file)
@@ -0,0 +1,9 @@
Stree
=====

.. automodule:: stree
.. autoclass:: Stree
   :members:
   :undoc-members:
   :private-members:
   :show-inheritance:
11  docs/source/api/index.rst  (new file)
@@ -0,0 +1,11 @@
API index
=========

.. toctree::
   :maxdepth: 2
   :caption: Contents:

   Stree
   Siterator
   Snode
   Splitter
56  docs/source/conf.py  (new file)
@@ -0,0 +1,56 @@
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys

from stree._version import __version__

sys.path.insert(0, os.path.abspath("../../stree/"))

# -- Project information -----------------------------------------------------

project = "STree"
copyright = "2020 - 2022, Ricardo Montañana Gómez"
author = "Ricardo Montañana Gómez"

# The full version, including alpha/beta/rc tags
version = __version__
release = version


# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["myst_parser", "sphinx.ext.autodoc", "sphinx.ext.viewcode"]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
42  docs/source/example.md  (new file)
@@ -0,0 +1,42 @@
# Examples

## Notebooks

- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/benchmark.ipynb) Benchmark
- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/features.ipynb) Some features
- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/gridsearch.ipynb) Gridsearch
- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/ensemble.ipynb) Ensembles

## Sample Code

```python
import time

from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris

from stree import Stree

random_state = 1
X, y = load_iris(return_X_y=True)
Xtrain, Xtest, ytrain, ytest = train_test_split(
    X, y, test_size=0.2, random_state=random_state
)
now = time.time()
print("Predicting with max_features=sqrt(n_features)")
clf = Stree(random_state=random_state, max_features="auto")
clf.fit(Xtrain, ytrain)
print(f"Took {time.time() - now:.2f} seconds to train")
print(clf)
print(f"Classifier's accuracy (train): {clf.score(Xtrain, ytrain):.4f}")
print(f"Classifier's accuracy (test) : {clf.score(Xtest, ytest):.4f}")
print("=" * 40)
print("Predicting with max_features=n_features")
clf = Stree(random_state=random_state)
now = time.time()  # reset the timer so the second measurement is correct
clf.fit(Xtrain, ytrain)
print(f"Took {time.time() - now:.2f} seconds to train")
print(clf)
print(f"Classifier's accuracy (train): {clf.score(Xtrain, ytrain):.4f}")
print(f"Classifier's accuracy (test) : {clf.score(Xtest, ytest):.4f}")
```
BIN  docs/source/example.png  (new file, 3.1 MiB)
Binary file not shown.
29  docs/source/hyperparameters.md  (new file)
@@ -0,0 +1,29 @@
# Hyperparameters

| | **Hyperparameter** | **Type/Values** | **Default** | **Meaning** |
| --- | --- | --- | --- | --- |
| \* | C | \<float\> | 1.0 | Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. |
| \* | kernel | {"liblinear", "linear", "poly", "rbf", "sigmoid"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of "liblinear", "linear", "poly", "rbf" or "sigmoid". liblinear uses the [liblinear](https://www.csie.ntu.edu.tw/~cjlin/liblinear/) library; the rest use the [libsvm](https://www.csie.ntu.edu.tw/~cjlin/libsvm/) library through scikit-learn. |
| \* | max_iter | \<int\> | 1e5 | Hard limit on iterations within solver, or -1 for no limit. |
| \* | random_state | \<int\> | None | Controls the pseudo random number generation for shuffling the data for probability estimates. Ignored when probability is False. Pass an int for reproducible output across multiple function calls. |
| | max_depth | \<int\> | None | Specifies the maximum depth of the tree. |
| \* | tol | \<float\> | 1e-4 | Tolerance for stopping criterion. |
| \* | degree | \<int\> | 3 | Degree of the polynomial kernel function ("poly"). Ignored by all other kernels. |
| \* | gamma | {"scale", "auto"} or \<float\> | scale | Kernel coefficient for "rbf", "poly" and "sigmoid". If gamma="scale" (default), 1 / (n_features \* X.var()) is used as the value of gamma; if "auto", 1 / n_features. |
| | split_criteria | {"impurity", "max_samples"} | impurity | Decides (only in multiclass classification) which column (class) to use to split the dataset in a node\*\*. max_samples is incompatible with the "ovo" multiclass_strategy. |
| | criterion | {"gini", "entropy"} | entropy | The function to measure the quality of a split (only used if max_features != num_features). Supported criteria are "gini" for the Gini impurity and "entropy" for the information gain. |
| | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) for any. |
| | max_features | \<int\>, \<float\> or {"auto", "sqrt", "log2"} | None | The number of features to consider when looking for the split: if int, consider max_features features at each split; if float, max_features is a fraction and int(max_features \* n_features) features are considered at each split; if "auto" or "sqrt", max_features=sqrt(n_features); if "log2", max_features=log2(n_features); if None, max_features=n_features. |
| | splitter | {"best", "random", "trandom", "mutual", "cfs", "fcbf", "iwss"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features). Supported strategies are: **"best"**: the sklearn SelectKBest algorithm is used in every node to choose the max_features best features; **"random"**: the algorithm generates 5 candidates and chooses the best (max. info. gain) of them; **"trandom"**: the algorithm generates only one random combination; **"mutual"**: chooses the best features w.r.t. their mutual info with the label; **"cfs"**: applies Correlation-based Feature Selection; **"fcbf"**: applies the Fast Correlation-Based Filter; **"iwss"**: IWSS-based algorithm. |
| | normalize | \<bool\> | False | Whether standardization of features should be applied on each node with the samples that reach it. |
| \* | multiclass_strategy | {"ovo", "ovr"} | "ovo" | Strategy to use with multiclass datasets. **"ovo"**: one versus one; **"ovr"**: one versus rest. |

\* Hyperparameter used by the support vector classifier of every node

\*\* **Splitting in a STree node**

The decision function is applied to the dataset and the distances from the samples to the hyperplanes are stored in a matrix. This matrix has as many columns as there are classes in the multiclass case, or a single column for a binary dataset: in binary classification only one hyperplane is computed, so one column suffices to store the distances of the samples to it, whereas with three or more classes one hyperplane, and therefore one column, is needed per class.

In multiclass classification we have to decide which column to take into account to make the split; that depends on the hyperparameter _split_criteria_. If "impurity" is chosen, STree computes the information gain of every split candidate using each column and chooses the one that maximizes it; otherwise STree chooses the column with the most samples predicted as its class (the column with the most positive entries).

Once we have the column to take into account for the split, the algorithm separates the samples with positive distances to the hyperplane from the rest.
15  docs/source/index.rst  (new file)
@@ -0,0 +1,15 @@
Welcome to STree's documentation!
=================================

.. toctree::
   :caption: Contents:
   :titlesonly:

   stree
   install
   hyperparameters
   example
   api/index

* :ref:`genindex`
16  docs/source/install.rst  (new file)
@@ -0,0 +1,16 @@
Install
=======

The main stable release:

``pip install stree``

or the latest development branch:

``pip install git+https://github.com/doctorado-ml/stree``

Tests
*****

``python -m unittest -v stree.tests``
17  docs/source/stree.md  (new file)
@@ -0,0 +1,17 @@
# STree

[](https://codecov.io/gh/doctorado-ml/stree)
[](https://www.codacy.com/gh/Doctorado-ML/STree?utm_source=github.com&utm_medium=referral&utm_content=Doctorado-ML/STree&utm_campaign=Badge_Grade)
[](https://lgtm.com/projects/g/Doctorado-ML/STree/context:python)
[](https://badge.fury.io/py/STree)
[](https://zenodo.org/badge/latestdoi/262658230)

Oblique Tree classifier based on SVM nodes. The nodes are built and split with sklearn SVC models. Stree is a sklearn estimator and can be integrated in pipelines, grid searches, etc.

## License

STree is [MIT](https://github.com/doctorado-ml/stree/blob/master/LICENSE) licensed
29  main.py  (deleted)
@@ -1,29 +0,0 @@
-import time
-
-from sklearn.model_selection import train_test_split
-
-from sklearn.datasets import load_iris
-
-from stree import Stree
-
-random_state = 1
-
-X, y = load_iris(return_X_y=True)
-
-Xtrain, Xtest, ytrain, ytest = train_test_split(
-    X, y, test_size=0.2, random_state=random_state
-)
-
-now = time.time()
-print("Predicting with max_features=sqrt(n_features)")
-clf = Stree(C=0.01, random_state=random_state, max_features="auto")
-clf.fit(Xtrain, ytrain)
-print(f"Took {time.time() - now:.2f} seconds to train")
-print(clf)
-print(f"Classifier's accuracy (train): {clf.score(Xtrain, ytrain):.4f}")
-print(f"Classifier's accuracy (test) : {clf.score(Xtest, ytest):.4f}")
-print("=" * 40)
-print("Predicting with max_features=n_features")
-clf = Stree(C=0.01, random_state=random_state)
-clf.fit(Xtrain, ytrain)
-print(f"Took {time.time() - now:.2f} seconds to train")
-print(clf)
-print(f"Classifier's accuracy (train): {clf.score(Xtrain, ytrain):.4f}")
-print(f"Classifier's accuracy (test) : {clf.score(Xtest, ytest):.4f}")
File diff suppressed because one or more lines are too long

@@ -17,35 +17,43 @@
 },
 {
  "cell_type": "code",
- "execution_count": 1,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
   "#\n",
   "# Google Colab setup\n",
   "#\n",
-  "#!pip install git+https://github.com/doctorado-ml/stree"
+  "#!pip install git+https://github.com/doctorado-ml/stree\n",
+  "!pip install pandas"
  ]
 },
 {
  "cell_type": "code",
- "execution_count": 2,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
   "import time\n",
+  "import os\n",
+  "import random\n",
+  "import warnings\n",
+  "import pandas as pd\n",
+  "import numpy as np\n",
   "from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier\n",
   "from sklearn.model_selection import train_test_split\n",
-  "from stree import Stree"
+  "from sklearn.exceptions import ConvergenceWarning\n",
+  "from stree import Stree\n",
+  "\n",
+  "warnings.filterwarnings(\"ignore\", category=ConvergenceWarning)"
  ]
 },
 {
  "cell_type": "code",
- "execution_count": 3,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
-  "import os\n",
   "if not os.path.isfile('data/creditcard.csv'):\n",
   " !wget --no-check-certificate --content-disposition http://nube.jccm.es/index.php/s/Zs7SYtZQJ3RQ2H2/download\n",
   " !tar xzf creditcard.tgz"
@@ -53,24 +61,15 @@
 },
 {
  "cell_type": "code",
- "execution_count": 4,
+ "execution_count": null,
  "metadata": {
   "tags": []
  },
- "outputs": [
-  {
-   "output_type": "stream",
-   "name": "stdout",
-   "text": "Fraud: 0.173% 492\nValid: 99.827% 284315\nX.shape (100492, 28) y.shape (100492,)\nFraud: 0.644% 647\nValid: 99.356% 99845\n"
-  }
- ],
+ "outputs": [],
  "source": [
   "random_state=1\n",
   "\n",
   "def load_creditcard(n_examples=0):\n",
-  " import pandas as pd\n",
-  " import numpy as np\n",
-  " import random\n",
   " df = pd.read_csv('data/creditcard.csv')\n",
   " print(\"Fraud: {0:.3f}% {1}\".format(df.Class[df.Class == 1].count()*100/df.shape[0], df.Class[df.Class == 1].count()))\n",
   " print(\"Valid: {0:.3f}% {1}\".format(df.Class[df.Class == 0].count()*100/df.shape[0], df.Class[df.Class == 0].count()))\n",
@@ -121,20 +120,14 @@
 },
 {
  "cell_type": "code",
- "execution_count": 5,
+ "execution_count": null,
  "metadata": {
   "tags": []
  },
- "outputs": [
-  {
-   "output_type": "stream",
-   "name": "stdout",
-   "text": "Score Train: 0.9985784146480154\nScore Test: 0.9981093273185617\nTook 73.27 seconds\n"
-  }
- ],
+ "outputs": [],
  "source": [
   "now = time.time()\n",
-  "clf = Stree(max_depth=3, random_state=random_state)\n",
+  "clf = Stree(max_depth=3, random_state=random_state, max_iter=1e3)\n",
   "clf.fit(Xtrain, ytrain)\n",
   "print(\"Score Train: \", clf.score(Xtrain, ytrain))\n",
   "print(\"Score Test: \", clf.score(Xtest, ytest))\n",
@@ -150,7 +143,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": 6,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -161,21 +154,15 @@
 },
 {
  "cell_type": "code",
- "execution_count": 7,
+ "execution_count": null,
  "metadata": {
   "tags": []
  },
- "outputs": [
-  {
-   "output_type": "stream",
-   "name": "stdout",
-   "text": "Kernel: linear\tTime: 93.78 seconds\tScore Train: 0.9983083\tScore Test: 0.9983083\nKernel: rbf\tTime: 18.32 seconds\tScore Train: 0.9935602\tScore Test: 0.9935651\nKernel: poly\tTime: 69.68 seconds\tScore Train: 0.9973132\tScore Test: 0.9972801\n"
-  }
- ],
+ "outputs": [],
  "source": [
   "for kernel in ['linear', 'rbf', 'poly']:\n",
   " now = time.time()\n",
-  " clf = AdaBoostClassifier(base_estimator=Stree(C=C, kernel=kernel, max_depth=max_depth, random_state=random_state), algorithm=\"SAMME\", n_estimators=n_estimators, random_state=random_state)\n",
+  " clf = AdaBoostClassifier(base_estimator=Stree(C=C, kernel=kernel, max_depth=max_depth, random_state=random_state, max_iter=1e3), algorithm=\"SAMME\", n_estimators=n_estimators, random_state=random_state)\n",
   " clf.fit(Xtrain, ytrain)\n",
   " score_train = clf.score(Xtrain, ytrain)\n",
   " score_test = clf.score(Xtest, ytest)\n",
@@ -191,7 +178,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": 8,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -202,21 +189,15 @@
 },
 {
  "cell_type": "code",
- "execution_count": 9,
+ "execution_count": null,
  "metadata": {
   "tags": []
  },
- "outputs": [
-  {
-   "output_type": "stream",
-   "name": "stdout",
-   "text": "Kernel: linear\tTime: 387.06 seconds\tScore Train: 0.9985784\tScore Test: 0.9981093\nKernel: rbf\tTime: 144.00 seconds\tScore Train: 0.9992750\tScore Test: 0.9983415\nKernel: poly\tTime: 101.78 seconds\tScore Train: 0.9992466\tScore Test: 0.9981757\n"
-  }
- ],
+ "outputs": [],
  "source": [
   "for kernel in ['linear', 'rbf', 'poly']:\n",
   " now = time.time()\n",
-  " clf = BaggingClassifier(base_estimator=Stree(C=C, kernel=kernel, max_depth=max_depth, random_state=random_state), n_estimators=n_estimators, random_state=random_state)\n",
+  " clf = BaggingClassifier(base_estimator=Stree(C=C, kernel=kernel, max_depth=max_depth, random_state=random_state, max_iter=1e3), n_estimators=n_estimators, random_state=random_state)\n",
   " clf.fit(Xtrain, ytrain)\n",
   " score_train = clf.score(Xtrain, ytrain)\n",
   " score_test = clf.score(Xtest, ytest)\n",
@@ -225,6 +206,11 @@
  }
 ],
 "metadata": {
+ "kernelspec": {
+  "display_name": "Python 3",
+  "language": "python",
+  "name": "python3"
+ },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
@@ -235,14 +221,9 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-  "version": "3.7.6-final"
+  "version": "3.8.2-final"
  },
- "orig_nbformat": 2,
- "kernelspec": {
-  "name": "python37664bitgeneralvenve3128601eb614c5da59c5055670b6040",
-  "display_name": "Python 3.7.6 64-bit ('general': venv)"
- }
 },
 "nbformat": 4,
-"nbformat_minor": 2
+"nbformat_minor": 4
 }
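The ensemble cells in this notebook reference `C`, `max_depth` and `n_estimators` set in earlier, unchanged cells. A self-contained sketch of the same pattern follows, with illustrative values for those names and a built-in dataset standing in for the creditcard download; `base_estimator` is the parameter spelling the notebook uses, which scikit-learn 1.2 and later rename to `estimator`:

```python
from sklearn.datasets import load_wine
from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier
from sklearn.model_selection import train_test_split
from stree import Stree

# Illustrative values; the notebook defines these in earlier cells.
C, max_depth, n_estimators, random_state = 7, 3, 10, 1

X, y = load_wine(return_X_y=True)
Xtrain, Xtest, ytrain, ytest = train_test_split(
    X, y, test_size=0.2, random_state=random_state
)

# One Stree per node-SVC configuration, wrapped by two ensemble strategies
base = Stree(C=C, kernel="linear", max_depth=max_depth,
             random_state=random_state, max_iter=1e3)
for clf in (
    AdaBoostClassifier(base_estimator=base, algorithm="SAMME",
                       n_estimators=n_estimators, random_state=random_state),
    BaggingClassifier(base_estimator=base, n_estimators=n_estimators,
                      random_state=random_state),
):
    clf.fit(Xtrain, ytrain)
    print(type(clf).__name__, clf.score(Xtest, ytest))
```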
File diff suppressed because one or more lines are too long

@@ -18,64 +18,67 @@
 },
 {
  "cell_type": "code",
- "execution_count": 1,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
   "#\n",
   "# Google Colab setup\n",
   "#\n",
-  "#!pip install git+https://github.com/doctorado-ml/stree"
+  "#!pip install git+https://github.com/doctorado-ml/stree\n",
+  "!pip install pandas"
  ]
 },
 {
  "cell_type": "code",
+ "execution_count": null,
  "metadata": {
-  "id": "zIHKVxthDZEa",
+  "colab": {},
   "colab_type": "code",
-  "colab": {}
+  "id": "zIHKVxthDZEa"
  },
+ "outputs": [],
  "source": [
+  "import random\n",
+  "import os\n",
+  "import pandas as pd\n",
+  "import numpy as np\n",
   "from sklearn.ensemble import AdaBoostClassifier\n",
   "from sklearn.svm import LinearSVC\n",
   "from sklearn.model_selection import GridSearchCV, train_test_split\n",
   "from stree import Stree"
- ],
- "execution_count": 2,
- "outputs": []
+ ]
 },
 {
  "cell_type": "code",
+ "execution_count": null,
  "metadata": {
-  "id": "IEmq50QgDZEi",
+  "colab": {},
   "colab_type": "code",
-  "colab": {}
+  "id": "IEmq50QgDZEi"
  },
+ "outputs": [],
  "source": [
-  "import os\n",
   "if not os.path.isfile('data/creditcard.csv'):\n",
   " !wget --no-check-certificate --content-disposition http://nube.jccm.es/index.php/s/Zs7SYtZQJ3RQ2H2/download\n",
   " !tar xzf creditcard.tgz"
- ],
- "execution_count": 3,
- "outputs": []
+ ]
 },
 {
  "cell_type": "code",
+ "execution_count": null,
  "metadata": {
-  "id": "z9Q-YUfBDZEq",
-  "colab_type": "code",
   "colab": {},
+  "colab_type": "code",
+  "id": "z9Q-YUfBDZEq",
   "outputId": "afc822fb-f16a-4302-8a67-2b9e2880159b",
   "tags": []
  },
+ "outputs": [],
  "source": [
   "random_state=1\n",
   "\n",
   "def load_creditcard(n_examples=0):\n",
-  " import pandas as pd\n",
-  " import numpy as np\n",
-  " import random\n",
   " df = pd.read_csv('data/creditcard.csv')\n",
   " print(\"Fraud: {0:.3f}% {1}\".format(df.Class[df.Class == 1].count()*100/df.shape[0], df.Class[df.Class == 1].count()))\n",
   " print(\"Valid: {0:.3f}% {1}\".format(df.Class[df.Class == 0].count()*100/df.shape[0], df.Class[df.Class == 0].count()))\n",
@@ -107,14 +110,6 @@
  "Xtest = data[1]\n",
  "ytrain = data[2]\n",
  "ytest = data[3]"
- ],
- "execution_count": 4,
- "outputs": [
-  {
-   "output_type": "stream",
-   "name": "stdout",
-   "text": "Fraud: 0.173% 492\nValid: 99.827% 284315\nX.shape (1492, 28) y.shape (1492,)\nFraud: 32.976% 492\nValid: 67.024% 1000\n"
-  }
- ]
+ ]
 },
 {
@@ -126,100 +121,120 @@
 },
 {
  "cell_type": "code",
+ "execution_count": null,
  "metadata": {
-  "id": "HmX3kR4PDZEw",
+  "colab": {},
   "colab_type": "code",
-  "colab": {}
+  "id": "HmX3kR4PDZEw"
  },
+ "outputs": [],
  "source": [
-  "parameters = {\n",
-  " 'base_estimator': [Stree()],\n",
-  " 'n_estimators': [10, 25],\n",
-  " 'learning_rate': [.5, 1],\n",
-  " 'base_estimator__tol': [.1, 1e-02],\n",
-  " 'base_estimator__max_depth': [3, 5],\n",
-  " 'base_estimator__C': [7, 55],\n",
-  " 'base_estimator__kernel': ['linear', 'poly', 'rbf']\n",
-  "}"
- ],
- "execution_count": 5,
- "outputs": []
+  "parameters = [{\n",
+  " 'base_estimator': [Stree(random_state=random_state)],\n",
+  " 'n_estimators': [10, 25],\n",
+  " 'learning_rate': [.5, 1],\n",
+  " 'estimator__split_criteria': ['max_samples', 'impurity'],\n",
+  " 'estimator__tol': [.1, 1e-02],\n",
+  " 'estimator__max_depth': [3, 5, 7],\n",
+  " 'estimator__C': [1, 7, 55],\n",
+  " 'estimator__kernel': ['linear']\n",
+  "},\n",
+  "{\n",
+  " 'base_estimator': [Stree(random_state=random_state)],\n",
+  " 'n_estimators': [10, 25],\n",
+  " 'learning_rate': [.5, 1],\n",
+  " 'estimator__split_criteria': ['max_samples', 'impurity'],\n",
+  " 'estimator__tol': [.1, 1e-02],\n",
+  " 'estimator__max_depth': [3, 5, 7],\n",
+  " 'estimator__C': [1, 7, 55],\n",
+  " 'estimator__degree': [3, 5, 7],\n",
+  " 'estimator__kernel': ['poly']\n",
+  "},\n",
+  "{\n",
+  " 'base_estimator': [Stree(random_state=random_state)],\n",
+  " 'n_estimators': [10, 25],\n",
+  " 'learning_rate': [.5, 1],\n",
+  " 'estimator__split_criteria': ['max_samples', 'impurity'],\n",
+  " 'estimator__tol': [.1, 1e-02],\n",
+  " 'estimator__max_depth': [3, 5, 7],\n",
+  " 'estimator__C': [1, 7, 55],\n",
+  " 'estimator__gamma': [.1, 1, 10],\n",
+  " 'estimator__kernel': ['rbf']\n",
+  "}]"
+ ]
 },
 {
  "cell_type": "code",
- "execution_count": 6,
+ "execution_count": null,
  "metadata": {},
- "outputs": [
-  {
-   "output_type": "execute_result",
-   "data": {
-    "text/plain": "{'C': 1.0,\n 'criterion': 'gini',\n 'degree': 3,\n 'gamma': 'scale',\n 'kernel': 'linear',\n 'max_depth': None,\n 'max_features': None,\n 'max_iter': 1000,\n 'min_samples_split': 0,\n 'random_state': None,\n 'split_criteria': 'max_samples',\n 'splitter': 'random',\n 'tol': 0.0001}"
-   },
-   "metadata": {},
-   "execution_count": 6
-  }
- ],
+ "outputs": [],
  "source": [
   "Stree().get_params()"
  ]
 },
 {
  "cell_type": "code",
+ "execution_count": null,
  "metadata": {
-  "id": "CrcB8o6EDZE5",
-  "colab_type": "code",
   "colab": {},
+  "colab_type": "code",
+  "id": "CrcB8o6EDZE5",
   "outputId": "7703413a-d563-4289-a13b-532f38f82762",
   "tags": []
  },
+ "outputs": [],
  "source": [
-  "random_state=2020\n",
   "clf = AdaBoostClassifier(random_state=random_state, algorithm=\"SAMME\")\n",
-  "grid = GridSearchCV(clf, parameters, verbose=10, n_jobs=-1, return_train_score=True)\n",
+  "grid = GridSearchCV(clf, parameters, verbose=5, n_jobs=-1, return_train_score=True)\n",
   "grid.fit(Xtrain, ytrain)"
- ],
- "execution_count": 7,
- "outputs": [
-  {
-   "output_type": "stream",
-   "name": "stdout",
-   "text": "Fitting 5 folds for each of 96 candidates, totalling 480 fits\n[Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers.\n[Parallel(n_jobs=-1)]: Done 2 tasks | elapsed: 2.0s\n[Parallel(n_jobs=-1)]: Done 9 tasks | elapsed: 2.4s\n[Parallel(n_jobs=-1)]: Done 16 tasks | elapsed: 2.7s\n[Parallel(n_jobs=-1)]: Done 25 tasks | elapsed: 3.3s\n[Parallel(n_jobs=-1)]: Done 34 tasks | elapsed: 4.3s\n[Parallel(n_jobs=-1)]: Done 45 tasks | elapsed: 5.3s\n[Parallel(n_jobs=-1)]: Done 56 tasks | elapsed: 6.6s\n[Parallel(n_jobs=-1)]: Done 69 tasks | elapsed: 8.1s\n[Parallel(n_jobs=-1)]: Done 82 tasks | elapsed: 9.4s\n[Parallel(n_jobs=-1)]: Done 97 tasks | elapsed: 10.1s\n[Parallel(n_jobs=-1)]: Done 112 tasks | elapsed: 11.1s\n[Parallel(n_jobs=-1)]: Done 129 tasks | elapsed: 12.3s\n[Parallel(n_jobs=-1)]: Done 146 tasks | elapsed: 13.6s\n[Parallel(n_jobs=-1)]: Done 165 tasks | elapsed: 14.9s\n[Parallel(n_jobs=-1)]: Done 184 tasks | elapsed: 16.2s\n[Parallel(n_jobs=-1)]: Done 205 tasks | elapsed: 17.6s\n[Parallel(n_jobs=-1)]: Done 226 tasks | elapsed: 19.1s\n[Parallel(n_jobs=-1)]: Done 249 tasks | elapsed: 21.6s\n[Parallel(n_jobs=-1)]: Done 272 tasks | elapsed: 25.9s\n[Parallel(n_jobs=-1)]: Done 297 tasks | elapsed: 30.4s\n[Parallel(n_jobs=-1)]: Done 322 tasks | elapsed: 36.7s\n[Parallel(n_jobs=-1)]: Done 349 tasks | elapsed: 38.1s\n[Parallel(n_jobs=-1)]: Done 376 tasks | elapsed: 39.6s\n[Parallel(n_jobs=-1)]: Done 405 tasks | elapsed: 41.9s\n[Parallel(n_jobs=-1)]: Done 434 tasks | elapsed: 44.9s\n[Parallel(n_jobs=-1)]: Done 465 tasks | elapsed: 48.2s\n[Parallel(n_jobs=-1)]: Done 480 out of 480 | elapsed: 49.2s finished\n"
-  },
-  {
-   "output_type": "execute_result",
-   "data": {
-    "text/plain": "GridSearchCV(estimator=AdaBoostClassifier(algorithm='SAMME', random_state=2020),\n n_jobs=-1,\n param_grid={'base_estimator': [Stree(C=55, max_depth=3, tol=0.01)],\n 'base_estimator__C': [7, 55],\n 'base_estimator__kernel': ['linear', 'poly', 'rbf'],\n 'base_estimator__max_depth': [3, 5],\n 'base_estimator__tol': [0.1, 0.01],\n 'learning_rate': [0.5, 1], 'n_estimators': [10, 25]},\n return_train_score=True, verbose=10)"
-   },
-   "metadata": {},
-   "execution_count": 7
-  }
- ]
+ ]
 },
 {
  "cell_type": "code",
+ "execution_count": null,
  "metadata": {
-  "id": "ZjX88NoYDZE8",
-  "colab_type": "code",
   "colab": {},
+  "colab_type": "code",
+  "id": "ZjX88NoYDZE8",
   "outputId": "285163c8-fa33-4915-8ae7-61c4f7844344",
   "tags": []
  },
+ "outputs": [],
  "source": [
   "print(\"Best estimator: \", grid.best_estimator_)\n",
   "print(\"Best hyperparameters: \", grid.best_params_)\n",
   "print(\"Best accuracy: \", grid.best_score_)"
- ],
- "execution_count": 8,
- "outputs": [
-  {
-   "output_type": "stream",
-   "name": "stdout",
-   "text": "Best estimator: AdaBoostClassifier(algorithm='SAMME',\n base_estimator=Stree(C=55, max_depth=3, tol=0.01),\n learning_rate=0.5, n_estimators=25, random_state=2020)\nBest hyperparameters: {'base_estimator': Stree(C=55, max_depth=3, tol=0.01), 'base_estimator__C': 55, 'base_estimator__kernel': 'linear', 'base_estimator__max_depth': 3, 'base_estimator__tol': 0.01, 'learning_rate': 0.5, 'n_estimators': 25}\nBest accuracy: 0.9559440559440558\n"
-  }
- ]
+ ]
+},
+{
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+  "Best estimator: AdaBoostClassifier(algorithm='SAMME',\n",
+  " base_estimator=Stree(C=55, max_depth=7, random_state=1,\n",
+  " split_criteria='max_samples', tol=0.1),\n",
+  " learning_rate=0.5, n_estimators=25, random_state=1)\n",
+  "Best hyperparameters: {'base_estimator': Stree(C=55, max_depth=7, random_state=1, split_criteria='max_samples', tol=0.1), 'estimator__C': 55, 'estimator__kernel': 'linear', 'estimator__max_depth': 7, 'estimator__split_criteria': 'max_samples', 'estimator__tol': 0.1, 'learning_rate': 0.5, 'n_estimators': 25}"
+ ]
+},
+{
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+  "Best accuracy: 0.9511777695988222"
+ ]
 }
],
"metadata": {
+ "colab": {
+  "name": "gridsearch.ipynb",
+  "provenance": []
+ },
+ "kernelspec": {
+  "display_name": "Python 3",
+  "language": "python",
+  "name": "python3"
+ },
 "language_info": {
  "codemirror_mode": {
   "name": "ipython",
@@ -230,18 +245,9 @@
  "name": "python",
  "nbconvert_exporter": "python",
  "pygments_lexer": "ipython3",
- "version": "3.7.6-final"
+ "version": "3.8.2-final"
 },
- "orig_nbformat": 2,
- "kernelspec": {
-  "name": "python37664bitgeneralvenvfbd0a23e74cf4e778460f5ffc6761f39",
-  "display_name": "Python 3.7.6 64-bit ('general': venv)"
- },
- "colab": {
-  "name": "gridsearch.ipynb",
-  "provenance": []
- }
 },
|
},
|
||||||
"nbformat": 4,
|
"nbformat": 4,
|
||||||
"nbformat_minor": 0
|
"nbformat_minor": 4
|
||||||
}
|
}
|
||||||
|
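For reference outside the notebook, a minimal self-contained sketch of the search configured in the cells above. The `parameters` grid and the train split are defined in earlier cells not shown in this hunk, so the values here are illustrative assumptions, and the parameter names follow the pre-1.2 scikit-learn AdaBoost API the notebook uses:

# Sketch of the gridsearch.ipynb AdaBoost + Stree search; grid values assumed.
from sklearn.datasets import load_wine
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import GridSearchCV, train_test_split
from stree import Stree

X, y = load_wine(return_X_y=True)
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, random_state=2020)
parameters = {
    # hypothetical grid; the notebook defines its own earlier
    "base_estimator": [Stree()],
    "base_estimator__C": [7, 55],
    "base_estimator__max_depth": [3, 5],
    "n_estimators": [10, 25],
    "learning_rate": [0.5, 1],
}
clf = AdaBoostClassifier(random_state=2020, algorithm="SAMME")
grid = GridSearchCV(clf, parameters, verbose=5, n_jobs=-1, return_train_score=True)
grid.fit(Xtrain, ytrain)
print(grid.best_params_, grid.best_score_)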
requirements.txt
@@ -1,4 +1,2 @@
-numpy
-scikit-learn
-pandas
-ipympl
+scikit-learn>0.24
+mufs
1 runtime.txt Normal file
@@ -0,0 +1 @@
python-3.8
44 setup.py
@@ -1,7 +1,5 @@
 import setuptools
+import os

-__version__ = "0.9rc5"
-__author__ = "Ricardo Montañana Gómez"


 def readme():
@@ -9,28 +7,50 @@ def readme():
         return f.read()


+def get_data(field, file_name="__init__.py"):
+    item = ""
+    with open(os.path.join("stree", file_name)) as f:
+        for line in f.readlines():
+            if line.startswith(f"__{field}__"):
+                delim = '"' if '"' in line else "'"
+                item = line.split(delim)[1]
+                break
+        else:
+            raise RuntimeError(f"Unable to find {field} string.")
+    return item
+
+
+def get_requirements():
+    with open("requirements.txt") as f:
+        return f.read().splitlines()
+
+
 setuptools.setup(
     name="STree",
-    version=__version__,
-    license="MIT License",
+    version=get_data("version", "_version.py"),
+    license=get_data("license"),
     description="Oblique decision tree with svm nodes",
     long_description=readme(),
     long_description_content_type="text/markdown",
     packages=setuptools.find_packages(),
-    url="https://github.com/doctorado-ml/stree",
-    author=__author__,
-    author_email="ricardo.montanana@alu.uclm.es",
+    url="https://github.com/Doctorado-ML/STree#stree",
+    project_urls={
+        "Code": "https://github.com/Doctorado-ML/STree",
+        "Documentation": "https://stree.readthedocs.io/en/latest/index.html",
+    },
+    author=get_data("author"),
+    author_email=get_data("author_email"),
     keywords="scikit-learn oblique-classifier oblique-decision-tree decision-\
 tree svm svc",
     classifiers=[
-        "Development Status :: 4 - Beta",
-        "License :: OSI Approved :: MIT License",
-        "Programming Language :: Python :: 3.7",
+        "Development Status :: 5 - Production/Stable",
+        "License :: OSI Approved :: " + get_data("license"),
+        "Programming Language :: Python :: 3.8",
         "Natural Language :: English",
         "Topic :: Scientific/Engineering :: Artificial Intelligence",
         "Intended Audience :: Science/Research",
     ],
-    install_requires=["scikit-learn>=0.23.0", "numpy", "ipympl"],
+    install_requires=get_requirements(),
     test_suite="stree.tests",
     zip_safe=False,
 )
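The new get_data helper relies on Python's for/else: the else branch runs only when the loop never hits break, so a missing dunder field aborts the build instead of packaging an empty value. A minimal sketch of that behavior (find_field is a hypothetical stand-in, not part of the repository):

# Illustration of the for/else lookup used by get_data above.
def find_field(lines, field):
    for line in lines:
        if line.startswith(f"__{field}__"):
            # pick the quote character actually used in the assignment
            delim = '"' if '"' in line else "'"
            return line.split(delim)[1]
    # reached only if no line matched, mirroring get_data's else clause
    raise RuntimeError(f"Unable to find {field} string.")

print(find_field(['__version__ = "1.3.2"'], "version"))  # -> 1.3.2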
10 stree/.readthedocs.yaml Normal file
@@ -0,0 +1,10 @@
version: 2

sphinx:
  configuration: docs/source/conf.py

python:
  version: 3.8
  install:
    - requirements: requirements.txt
    - requirements: docs/requirements.txt
808 stree/Splitter.py Normal file
@@ -0,0 +1,808 @@
"""
Oblique decision tree classifier based on SVM nodes
Splitter class
"""

import os
import warnings
import random
from math import log, factorial
import numpy as np
from sklearn.feature_selection import SelectKBest, mutual_info_classif
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.exceptions import ConvergenceWarning
from mufs import MUFS


class Snode:
    """
    Node of the tree that keeps the svm classifier and, if testing, the
    dataset assigned to it

    Parameters
    ----------
    clf : SVC
        Classifier used
    X : np.ndarray
        input dataset in train time (only in testing)
    y : np.ndarray
        input labels in train time
    features : np.array
        features used to compute hyperplane
    impurity : float
        impurity of the node
    title : str
        label describing the route to the node
    weight : np.ndarray, optional
        weights applied to input dataset in train time, by default None
    scaler : StandardScaler, optional
        scaler used if any, by default None
    """

    def __init__(
        self,
        clf: SVC,
        X: np.ndarray,
        y: np.ndarray,
        features: np.array,
        impurity: float,
        title: str,
        weight: np.ndarray = None,
        scaler: StandardScaler = None,
    ):
        self._clf = clf
        self._title = title
        self._belief = 0.0
        # Only store dataset in Testing
        self._X = X if os.environ.get("TESTING", "NS") != "NS" else None
        self._y = y
        self._down = None
        self._up = None
        self._class = None
        self._feature = None
        self._sample_weight = (
            weight if os.environ.get("TESTING", "NS") != "NS" else None
        )
        self._features = features
        self._impurity = impurity
        self._partition_column: int = -1
        self._scaler = scaler
        self._proba = None

    @classmethod
    def copy(cls, node: "Snode") -> "Snode":
        return cls(
            node._clf,
            node._X,
            node._y,
            node._features,
            node._impurity,
            node._title,
            node._sample_weight,
            node._scaler,
        )

    def set_partition_column(self, col: int):
        self._partition_column = col

    def get_partition_column(self) -> int:
        return self._partition_column

    def set_down(self, son):
        self._down = son

    def set_title(self, title):
        self._title = title

    def set_classifier(self, clf):
        self._clf = clf

    def set_features(self, features):
        self._features = features

    def set_impurity(self, impurity):
        self._impurity = impurity

    def get_title(self) -> str:
        return self._title

    def get_classifier(self) -> SVC:
        return self._clf

    def get_impurity(self) -> float:
        return self._impurity

    def get_features(self) -> np.array:
        return self._features

    def set_up(self, son):
        self._up = son

    def is_leaf(self) -> bool:
        return self._up is None and self._down is None

    def get_down(self) -> "Snode":
        return self._down

    def get_up(self) -> "Snode":
        return self._up

    def make_predictor(self, num_classes: int) -> None:
        """Compute the class of the predictor and its belief based on the
        subdataset of the node only if it is a leaf
        """
        if not self.is_leaf():
            return
        classes, card = np.unique(self._y, return_counts=True)
        self._proba = np.zeros((num_classes,), dtype=np.int64)
        for c, n in zip(classes, card):
            self._proba[c] = n
        try:
            max_card = max(card)
            self._class = classes[card == max_card][0]
            self._belief = max_card / np.sum(card)
        except ValueError:
            self._class = None

    def graph(self):
        """
        Return a string representing the node in graphviz format
        """
        output = ""
        count_values = np.unique(self._y, return_counts=True)
        if self.is_leaf():
            output += (
                f'N{id(self)} [shape=box style=filled label="'
                f"class={self._class} impurity={self._impurity:.3f} "
                f'counts={self._proba}"];\n'
            )
        else:
            output += (
                f'N{id(self)} [label="#features={len(self._features)} '
                f"classes={count_values[0]} samples={count_values[1]} "
                f'({sum(count_values[1])})" fontcolor=black];\n'
            )
            output += f"N{id(self)} -> N{id(self.get_up())} [color=black];\n"
            output += f"N{id(self)} -> N{id(self.get_down())} [color=black];\n"
        return output

    def __str__(self) -> str:
        count_values = np.unique(self._y, return_counts=True)
        if self.is_leaf():
            return (
                f"{self._title} - Leaf class={self._class} belief="
                f"{self._belief: .6f} impurity={self._impurity:.4f} "
                f"counts={count_values}"
            )
        return (
            f"{self._title} features={self._features} impurity="
            f"{self._impurity:.4f} "
            f"counts={count_values}"
        )


class Siterator:
    """Stree preorder iterator"""

    def __init__(self, tree: Snode):
        self._stack = []
        self._push(tree)

    def __iter__(self):
        # To complete the iterator interface
        return self

    def _push(self, node: Snode):
        if node is not None:
            self._stack.append(node)

    def __next__(self) -> Snode:
        if len(self._stack) == 0:
            raise StopIteration()
        node = self._stack.pop()
        self._push(node.get_up())
        self._push(node.get_down())
        return node


class Splitter:
    """
    Splits a dataset in two based on different criteria

    Parameters
    ----------
    clf : SVC, optional
        classifier, by default None
    criterion : str, optional
        The function to measure the quality of a split (only used if
        max_features != num_features). Supported criteria are "gini" for the
        Gini impurity and "entropy" for the information gain, by default None
    feature_select : str, optional
        The strategy used to choose the feature set at each node (only used
        if max_features < num_features). Supported strategies are: "best":
        sklearn SelectKBest algorithm is used in every node to choose the
        max_features best features. "random": The algorithm generates 5
        candidates and chooses the best (max. info. gain) of them. "trandom":
        The algorithm generates only one random combination. "mutual":
        Chooses the best features w.r.t. their mutual info with the label.
        "cfs": Apply Correlation-based Feature Selection. "fcbf": Apply Fast
        Correlation-Based Filter, by default None
    criteria : str, optional
        Decides (just in case of a multi class classification) which column
        (class) to use to split the dataset in a node. max_samples is
        incompatible with 'ovo' multiclass_strategy, by default None
    min_samples_split : int, optional
        The minimum number of samples required to split an internal node. 0
        (default) for any, by default None
    random_state : optional
        Controls the pseudo random number generation for shuffling the data
        for probability estimates. Ignored when probability is False. Pass an
        int for reproducible output across multiple function calls, by
        default None
    normalize : bool, optional
        If standardization of features should be applied on each node with
        the samples that reach it, by default False

    Raises
    ------
    ValueError
        clf has to be a sklearn estimator
    ValueError
        criterion must be gini or entropy
    ValueError
        criteria has to be max_samples or impurity
    ValueError
        splitter must be in {random, trandom, best, mutual, cfs, fcbf, iwss}
    """

    def __init__(
        self,
        clf: SVC = None,
        criterion: str = None,
        feature_select: str = None,
        criteria: str = None,
        min_samples_split: int = None,
        random_state=None,
        normalize=False,
    ):
        self._clf = clf
        self._random_state = random_state
        if random_state is not None:
            random.seed(random_state)
        self._criterion = criterion
        self._min_samples_split = min_samples_split
        self._criteria = criteria
        self._feature_select = feature_select
        self._normalize = normalize

        if clf is None:
            raise ValueError(f"clf has to be a sklearn estimator, got({clf})")

        if criterion not in ["gini", "entropy"]:
            raise ValueError(
                f"criterion must be gini or entropy got({criterion})"
            )

        if criteria not in [
            "max_samples",
            "impurity",
        ]:
            raise ValueError(
                f"criteria has to be max_samples or impurity; got ({criteria})"
            )

        if feature_select not in [
            "random",
            "trandom",
            "best",
            "mutual",
            "cfs",
            "fcbf",
            "iwss",
        ]:
            raise ValueError(
                "splitter must be in {random, trandom, best, mutual, cfs, "
                "fcbf, iwss} "
                f"got ({feature_select})"
            )
        self.criterion_function = getattr(self, f"_{self._criterion}")
        self.decision_criteria = getattr(self, f"_{self._criteria}")
        self.fs_function = getattr(self, f"_fs_{self._feature_select}")

    def _fs_random(
        self, dataset: np.array, labels: np.array, max_features: int
    ) -> tuple:
        """Return the best of five random feature set combinations

        Parameters
        ----------
        dataset : np.array
            array of samples
        labels : np.array
            labels of the dataset
        max_features : int
            number of features of the subspace
            (< number of features in dataset)

        Returns
        -------
        tuple
            indices of the features selected
        """
        # Random feature reduction
        n_features = dataset.shape[1]
        features_sets = self._generate_spaces(n_features, max_features)
        return self._select_best_set(dataset, labels, features_sets)

    @staticmethod
    def _fs_trandom(
        dataset: np.array, labels: np.array, max_features: int
    ) -> tuple:
        """Return a random feature set combination

        Parameters
        ----------
        dataset : np.array
            array of samples
        labels : np.array
            labels of the dataset
        max_features : int
            number of features of the subspace
            (< number of features in dataset)

        Returns
        -------
        tuple
            indices of the features selected
        """
        # Random feature reduction
        n_features = dataset.shape[1]
        return tuple(sorted(random.sample(range(n_features), max_features)))

    @staticmethod
    def _fs_best(
        dataset: np.array, labels: np.array, max_features: int
    ) -> tuple:
        """Return the variables with highest f-score

        Parameters
        ----------
        dataset : np.array
            array of samples
        labels : np.array
            labels of the dataset
        max_features : int
            number of features of the subspace
            (< number of features in dataset)

        Returns
        -------
        tuple
            indices of the features selected
        """
        return (
            SelectKBest(k=max_features)
            .fit(dataset, labels)
            .get_support(indices=True)
        )

    def _fs_mutual(
        self, dataset: np.array, labels: np.array, max_features: int
    ) -> tuple:
        """Return the best features with mutual information with labels

        Parameters
        ----------
        dataset : np.array
            array of samples
        labels : np.array
            labels of the dataset
        max_features : int
            number of features of the subspace
            (< number of features in dataset)

        Returns
        -------
        tuple
            indices of the features selected
        """
        # return best features with mutual info with the label
        feature_list = mutual_info_classif(
            dataset, labels, random_state=self._random_state
        )
        return tuple(
            sorted(
                range(len(feature_list)), key=lambda sub: feature_list[sub]
            )[-max_features:]
        )

    @staticmethod
    def _fs_cfs(
        dataset: np.array, labels: np.array, max_features: int
    ) -> tuple:
        """Correlation-based feature selection with max_features limit

        Parameters
        ----------
        dataset : np.array
            array of samples
        labels : np.array
            labels of the dataset
        max_features : int
            number of features of the subspace
            (< number of features in dataset)

        Returns
        -------
        tuple
            indices of the features selected
        """
        mufs = MUFS(max_features=max_features, discrete=False)
        return mufs.cfs(dataset, labels).get_results()

    @staticmethod
    def _fs_fcbf(
        dataset: np.array, labels: np.array, max_features: int
    ) -> tuple:
        """Fast Correlation-based Filter algorithm with max_features limit

        Parameters
        ----------
        dataset : np.array
            array of samples
        labels : np.array
            labels of the dataset
        max_features : int
            number of features of the subspace
            (< number of features in dataset)

        Returns
        -------
        tuple
            indices of the features selected
        """
        mufs = MUFS(max_features=max_features, discrete=False)
        return mufs.fcbf(dataset, labels, 5e-4).get_results()

    @staticmethod
    def _fs_iwss(
        dataset: np.array, labels: np.array, max_features: int
    ) -> tuple:
        """Correlation-based feature selection based on iwss with
        max_features limit

        Parameters
        ----------
        dataset : np.array
            array of samples
        labels : np.array
            labels of the dataset
        max_features : int
            number of features of the subspace
            (< number of features in dataset)

        Returns
        -------
        tuple
            indices of the features selected
        """
        mufs = MUFS(max_features=max_features, discrete=False)
        return mufs.iwss(dataset, labels, 0.25).get_results()

    def partition_impurity(self, y: np.array) -> np.array:
        return self.criterion_function(y)

    @staticmethod
    def _gini(y: np.array) -> float:
        _, count = np.unique(y, return_counts=True)
        return 1 - np.sum(np.square(count / np.sum(count)))

    @staticmethod
    def _entropy(y: np.array) -> float:
        """Compute entropy of a labels set

        Parameters
        ----------
        y : np.array
            set of labels

        Returns
        -------
        float
            entropy
        """
        n_labels = len(y)
        if n_labels <= 1:
            return 0
        counts = np.bincount(y)
        proportions = counts / n_labels
        n_classes = np.count_nonzero(proportions)
        if n_classes <= 1:
            return 0
        entropy = 0.0
        # Compute standard entropy.
        for prop in proportions:
            if prop != 0.0:
                entropy -= prop * log(prop, n_classes)
        return entropy

    def information_gain(
        self, labels: np.array, labels_up: np.array, labels_dn: np.array
    ) -> float:
        """Compute information gain of a split candidate

        Parameters
        ----------
        labels : np.array
            labels of the dataset
        labels_up : np.array
            labels of one side
        labels_dn : np.array
            labels on the other side

        Returns
        -------
        float
            information gain
        """
        imp_prev = self.criterion_function(labels)
        card_up = card_dn = imp_up = imp_dn = 0
        if labels_up is not None:
            card_up = labels_up.shape[0]
            imp_up = self.criterion_function(labels_up)
        if labels_dn is not None:
            card_dn = labels_dn.shape[0] if labels_dn is not None else 0
            imp_dn = self.criterion_function(labels_dn)
        samples = card_up + card_dn
        if samples == 0:
            return 0.0
        else:
            result = (
                imp_prev
                - (card_up / samples) * imp_up
                - (card_dn / samples) * imp_dn
            )
            return result

    def _select_best_set(
        self, dataset: np.array, labels: np.array, features_sets: list
    ) -> list:
        """Return the best set of features among feature_sets, the criterion
        is the information gain

        Parameters
        ----------
        dataset : np.array
            array of samples (# samples, # features)
        labels : np.array
            array of labels
        features_sets : list
            list of features sets to check

        Returns
        -------
        list
            best feature set
        """
        max_gain = 0
        selected = None
        warnings.filterwarnings("ignore", category=ConvergenceWarning)
        for feature_set in features_sets:
            self._clf.fit(dataset[:, feature_set], labels)
            node = Snode(
                self._clf, dataset, labels, feature_set, 0.0, "subset"
            )
            self.partition(dataset, node, train=True)
            y1, y2 = self.part(labels)
            gain = self.information_gain(labels, y1, y2)
            if gain > max_gain:
                max_gain = gain
                selected = feature_set
        return selected if selected is not None else feature_set

    @staticmethod
    def _generate_spaces(features: int, max_features: int) -> list:
        """Generate at most 5 random feature combinations

        Parameters
        ----------
        features : int
            number of features in the dataset
        max_features : int
            number of features in each combination

        Returns
        -------
        list
            list with up to 5 combinations of features randomly selected
        """
        comb = set()
        # Generate at most 5 combinations
        number = factorial(features) / (
            factorial(max_features) * factorial(features - max_features)
        )
        set_length = min(5, number)
        while len(comb) < set_length:
            comb.add(
                tuple(sorted(random.sample(range(features), max_features)))
            )
        return list(comb)

    def _get_subspaces_set(
        self, dataset: np.array, labels: np.array, max_features: int
    ) -> tuple:
        """Compute the indices of the features selected by splitter depending
        on the self._feature_select hyper parameter

        Parameters
        ----------
        dataset : np.array
            array of samples
        labels : np.array
            labels of the dataset
        max_features : int
            number of features of the subspace
            (<= number of features in dataset)

        Returns
        -------
        tuple
            indices of the features selected
        """
        # No feature reduction
        n_features = dataset.shape[1]
        if n_features == max_features:
            return tuple(range(n_features))
        # select features as selected in constructor
        return self.fs_function(dataset, labels, max_features)

    def get_subspace(
        self, dataset: np.array, labels: np.array, max_features: int
    ) -> tuple:
        """Return a subspace of the selected dataset of max_features length,
        depending on the feature_select hyperparameter

        Parameters
        ----------
        dataset : np.array
            array of samples (# samples, # features)
        labels : np.array
            labels of the dataset
        max_features : int
            number of features to form the subspace

        Returns
        -------
        tuple
            tuple with the dataset with only the features selected and the
            indices of the features selected
        """
        indices = self._get_subspaces_set(dataset, labels, max_features)
        return dataset[:, indices], indices

    def _impurity(self, data: np.array, y: np.array) -> np.array:
        """return column of dataset to be taken into account to split dataset

        Parameters
        ----------
        data : np.array
            distances to hyper plane of every class
        y : np.array
            vector of labels (classes)

        Returns
        -------
        np.array
            column of dataset to be taken into account to split dataset
        """
        max_gain = 0
        selected = -1
        for col in range(data.shape[1]):
            tup = y[data[:, col] > 0]
            tdn = y[data[:, col] <= 0]
            info_gain = self.information_gain(y, tup, tdn)
            if info_gain > max_gain:
                selected = col
                max_gain = info_gain
        return selected

    @staticmethod
    def _max_samples(data: np.array, y: np.array) -> np.array:
        """return column of dataset to be taken into account to split dataset

        Parameters
        ----------
        data : np.array
            distances to hyper plane of every class
        y : np.array
            vector of labels (classes)

        Returns
        -------
        np.array
            column of dataset to be taken into account to split dataset
        """
        # select the class with max number of samples
        _, samples = np.unique(y, return_counts=True)
        return np.argmax(samples)

    def partition(self, samples: np.array, node: Snode, train: bool):
        """Set the criteria to split arrays. Compute the indices of the
        samples that should go to one side of the tree (up)

        Parameters
        ----------
        samples : np.array
            array of samples (# samples, # features)
        node : Snode
            Node of the tree where partition is going to be made
        train : bool
            Train time - True / Test time - False
        """
        # data contains the distances of every sample to every class
        # hyperplane, an array of (m, nc) with nc = # classes
        data = self._distances(node, samples)
        if data.shape[0] < self._min_samples_split:
            # there aren't enough samples to split
            self._up = np.ones((data.shape[0]), dtype=bool)
            return
        if data.ndim > 1:
            # split criteria for multiclass
            # Convert data to a (m, 1) array selecting values for samples
            if train:
                # in train time we have to compute the column to take into
                # account to split the dataset
                col = self.decision_criteria(data, node._y)
                node.set_partition_column(col)
            else:
                # in predict time just use the column computed in train time
                # i.e. take the classifier of class <col>
                col = node.get_partition_column()
            if col == -1:
                # No partition is producing information gain
                data = np.ones(data.shape)
            data = data[:, col]
        self._up = data > 0

    def part(self, origin: np.array) -> list:
        """Split an array in two based on indices (self._up) and its
        complement. partition has to be called first to establish up indices

        Parameters
        ----------
        origin : np.array
            dataset to split

        Returns
        -------
        list
            list with two splits of the array
        """
        down = ~self._up
        return [
            origin[self._up] if any(self._up) else None,
            origin[down] if any(down) else None,
        ]

    def _distances(self, node: Snode, data: np.ndarray) -> np.array:
        """Compute distances of the samples to the hyperplane of the node

        Parameters
        ----------
        node : Snode
            node containing the svm classifier
        data : np.ndarray
            samples to compute distance to hyperplane

        Returns
        -------
        np.array
            array of shape (m, nc) with the distances of every sample to
            the hyperplane of every class. nc = # of classes
        """
        X_transformed = data[:, node._features]
        if self._normalize:
            X_transformed = node._scaler.transform(X_transformed)
        return node._clf.decision_function(X_transformed)
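Splitter is driven internally by Stree, but its impurity helpers are plain static methods and get_subspace can be exercised directly. A minimal sketch under assumed toy data (the SVC settings are arbitrary, chosen only to satisfy the constructor; expected values computed by hand):

# Sketch only: exercises the Splitter helpers defined above.
import numpy as np
from sklearn.svm import SVC
from stree.Splitter import Splitter

sp = Splitter(
    clf=SVC(kernel="rbf"),
    criterion="entropy",
    feature_select="random",
    criteria="max_samples",
    min_samples_split=0,
    random_state=0,
)
y = np.array([0, 0, 1, 1])
# _entropy uses log base = number of classes, so two balanced classes give 1.0
print(Splitter._entropy(y))  # 1.0
print(Splitter._gini(y))     # 0.5 = 1 - (0.5**2 + 0.5**2)
X = np.random.rand(4, 10)
# picks the best of 5 random 3-column subsets by information gain
Xs, idx = sp.get_subspace(X, y, max_features=3)
print(idx, Xs.shape)         # e.g. (1, 4, 9) (4, 3)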
900 stree/Strees.py
File diff suppressed because it is too large
stree/__init__.py
@@ -1,3 +1,8 @@
-from .Strees import Stree, Snode, Siterator, Splitter
+from .Strees import Stree, Siterator

-__all__ = ["Stree", "Snode", "Siterator", "Splitter"]
+__author__ = "Ricardo Montañana Gómez"
+__copyright__ = "Copyright 2020-2021, Ricardo Montañana Gómez"
+__license__ = "MIT License"
+__author_email__ = "ricardo.montanana@alu.uclm.es"
+
+__all__ = ["Stree", "Siterator"]
1 stree/_version.py Normal file
@@ -0,0 +1 @@
__version__ = "1.3.2"
stree/tests/Snode_test.py
@@ -1,16 +1,19 @@
 import os
 import unittest

 import numpy as np

-from stree import Stree, Snode
+from stree import Stree
+from stree.Splitter import Snode
 from .utils import load_dataset


 class Snode_test(unittest.TestCase):
     def __init__(self, *args, **kwargs):
         self._random_state = 1
-        self._clf = Stree(random_state=self._random_state)
+        self._clf = Stree(
+            random_state=self._random_state,
+            kernel="liblinear",
+            multiclass_strategy="ovr",
+        )
         self._clf.fit(*load_dataset(self._random_state))
         super().__init__(*args, **kwargs)

@@ -40,12 +43,13 @@ class Snode_test(unittest.TestCase):
             # Check Class
             class_computed = classes[card == max_card]
             self.assertEqual(class_computed, node._class)
+            # Check Partition column
+            self.assertEqual(node._partition_column, -1)

         check_leave(self._clf.tree_)

     def test_nodes_coefs(self):
-        """Check if the nodes of the tree have the right attributes filled
-        """
+        """Check if the nodes of the tree have the right attributes filled"""

         def run_tree(node: Snode):
             if node._belief < 1:
@@ -54,28 +58,62 @@ class Snode_test(unittest.TestCase):
                 self.assertIsNotNone(node._clf.coef_)
             if node.is_leaf():
                 return
-            run_tree(node.get_down())
             run_tree(node.get_up())
+            run_tree(node.get_down())

-        run_tree(self._clf.tree_)
+        model = Stree(self._random_state)
+        model.fit(*load_dataset(self._random_state, 3, 4))
+        run_tree(model.tree_)

     def test_make_predictor_on_leaf(self):
         test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
-        test.make_predictor()
+        test.make_predictor(2)
         self.assertEqual(1, test._class)
         self.assertEqual(0.75, test._belief)
+        self.assertEqual(-1, test._partition_column)
+        self.assertListEqual([1, 3], test._proba.tolist())

     def test_make_predictor_on_not_leaf(self):
         test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
         test.set_up(Snode(None, [1], [1], [], 0.0, "another_test"))
-        test.make_predictor()
+        test.make_predictor(2)
         self.assertIsNone(test._class)
         self.assertEqual(0, test._belief)
+        self.assertEqual(-1, test._partition_column)
+        self.assertEqual(-1, test.get_up()._partition_column)
+        self.assertIsNone(test._proba)

     def test_make_predictor_on_leaf_bogus_data(self):
         test = Snode(None, [1, 2, 3, 4], [], [], 0.0, "test")
-        test.make_predictor()
+        test.make_predictor(2)
         self.assertIsNone(test._class)
+        self.assertEqual(-1, test._partition_column)
+        self.assertListEqual([0, 0], test._proba.tolist())

+    def test_set_title(self):
+        test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
+        self.assertEqual("test", test.get_title())
+        test.set_title("another")
+        self.assertEqual("another", test.get_title())
+
+    def test_set_classifier(self):
+        test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
+        clf = Stree()
+        self.assertIsNone(test.get_classifier())
+        test.set_classifier(clf)
+        self.assertEqual(clf, test.get_classifier())
+
+    def test_set_impurity(self):
+        test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
+        self.assertEqual(0.0, test.get_impurity())
+        test.set_impurity(54.7)
+        self.assertEqual(54.7, test.get_impurity())
+
+    def test_set_features(self):
+        test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [0, 1], 0.0, "test")
+        self.assertListEqual([0, 1], test.get_features())
+        test.set_features([1, 2])
+        self.assertListEqual([1, 2], test.get_features())
+
     def test_copy_node(self):
         px = [1, 2, 3, 4]
@@ -86,3 +124,6 @@ class Snode_test(unittest.TestCase):
         self.assertListEqual(computed._y, py)
         self.assertEqual("test", computed._title)
         self.assertIsInstance(computed._clf, Stree)
+        self.assertEqual(test._partition_column, computed._partition_column)
+        self.assertEqual(test._sample_weight, computed._sample_weight)
+        self.assertEqual(test._scaler, computed._scaler)
stree/tests/Splitter_test.py
@@ -5,7 +5,8 @@ import random
 import numpy as np
 from sklearn.svm import SVC
 from sklearn.datasets import load_wine, load_iris
-from stree import Splitter
+from stree.Splitter import Splitter
+from .utils import load_dataset, load_disc_dataset


 class Splitter_test(unittest.TestCase):
@@ -17,15 +18,15 @@ class Splitter_test(unittest.TestCase):
     def build(
         clf=SVC,
         min_samples_split=0,
-        splitter_type="random",
+        feature_select="random",
         criterion="gini",
-        criteria="min_distance",
+        criteria="max_samples",
         random_state=None,
     ):
         return Splitter(
             clf=clf(random_state=random_state, kernel="rbf"),
             min_samples_split=min_samples_split,
-            splitter_type=splitter_type,
+            feature_select=feature_select,
             criterion=criterion,
             criteria=criteria,
             random_state=random_state,
@@ -39,24 +40,20 @@ class Splitter_test(unittest.TestCase):
         with self.assertRaises(ValueError):
             self.build(criterion="duck")
         with self.assertRaises(ValueError):
-            self.build(splitter_type="duck")
+            self.build(feature_select="duck")
         with self.assertRaises(ValueError):
             self.build(criteria="duck")
         with self.assertRaises(ValueError):
             _ = Splitter(clf=None)
-        for splitter_type in ["best", "random"]:
+        for feature_select in ["best", "random"]:
             for criterion in ["gini", "entropy"]:
-                for criteria in [
-                    "min_distance",
-                    "max_samples",
-                    "max_distance",
-                ]:
+                for criteria in ["max_samples", "impurity"]:
                     tcl = self.build(
-                        splitter_type=splitter_type,
+                        feature_select=feature_select,
                         criterion=criterion,
                         criteria=criteria,
                     )
-                    self.assertEqual(splitter_type, tcl._splitter_type)
+                    self.assertEqual(feature_select, tcl._feature_select)
                     self.assertEqual(criterion, tcl._criterion)
                     self.assertEqual(criteria, tcl._criteria)

@@ -138,78 +135,81 @@ class Splitter_test(unittest.TestCase):
                 [0.7, 0.01, -0.1],
                 [0.7, -0.9, 0.5],
                 [0.1, 0.2, 0.3],
+                [-0.1, 0.2, 0.3],
+                [-0.1, 0.2, 0.3],
             ]
         )
-        expected = np.array([0.2, 0.01, -0.9, 0.2])
-        y = [1, 2, 1, 0]
+        expected = data[:, 0]
+        y = [1, 2, 1, 0, 0, 0]
         computed = tcl._max_samples(data, y)
-        self.assertEqual((4,), computed.shape)
-        self.assertListEqual(expected.tolist(), computed.tolist())
+        self.assertEqual(0, computed)
+        computed_data = data[:, computed]
+        self.assertEqual((6,), computed_data.shape)
+        self.assertListEqual(expected.tolist(), computed_data.tolist())

-    def test_min_distance(self):
-        tcl = self.build()
+    def test_impurity(self):
+        tcl = self.build(criteria="impurity")
         data = np.array(
             [
                 [-0.1, 0.2, -0.3],
                 [0.7, 0.01, -0.1],
                 [0.7, -0.9, 0.5],
                 [0.1, 0.2, 0.3],
+                [-0.1, 0.2, 0.3],
+                [-0.1, 0.2, 0.3],
             ]
         )
-        expected = np.array([2, 2, 1, 0])
-        computed = tcl._min_distance(data, None)
-        self.assertEqual((4,), computed.shape)
-        self.assertListEqual(expected.tolist(), computed.tolist())
+        expected = data[:, 2]
+        y = np.array([1, 2, 1, 0, 0, 0])
+        computed = tcl._impurity(data, y)
+        self.assertEqual(2, computed)
+        computed_data = data[:, computed]
+        self.assertEqual((6,), computed_data.shape)
+        self.assertListEqual(expected.tolist(), computed_data.tolist())

-    def test_max_distance(self):
-        tcl = self.build(criteria="max_distance")
-        data = np.array(
-            [
-                [-0.1, 0.2, -0.3],
-                [0.7, 0.01, -0.1],
-                [0.7, -0.9, 0.5],
-                [0.1, 0.2, 0.3],
-            ]
-        )
-        expected = np.array([1, 0, 0, 2])
-        computed = tcl._max_distance(data, None)
-        self.assertEqual((4,), computed.shape)
-        self.assertListEqual(expected.tolist(), computed.tolist())
+    def test_generate_subspaces(self):
+        features = 250
+        for max_features in range(2, features):
+            num = len(Splitter._generate_spaces(features, max_features))
+            self.assertEqual(5, num)
+        self.assertEqual(3, len(Splitter._generate_spaces(3, 2)))
+        self.assertEqual(4, len(Splitter._generate_spaces(4, 3)))

     def test_best_splitter_few_sets(self):
         X, y = load_iris(return_X_y=True)
         X = np.delete(X, 3, 1)
-        tcl = self.build(splitter_type="best", random_state=self._random_state)
+        tcl = self.build(
+            feature_select="best", random_state=self._random_state
+        )
         dataset, computed = tcl.get_subspace(X, y, max_features=2)
         self.assertListEqual([0, 2], list(computed))
         self.assertListEqual(X[:, computed].tolist(), dataset.tolist())

     def test_splitter_parameter(self):
         expected_values = [
-            [2, 3, 5, 7],  # best entropy min_distance
-            [0, 2, 4, 5],  # best entropy max_samples
-            [0, 2, 8, 12],  # best entropy max_distance
-            [1, 2, 5, 12],  # best gini min_distance
-            [0, 3, 4, 10],  # best gini max_samples
-            [1, 2, 9, 12],  # best gini max_distance
-            [3, 9, 11, 12],  # random entropy min_distance
-            [1, 5, 6, 9],  # random entropy max_samples
-            [1, 2, 4, 8],  # random entropy max_distance
-            [2, 6, 7, 12],  # random gini min_distance
-            [3, 9, 10, 11],  # random gini max_samples
-            [2, 5, 8, 12],  # random gini max_distance
+            [0, 6, 11, 12],  # best    entropy max_samples
+            [0, 6, 11, 12],  # best    entropy impurity
+            [0, 6, 11, 12],  # best    gini    max_samples
+            [0, 6, 11, 12],  # best    gini    impurity
+            [0, 3, 8, 12],  # random  entropy max_samples
+            [0, 3, 7, 12],  # random  entropy impurity
+            [1, 7, 9, 12],  # random  gini    max_samples
+            [1, 5, 8, 12],  # random  gini    impurity
+            [6, 9, 11, 12],  # mutual  entropy max_samples
+            [6, 9, 11, 12],  # mutual  entropy impurity
+            [6, 9, 11, 12],  # mutual  gini    max_samples
+            [6, 9, 11, 12],  # mutual  gini    impurity
         ]
         X, y = load_wine(return_X_y=True)
         rn = 0
-        for splitter_type in ["best", "random"]:
+        for feature_select in ["best", "random", "mutual"]:
             for criterion in ["entropy", "gini"]:
                 for criteria in [
-                    "min_distance",
                     "max_samples",
-                    "max_distance",
+                    "impurity",
                 ]:
                     tcl = self.build(
-                        splitter_type=splitter_type,
+                        feature_select=feature_select,
                         criterion=criterion,
                         criteria=criteria,
                     )
@@ -219,11 +219,94 @@ class Splitter_test(unittest.TestCase):
                     dataset, computed = tcl.get_subspace(X, y, max_features=4)
                     # print(
                     #     "{}, # {:7s}{:8s}{:15s}".format(
-                    #         list(computed), splitter_type, criterion,
-                    #         criteria,
+                    #         list(computed),
+                    #         feature_select,
+                    #         criterion,
+                    #         criteria,
                     #     )
                     # )
-                    self.assertListEqual(expected, list(computed))
+                    self.assertListEqual(expected, sorted(list(computed)))
                     self.assertListEqual(
                         X[:, computed].tolist(), dataset.tolist()
                     )
+
+    def test_get_best_subspaces(self):
+        results = [
+            (4, [3, 4, 11, 13]),
+            (7, [1, 3, 4, 5, 11, 13, 16]),
+            (9, [1, 3, 4, 5, 7, 10, 11, 13, 16]),
+        ]
+        X, y = load_dataset(n_features=20)
+        for k, expected in results:
+            tcl = self.build(
+                feature_select="best",
+            )
+            Xs, computed = tcl.get_subspace(X, y, k)
+            self.assertListEqual(expected, list(computed))
+            self.assertListEqual(X[:, expected].tolist(), Xs.tolist())
+
+    def test_get_best_subspaces_discrete(self):
+        results = [
+            (4, [0, 3, 16, 18]),
+            (7, [0, 3, 13, 14, 16, 18, 19]),
+            (9, [0, 3, 7, 13, 14, 15, 16, 18, 19]),
+        ]
+        X, y = load_disc_dataset(n_features=20)
+        for k, expected in results:
+            tcl = self.build(
+                feature_select="best",
+            )
+            Xs, computed = tcl.get_subspace(X, y, k)
+            self.assertListEqual(expected, list(computed))
+            self.assertListEqual(X[:, expected].tolist(), Xs.tolist())
+
+    def test_get_cfs_subspaces(self):
+        results = [
+            (4, [1, 5, 9, 12]),
+            (6, [1, 5, 9, 12, 4, 2]),
+            (7, [1, 5, 9, 12, 4, 2, 3]),
+        ]
+        X, y = load_dataset(n_features=20, n_informative=7)
+        for k, expected in results:
+            tcl = self.build(feature_select="cfs")
+            Xs, computed = tcl.get_subspace(X, y, k)
+            self.assertListEqual(expected, list(computed))
+            self.assertListEqual(X[:, expected].tolist(), Xs.tolist())
+
+    def test_get_fcbf_subspaces(self):
+        results = [
+            (4, [1, 5, 9, 12]),
+            (6, [1, 5, 9, 12, 4, 2]),
+            (7, [1, 5, 9, 12, 4, 2, 16]),
+        ]
+        for rs, expected in results:
+            X, y = load_dataset(n_features=20, n_informative=7)
+            tcl = self.build(feature_select="fcbf", random_state=rs)
+            Xs, computed = tcl.get_subspace(X, y, rs)
+            self.assertListEqual(expected, list(computed))
+            self.assertListEqual(X[:, expected].tolist(), Xs.tolist())
+
+    def test_get_iwss_subspaces(self):
+        results = [
+            (4, [1, 5, 9, 12]),
+            (6, [1, 5, 9, 12, 4, 15]),
+        ]
+        for rs, expected in results:
+            X, y = load_dataset(n_features=20, n_informative=7)
+            tcl = self.build(feature_select="iwss", random_state=rs)
+            Xs, computed = tcl.get_subspace(X, y, rs)
+            self.assertListEqual(expected, list(computed))
+            self.assertListEqual(X[:, expected].tolist(), Xs.tolist())
+
+    def test_get_trandom_subspaces(self):
+        results = [
+            (4, [3, 7, 9, 12]),
+            (6, [0, 1, 2, 8, 15, 18]),
+            (7, [1, 2, 4, 8, 10, 12, 13]),
+        ]
+        for rs, expected in results:
+            X, y = load_dataset(n_features=20, n_informative=7)
+            tcl = self.build(feature_select="trandom", random_state=rs)
+            Xs, computed = tcl.get_subspace(X, y, rs)
+            self.assertListEqual(expected, list(computed))
+            self.assertListEqual(X[:, expected].tolist(), Xs.tolist())
@@ -5,28 +5,47 @@ import warnings
|
|||||||
import numpy as np
|
import numpy as np
|
||||||
from sklearn.datasets import load_iris, load_wine
|
from sklearn.datasets import load_iris, load_wine
|
||||||
from sklearn.exceptions import ConvergenceWarning
|
from sklearn.exceptions import ConvergenceWarning
|
||||||
|
from sklearn.svm import LinearSVC
|
||||||
|
|
||||||
from stree import Stree, Snode
|
from stree import Stree
|
||||||
|
from stree.Splitter import Snode
|
||||||
from .utils import load_dataset
|
from .utils import load_dataset
|
||||||
|
from .._version import __version__
|
||||||
|
|
||||||
|
|
||||||
class Stree_test(unittest.TestCase):
|
class Stree_test(unittest.TestCase):
|
||||||
def __init__(self, *args, **kwargs):
|
def __init__(self, *args, **kwargs):
|
||||||
self._random_state = 1
|
self._random_state = 1
|
||||||
self._kernels = ["linear", "rbf", "poly"]
|
self._kernels = ["liblinear", "linear", "rbf", "poly", "sigmoid"]
|
||||||
super().__init__(*args, **kwargs)
|
super().__init__(*args, **kwargs)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def setUp(cls):
|
def setUp(cls):
|
||||||
os.environ["TESTING"] = "1"
|
os.environ["TESTING"] = "1"
|
||||||
|
|
||||||
|
def test_valid_kernels(self):
|
||||||
|
X, y = load_dataset()
|
||||||
|
for kernel in self._kernels:
|
||||||
|
clf = Stree(kernel=kernel, multiclass_strategy="ovr")
|
||||||
|
clf.fit(X, y)
|
||||||
|
self.assertIsNotNone(clf.tree_)
|
||||||
|
|
||||||
|
def test_bogus_kernel(self):
|
||||||
|
kernel = "other"
|
||||||
|
X, y = load_dataset()
|
||||||
|
clf = Stree(kernel=kernel)
|
||||||
|
with self.assertRaises(ValueError):
|
||||||
|
clf.fit(X, y)
|
||||||
|
|
||||||
def _check_tree(self, node: Snode):
|
def _check_tree(self, node: Snode):
|
||||||
"""Check recursively that the nodes that are not leaves have the
|
"""Check recursively that the nodes that are not leaves have the
|
||||||
correct number of labels and its sons have the right number of elements
|
correct number of labels and its sons have the right number of elements
|
||||||
in their dataset
|
in their dataset
|
||||||
|
|
||||||
Arguments:
|
Parameters
|
||||||
node {Snode} -- node to check
|
----------
|
||||||
|
node : Snode
|
||||||
|
node to check
|
||||||
"""
|
"""
|
||||||
if node.is_leaf():
|
if node.is_leaf():
|
||||||
return
|
return
|
||||||
@@ -37,37 +56,49 @@ class Stree_test(unittest.TestCase):
         # i.e. The partition algorithm didn't forget any sample
         self.assertEqual(node._y.shape[0], y_down.shape[0] + y_up.shape[0])
         unique_y, count_y = np.unique(node._y, return_counts=True)
-        _, count_d = np.unique(y_down, return_counts=True)
-        _, count_u = np.unique(y_up, return_counts=True)
+        labels_d, count_d = np.unique(y_down, return_counts=True)
+        labels_u, count_u = np.unique(y_up, return_counts=True)
+        dict_d = {label: count_d[i] for i, label in enumerate(labels_d)}
+        dict_u = {label: count_u[i] for i, label in enumerate(labels_u)}
         #
         for i in unique_y:
-            number_down = count_d[i]
             try:
-                number_up = count_u[i]
-            except IndexError:
+                number_up = dict_u[i]
+            except KeyError:
                 number_up = 0
+            try:
+                number_down = dict_d[i]
+            except KeyError:
+                number_down = 0
             self.assertEqual(count_y[i], number_down + number_up)
         # Is the partition made the same as the prediction?
         # as the node is not a leaf...
         _, count_yp = np.unique(y_prediction, return_counts=True)
-        self.assertEqual(count_yp[0], y_up.shape[0])
-        self.assertEqual(count_yp[1], y_down.shape[0])
+        self.assertEqual(count_yp[1], y_up.shape[0])
+        self.assertEqual(count_yp[0], y_down.shape[0])
         self._check_tree(node.get_down())
         self._check_tree(node.get_up())

     def test_build_tree(self):
-        """Check if the tree is built the same way as predictions of models
-        """
+        """Check if the tree is built the same way as predictions of models"""
         warnings.filterwarnings("ignore")
         for kernel in self._kernels:
-            clf = Stree(kernel=kernel, random_state=self._random_state)
+            clf = Stree(
+                kernel="sigmoid",
+                multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
+                random_state=self._random_state,
+            )
             clf.fit(*load_dataset(self._random_state))
             self._check_tree(clf.tree_)

     def test_single_prediction(self):
         X, y = load_dataset(self._random_state)
         for kernel in self._kernels:
-            clf = Stree(kernel=kernel, random_state=self._random_state)
+            clf = Stree(
+                kernel=kernel,
+                multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
+                random_state=self._random_state,
+            )
             yp = clf.fit(X, y).predict((X[0, :].reshape(-1, X.shape[1])))
             self.assertEqual(yp[0], y[0])
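The hunk above replaces positional indexing of the `np.unique` counts with label-keyed dictionaries: `count_d[i]` is only correct when every class label is present on both sides of the split, otherwise it reads the wrong slot or raises `IndexError`. A minimal standalone sketch of the new bookkeeping (the sample array is illustrative, not from the diff):

import numpy as np

y_down = np.array([0, 0, 1])  # hypothetical left partition of a node
labels_d, count_d = np.unique(y_down, return_counts=True)
# keyed by label, so a class that is missing from this side raises
# KeyError (handled as a count of zero) instead of silently returning
# the count of a different class
dict_d = {label: count_d[i] for i, label in enumerate(labels_d)}
number_down = dict_d.get(2, 0)  # label 2 never went down -> 0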
@@ -75,18 +106,58 @@ class Stree_test(unittest.TestCase):
         # First 27 elements the predictions are the same as the truth
         num = 27
         X, y = load_dataset(self._random_state)
-        for kernel in self._kernels:
-            clf = Stree(kernel=kernel, random_state=self._random_state)
+        for kernel in ["liblinear", "linear", "rbf", "poly"]:
+            clf = Stree(
+                kernel=kernel,
+                multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
+                random_state=self._random_state,
+            )
             yp = clf.fit(X, y).predict(X[:num, :])
             self.assertListEqual(y[:num].tolist(), yp.tolist())

+    def test_multiple_predict_proba(self):
+        expected = {
+            "liblinear": {
+                0: [0.02401129943502825, 0.9759887005649718],
+                17: [0.9282970550576184, 0.07170294494238157],
+            },
+            "linear": {
+                0: [0.029329608938547486, 0.9706703910614525],
+                17: [0.9298469387755102, 0.07015306122448979],
+            },
+            "rbf": {
+                0: [0.023448275862068966, 0.976551724137931],
+                17: [0.9458064516129032, 0.05419354838709677],
+            },
+            "poly": {
+                0: [0.01601164483260553, 0.9839883551673945],
+                17: [0.9089790897908979, 0.0910209102091021],
+            },
+        }
+        indices = [0, 17]
+        X, y = load_dataset(self._random_state)
+        for kernel in ["liblinear", "linear", "rbf", "poly"]:
+            clf = Stree(
+                kernel=kernel,
+                multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
+                random_state=self._random_state,
+            )
+            yp = clf.fit(X, y).predict_proba(X)
+            for index in indices:
+                for exp, comp in zip(expected[kernel][index], yp[index]):
+                    self.assertAlmostEqual(exp, comp)
+
     def test_single_vs_multiple_prediction(self):
         """Check if predicting sample by sample gives the same result as
         predicting all samples at once
         """
         X, y = load_dataset(self._random_state)
         for kernel in self._kernels:
-            clf = Stree(kernel=kernel, random_state=self._random_state)
+            clf = Stree(
+                kernel=kernel,
+                multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
+                random_state=self._random_state,
+            )
             clf.fit(X, y)
             # Compute prediction line by line
             yp_line = np.array([], dtype=int)
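The new `test_multiple_predict_proba` pins exact per-sample probabilities for each kernel; note that in those fixtures each pair of class probabilities sums to one. A rough usage sketch of the API it exercises (the dataset here is illustrative, only the calls come from the test):

import numpy as np
from sklearn.datasets import make_classification
from stree import Stree

X, y = make_classification(random_state=1)
clf = Stree(kernel="linear", multiclass_strategy="ovo", random_state=1)
proba = clf.fit(X, y).predict_proba(X)  # one row per sample, one column per class
np.testing.assert_allclose(proba.sum(axis=1), 1.0)  # rows are distributions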
@@ -99,26 +170,32 @@ class Stree_test(unittest.TestCase):
         self.assertListEqual(yp_line.tolist(), yp_once.tolist())

     def test_iterator_and_str(self):
-        """Check preorder iterator
-        """
+        """Check preorder iterator"""
         expected = [
-            "root feaures=(0, 1, 2) impurity=0.5000",
-            "root - Down feaures=(0, 1, 2) impurity=0.0671",
-            "root - Down - Down, <cgaf> - Leaf class=1 belief= 0.975989 "
-            "impurity=0.0469 counts=(array([0, 1]), array([ 17, 691]))",
-            "root - Down - Up feaures=(0, 1, 2) impurity=0.3967",
-            "root - Down - Up - Down, <cgaf> - Leaf class=1 belief= 0.750000 "
-            "impurity=0.3750 counts=(array([0, 1]), array([1, 3]))",
-            "root - Down - Up - Up, <pure> - Leaf class=0 belief= 1.000000 "
-            "impurity=0.0000 counts=(array([0]), array([7]))",
-            "root - Up, <cgaf> - Leaf class=0 belief= 0.928297 impurity=0.1331"
-            " counts=(array([0, 1]), array([725, 56]))",
+            "root feaures=(0, 1, 2) impurity=1.0000 counts=(array([0, 1]), "
+            "array([750, 750]))",
+            "root - Down(2), <cgaf> - Leaf class=0 belief= 0.928297 impurity="
+            "0.3722 counts=(array([0, 1]), array([725, 56]))",
+            "root - Up(2) feaures=(0, 1, 2) impurity=0.2178 counts=(array([0, "
+            "1]), array([ 25, 694]))",
+            "root - Up(2) - Down(3) feaures=(0, 1, 2) impurity=0.8454 counts="
+            "(array([0, 1]), array([8, 3]))",
+            "root - Up(2) - Down(3) - Down(4), <pure> - Leaf class=0 belief= "
+            "1.000000 impurity=0.0000 counts=(array([0]), array([7]))",
+            "root - Up(2) - Down(3) - Up(4), <cgaf> - Leaf class=1 belief= "
+            "0.750000 impurity=0.8113 counts=(array([0, 1]), array([1, 3]))",
+            "root - Up(2) - Up(3), <cgaf> - Leaf class=1 belief= 0.975989 "
+            "impurity=0.1634 counts=(array([0, 1]), array([ 17, 691]))",
         ]
         computed = []
         expected_string = ""
-        clf = Stree(kernel="linear", random_state=self._random_state)
+        clf = Stree(
+            kernel="liblinear",
+            multiclass_strategy="ovr",
+            random_state=self._random_state,
+        )
         clf.fit(*load_dataset(self._random_state))
-        for node in clf:
+        for node in iter(clf):
             computed.append(str(node))
             expected_string += str(node) + "\n"
         self.assertListEqual(expected, computed)
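`test_iterator_and_str` now expects the richer node labels (`Down(2)`, `counts=…`; "feaures" is the library's own spelling in that output) and walks the fitted tree in preorder. A minimal sketch of that traversal, assuming a fitted classifier as in the test:

from sklearn.datasets import load_wine
from stree import Stree

clf = Stree(kernel="liblinear", multiclass_strategy="ovr", random_state=1)
clf.fit(*load_wine(return_X_y=True))
for node in iter(clf):  # preorder traversal, exactly what the test checks
    print(str(node))    # e.g. "root ... impurity=... counts=(...)"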
@@ -154,9 +231,15 @@ class Stree_test(unittest.TestCase):
     def test_check_max_depth(self):
         depths = (3, 4)
         for depth in depths:
-            tcl = Stree(random_state=self._random_state, max_depth=depth)
+            tcl = Stree(
+                kernel="liblinear",
+                multiclass_strategy="ovr",
+                random_state=self._random_state,
+                max_depth=depth,
+            )
             tcl.fit(*load_dataset(self._random_state))
             self.assertEqual(depth, tcl.depth_)
+            self.assertEqual(depth, tcl.get_depth())

     def test_unfitted_tree_is_iterable(self):
         tcl = Stree()
@@ -175,7 +258,7 @@ class Stree_test(unittest.TestCase):
         for kernel in self._kernels:
             clf = Stree(
                 kernel=kernel,
-                split_criteria="max_samples",
+                multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
                 random_state=self._random_state,
             )
             px = [[1, 2], [5, 6], [9, 10]]
@@ -186,47 +269,60 @@ class Stree_test(unittest.TestCase):
         self.assertListEqual(py, clf.classes_.tolist())

     def test_muticlass_dataset(self):
+        warnings.filterwarnings("ignore", category=ConvergenceWarning)
+        warnings.filterwarnings("ignore", category=RuntimeWarning)
         datasets = {
             "Synt": load_dataset(random_state=self._random_state, n_classes=3),
-            "Iris": load_iris(return_X_y=True),
+            "Iris": load_wine(return_X_y=True),
         }
         outcomes = {
             "Synt": {
-                "max_samples linear": 0.9533333333333334,
-                "max_samples rbf": 0.836,
-                "max_samples poly": 0.9473333333333334,
-                "min_distance linear": 0.9533333333333334,
-                "min_distance rbf": 0.836,
-                "min_distance poly": 0.9473333333333334,
-                "max_distance linear": 0.9533333333333334,
-                "max_distance rbf": 0.836,
-                "max_distance poly": 0.9473333333333334,
+                "max_samples liblinear": 0.9493333333333334,
+                "max_samples linear": 0.9426666666666667,
+                "max_samples rbf": 0.9606666666666667,
+                "max_samples poly": 0.9373333333333334,
+                "max_samples sigmoid": 0.824,
+                "impurity liblinear": 0.9493333333333334,
+                "impurity linear": 0.9426666666666667,
+                "impurity rbf": 0.9606666666666667,
+                "impurity poly": 0.9373333333333334,
+                "impurity sigmoid": 0.824,
             },
             "Iris": {
-                "max_samples linear": 0.98,
-                "max_samples rbf": 1.0,
-                "max_samples poly": 1.0,
-                "min_distance linear": 0.98,
-                "min_distance rbf": 1.0,
-                "min_distance poly": 1.0,
-                "max_distance linear": 0.98,
-                "max_distance rbf": 1.0,
-                "max_distance poly": 1.0,
+                "max_samples liblinear": 0.9550561797752809,
+                "max_samples linear": 1.0,
+                "max_samples rbf": 0.6685393258426966,
+                "max_samples poly": 0.6853932584269663,
+                "max_samples sigmoid": 0.6404494382022472,
+                "impurity liblinear": 0.9550561797752809,
+                "impurity linear": 1.0,
+                "impurity rbf": 0.6685393258426966,
+                "impurity poly": 0.6853932584269663,
+                "impurity sigmoid": 0.6404494382022472,
             },
         }

         for name, dataset in datasets.items():
             px, py = dataset
-            for criteria in ["max_samples", "min_distance", "max_distance"]:
+            for criteria in ["max_samples", "impurity"]:
                 for kernel in self._kernels:
                     clf = Stree(
-                        C=1e4,
-                        max_iter=1e4,
+                        max_iter=int(1e4),
+                        multiclass_strategy=(
+                            "ovr" if kernel == "liblinear" else "ovo"
+                        ),
                         kernel=kernel,
                         random_state=self._random_state,
                     )
                     clf.fit(px, py)
                     outcome = outcomes[name][f"{criteria} {kernel}"]
-                    self.assertAlmostEqual(outcome, clf.score(px, py))
+                    # print(f'"{criteria} {kernel}": {clf.score(px, py)},')
+                    self.assertAlmostEqual(
+                        outcome,
+                        clf.score(px, py),
+                        5,
+                        f"{name} - {criteria} - {kernel}",
+                    )

     def test_max_features(self):
         n_features = 16
@@ -251,6 +347,12 @@ class Stree_test(unittest.TestCase):
         with self.assertRaises(ValueError):
             _ = clf._initialize_max_features()

+    def test_wrong_max_features(self):
+        X, y = load_dataset(n_features=15)
+        clf = Stree(max_features=16)
+        with self.assertRaises(ValueError):
+            clf.fit(X, y)
+
     def test_get_subspaces(self):
         dataset = np.random.random((10, 16))
         y = np.random.randint(0, 2, 10)
@@ -288,16 +390,22 @@ class Stree_test(unittest.TestCase):
         clf.predict(X[:, :3])

     # Tests of score

     def test_score_binary(self):
+        """Check score for binary classification."""
         X, y = load_dataset(self._random_state)
         accuracies = [
             0.9506666666666667,
+            0.9493333333333334,
             0.9606666666666667,
             0.9433333333333334,
+            0.9153333333333333,
         ]
         for kernel, accuracy_expected in zip(self._kernels, accuracies):
-            clf = Stree(random_state=self._random_state, kernel=kernel,)
+            clf = Stree(
+                random_state=self._random_state,
+                multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
+                kernel=kernel,
+            )
             clf.fit(X, y)
             accuracy_score = clf.score(X, y)
             yp = clf.predict(X)
@@ -306,111 +414,355 @@ class Stree_test(unittest.TestCase):
         self.assertAlmostEqual(accuracy_expected, accuracy_score)

     def test_score_max_features(self):
+        """Check score using max_features."""
         X, y = load_dataset(self._random_state)
-        clf = Stree(random_state=self._random_state, max_features=2)
+        clf = Stree(
+            kernel="liblinear",
+            multiclass_strategy="ovr",
+            random_state=self._random_state,
+            max_features=2,
+        )
         clf.fit(X, y)
-        self.assertAlmostEqual(0.9426666666666667, clf.score(X, y))
+        self.assertAlmostEqual(0.9453333333333334, clf.score(X, y))

-    def test_score_multi_class(self):
-        warnings.filterwarnings("ignore")
-        accuracies = [
-            0.8258427,  # Wine linear min_distance
-            0.6741573,  # Wine linear max_distance
-            0.8314607,  # Wine linear max_samples
-            0.6629213,  # Wine rbf min_distance
-            1.0000000,  # Wine rbf max_distance
-            0.4044944,  # Wine rbf max_samples
-            0.9157303,  # Wine poly min_distance
-            1.0000000,  # Wine poly max_distance
-            0.7640449,  # Wine poly max_samples
-            0.9933333,  # Iris linear min_distance
-            0.9666667,  # Iris linear max_distance
-            0.9666667,  # Iris linear max_samples
-            0.9800000,  # Iris rbf min_distance
-            0.9800000,  # Iris rbf max_distance
-            0.9800000,  # Iris rbf max_samples
-            1.0000000,  # Iris poly min_distance
-            1.0000000,  # Iris poly max_distance
-            1.0000000,  # Iris poly max_samples
-            0.8993333,  # Synthetic linear min_distance
-            0.6533333,  # Synthetic linear max_distance
-            0.9313333,  # Synthetic linear max_samples
-            0.8320000,  # Synthetic rbf min_distance
-            0.6660000,  # Synthetic rbf max_distance
-            0.8320000,  # Synthetic rbf max_samples
-            0.6066667,  # Synthetic poly min_distance
-            0.6840000,  # Synthetic poly max_distance
-            0.6340000,  # Synthetic poly max_samples
-        ]
-        datasets = [
-            ("Wine", load_wine(return_X_y=True)),
-            ("Iris", load_iris(return_X_y=True)),
-            (
-                "Synthetic",
-                load_dataset(self._random_state, n_classes=3, n_features=5),
-            ),
-        ]
-        for dataset_name, dataset in datasets:
-            X, y = dataset
-            for kernel in self._kernels:
-                for criteria in [
-                    "min_distance",
-                    "max_distance",
-                    "max_samples",
-                ]:
-                    clf = Stree(
-                        C=17,
-                        random_state=self._random_state,
-                        kernel=kernel,
-                        split_criteria=criteria,
-                        degree=5,
-                        gamma="auto",
-                    )
-                    clf.fit(X, y)
-                    accuracy_score = clf.score(X, y)
-                    yp = clf.predict(X)
-                    accuracy_computed = np.mean(yp == y)
-                    # print(
-                    #     "{:.7f}, # {:7} {:5} {}".format(
-                    #         accuracy_score, dataset_name, kernel, criteria
-                    #     )
-                    # )
-                    accuracy_expected = accuracies.pop(0)
-                    self.assertEqual(accuracy_score, accuracy_computed)
-                    self.assertAlmostEqual(accuracy_expected, accuracy_score)
-
     def test_bogus_splitter_parameter(self):
+        """Check that bogus splitter parameter raises exception."""
         clf = Stree(splitter="duck")
         with self.assertRaises(ValueError):
             clf.fit(*load_dataset())

-    def test_weights_removing_class(self):
-        # This patch solves an stderr message from sklearn svm lib
-        # "WARNING: class label x specified in weight is not found"
+    def test_multiclass_classifier_integrity(self):
+        """Checks if the multiclass operation is done right"""
+        X, y = load_iris(return_X_y=True)
+        clf = Stree(
+            kernel="liblinear", multiclass_strategy="ovr", random_state=0
+        )
+        clf.fit(X, y)
+        score = clf.score(X, y)
+        # Check accuracy of the whole model
+        self.assertAlmostEquals(0.98, score, 5)
+        svm = LinearSVC(random_state=0)
+        svm.fit(X, y)
+        self.assertAlmostEquals(0.9666666666666667, svm.score(X, y), 5)
+        data = svm.decision_function(X)
+        expected = [
+            0.4444444444444444,
+            0.35777777777777775,
+            0.4569777777777778,
+        ]
+        ty = data.copy()
+        ty[data <= 0] = 0
+        ty[data > 0] = 1
+        ty = ty.astype(int)
+        for i in range(3):
+            self.assertAlmostEquals(
+                expected[i],
+                clf.splitter_._gini(ty[:, i]),
+            )
+        # 1st Branch
+        # up has to have 50 samples of class 0
+        # down should have 100 [50, 50]
+        up = data[:, 2] > 0
+        resup = np.unique(y[up], return_counts=True)
+        resdn = np.unique(y[~up], return_counts=True)
+        self.assertListEqual([1, 2], resup[0].tolist())
+        self.assertListEqual([3, 50], resup[1].tolist())
+        self.assertListEqual([0, 1], resdn[0].tolist())
+        self.assertListEqual([50, 47], resdn[1].tolist())
+        # 2nd Branch
+        # up should have 53 samples of classes [1, 2] [3, 50]
+        # down should have 47 samples of class 1
+        node_up = clf.tree_.get_down().get_up()
+        node_dn = clf.tree_.get_down().get_down()
+        resup = np.unique(node_up._y, return_counts=True)
+        resdn = np.unique(node_dn._y, return_counts=True)
+        self.assertListEqual([1, 2], resup[0].tolist())
+        self.assertListEqual([3, 50], resup[1].tolist())
+        self.assertListEqual([1], resdn[0].tolist())
+        self.assertListEqual([47], resdn[1].tolist())
+
+    def test_score_multiclass_rbf(self):
+        """Test score for multiclass classification with rbf kernel."""
+        X, y = load_dataset(
+            random_state=self._random_state,
+            n_classes=3,
+            n_features=5,
+            n_samples=500,
+        )
+        clf = Stree(kernel="rbf", random_state=self._random_state)
+        clf2 = Stree(
+            kernel="rbf", random_state=self._random_state, normalize=True
+        )
+        self.assertEqual(0.966, clf.fit(X, y).score(X, y))
+        self.assertEqual(0.964, clf2.fit(X, y).score(X, y))
+        X, y = load_wine(return_X_y=True)
+        self.assertEqual(0.6685393258426966, clf.fit(X, y).score(X, y))
+        self.assertEqual(1.0, clf2.fit(X, y).score(X, y))
+
+    def test_score_multiclass_poly(self):
+        """Test score for multiclass classification with poly kernel."""
+        X, y = load_dataset(
+            random_state=self._random_state,
+            n_classes=3,
+            n_features=5,
+            n_samples=500,
+        )
+        clf = Stree(
+            kernel="poly", random_state=self._random_state, C=10, degree=5
+        )
+        clf2 = Stree(
+            kernel="poly",
+            random_state=self._random_state,
+            normalize=True,
+        )
+        self.assertEqual(0.946, clf.fit(X, y).score(X, y))
+        self.assertEqual(0.972, clf2.fit(X, y).score(X, y))
+        X, y = load_wine(return_X_y=True)
+        self.assertEqual(0.7808988764044944, clf.fit(X, y).score(X, y))
+        self.assertEqual(1.0, clf2.fit(X, y).score(X, y))
+
+    def test_score_multiclass_liblinear(self):
+        """Test score for multiclass classification with liblinear kernel."""
+        X, y = load_dataset(
+            random_state=self._random_state,
+            n_classes=3,
+            n_features=5,
+            n_samples=500,
+        )
+        clf = Stree(
+            kernel="liblinear",
+            multiclass_strategy="ovr",
+            random_state=self._random_state,
+            C=10,
+        )
+        clf2 = Stree(
+            kernel="liblinear",
+            multiclass_strategy="ovr",
+            random_state=self._random_state,
+            normalize=True,
+        )
+        self.assertEqual(0.968, clf.fit(X, y).score(X, y))
+        self.assertEqual(0.97, clf2.fit(X, y).score(X, y))
+        X, y = load_wine(return_X_y=True)
+        self.assertEqual(1.0, clf.fit(X, y).score(X, y))
+        self.assertEqual(1.0, clf2.fit(X, y).score(X, y))
+
+    def test_score_multiclass_sigmoid(self):
+        """Test score for multiclass classification with sigmoid kernel."""
+        X, y = load_dataset(
+            random_state=self._random_state,
+            n_classes=3,
+            n_features=5,
+            n_samples=500,
+        )
+        clf = Stree(kernel="sigmoid", random_state=self._random_state, C=10)
+        clf2 = Stree(
+            kernel="sigmoid",
+            random_state=self._random_state,
+            normalize=True,
+            C=10,
+        )
+        self.assertEqual(0.796, clf.fit(X, y).score(X, y))
+        self.assertEqual(0.952, clf2.fit(X, y).score(X, y))
+        X, y = load_wine(return_X_y=True)
+        self.assertEqual(0.6910112359550562, clf.fit(X, y).score(X, y))
+        self.assertEqual(0.9662921348314607, clf2.fit(X, y).score(X, y))
+
+    def test_score_multiclass_linear(self):
+        """Test score for multiclass classification with linear kernel."""
+        warnings.filterwarnings("ignore", category=ConvergenceWarning)
+        warnings.filterwarnings("ignore", category=RuntimeWarning)
+        X, y = load_dataset(
+            random_state=self._random_state,
+            n_classes=3,
+            n_features=5,
+            n_samples=1500,
+        )
+        clf = Stree(
+            kernel="liblinear",
+            multiclass_strategy="ovr",
+            random_state=self._random_state,
+        )
+        self.assertEqual(0.9533333333333334, clf.fit(X, y).score(X, y))
+        # Check with context based standardization
+        clf2 = Stree(
+            kernel="liblinear",
+            multiclass_strategy="ovr",
+            random_state=self._random_state,
+            normalize=True,
+        )
+        self.assertEqual(0.9526666666666667, clf2.fit(X, y).score(X, y))
+        X, y = load_wine(return_X_y=True)
+        self.assertEqual(0.9831460674157303, clf.fit(X, y).score(X, y))
+        self.assertEqual(1.0, clf2.fit(X, y).score(X, y))
+
+    def test_zero_all_sample_weights(self):
+        """Test exception raises when all sample weights are zero."""
+        X, y = load_dataset(self._random_state)
+        with self.assertRaises(ValueError):
+            Stree().fit(X, y, np.zeros(len(y)))
+
+    def test_mask_samples_weighted_zero(self):
+        """Check that the weighted zero samples are masked."""
         X = np.array(
             [
-                [0.1, 0.1],
-                [0.1, 0.2],
-                [0.2, 0.1],
-                [5, 6],
-                [8, 9],
-                [6, 7],
-                [0.2, 0.2],
+                [1, 1],
+                [1, 1],
+                [1, 1],
+                [2, 2],
+                [2, 2],
+                [2, 2],
+                [3, 3],
+                [3, 3],
+                [3, 3],
             ]
         )
-        y = np.array([0, 0, 0, 1, 1, 1, 0])
-        epsilon = 1e-5
-        weights = [1, 1, 1, 0, 0, 0, 1]
-        weights = np.array(weights, dtype="float64")
-        weights_epsilon = [x + epsilon for x in weights]
-        weights_no_zero = np.array([1, 1, 1, 0, 0, 2, 1])
-        original = weights_no_zero.copy()
-        clf = Stree()
+        y = np.array([1, 1, 1, 2, 2, 2, 5, 5, 5])
+        yw = np.array([1, 1, 1, 1, 1, 1, 5, 5, 5])
+        w = [1, 1, 1, 0, 0, 0, 1, 1, 1]
+        model1 = Stree().fit(X, y)
+        model2 = Stree().fit(X, y, w)
+        predict1 = model1.predict(X)
+        predict2 = model2.predict(X)
+        self.assertListEqual(y.tolist(), predict1.tolist())
+        self.assertListEqual(yw.tolist(), predict2.tolist())
+        self.assertEqual(model1.score(X, y), 1)
+        self.assertAlmostEqual(model2.score(X, y), 0.66666667)
+        self.assertEqual(model2.score(X, y, w), 1)
+
+    def test_depth(self):
+        """Check depth of the tree."""
+        X, y = load_dataset(
+            random_state=self._random_state,
+            n_classes=3,
+            n_features=5,
+            n_samples=1500,
+        )
+        clf = Stree(random_state=self._random_state)
         clf.fit(X, y)
-        node = clf.train(X, y, weights, 1, "test",)
-        # if a class is lost with zero weights the patch adds epsilon
-        self.assertListEqual(weights.tolist(), weights_epsilon)
-        self.assertListEqual(node._sample_weight.tolist(), weights_epsilon)
-        # zero weights are ok when they don't erase a class
-        _ = clf.train(X, y, weights_no_zero, 1, "test")
-        self.assertListEqual(weights_no_zero.tolist(), original.tolist())
+        self.assertEqual(6, clf.depth_)
+        self.assertEqual(6, clf.get_depth())
+        X, y = load_wine(return_X_y=True)
+        clf = Stree(random_state=self._random_state)
+        clf.fit(X, y)
+        self.assertEqual(4, clf.depth_)
+        self.assertEqual(4, clf.get_depth())
+
+    def test_nodes_leaves(self):
+        """Check number of nodes and leaves."""
+        X, y = load_dataset(
+            random_state=self._random_state,
+            n_classes=3,
+            n_features=5,
+            n_samples=1500,
+        )
+        clf = Stree(random_state=self._random_state)
+        clf.fit(X, y)
+        nodes, leaves = clf.nodes_leaves()
+        self.assertEqual(31, nodes)
+        self.assertEqual(31, clf.get_nodes())
+        self.assertEqual(16, leaves)
+        self.assertEqual(16, clf.get_leaves())
+        X, y = load_wine(return_X_y=True)
+        clf = Stree(random_state=self._random_state)
+        clf.fit(X, y)
+        nodes, leaves = clf.nodes_leaves()
+        self.assertEqual(11, nodes)
+        self.assertEqual(11, clf.get_nodes())
+        self.assertEqual(6, leaves)
+        self.assertEqual(6, clf.get_leaves())
+
+    def test_nodes_leaves_artificial(self):
+        """Check leaves of artificial dataset."""
+        n1 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test1")
+        n2 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test2")
+        n3 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test3")
+        n4 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test4")
+        n5 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test5")
+        n6 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test6")
+        n1.set_up(n2)
+        n2.set_up(n3)
+        n2.set_down(n4)
+        n3.set_up(n5)
+        n4.set_down(n6)
+        clf = Stree(random_state=self._random_state)
+        clf.tree_ = n1
+        nodes, leaves = clf.nodes_leaves()
+        self.assertEqual(6, nodes)
+        self.assertEqual(6, clf.get_nodes())
+        self.assertEqual(2, leaves)
+        self.assertEqual(2, clf.get_leaves())
+
+    def test_bogus_multiclass_strategy(self):
+        """Check invalid multiclass strategy."""
+        clf = Stree(multiclass_strategy="other")
+        X, y = load_wine(return_X_y=True)
+        with self.assertRaises(ValueError):
+            clf.fit(X, y)
+
+    def test_multiclass_strategy(self):
+        """Check multiclass strategy."""
+        X, y = load_wine(return_X_y=True)
+        clf_o = Stree(multiclass_strategy="ovo")
+        clf_r = Stree(multiclass_strategy="ovr")
+        score_o = clf_o.fit(X, y).score(X, y)
+        score_r = clf_r.fit(X, y).score(X, y)
+        self.assertEqual(1.0, score_o)
+        self.assertEqual(0.9269662921348315, score_r)
+
+    def test_incompatible_hyperparameters(self):
+        """Check incompatible hyperparameters."""
+        X, y = load_wine(return_X_y=True)
+        clf = Stree(kernel="liblinear", multiclass_strategy="ovo")
+        with self.assertRaises(ValueError):
+            clf.fit(X, y)
+        clf = Stree(multiclass_strategy="ovo", split_criteria="max_samples")
+        with self.assertRaises(ValueError):
+            clf.fit(X, y)
+
+    def test_version(self):
+        """Check STree version."""
+        clf = Stree()
+        self.assertEqual(__version__, clf.version())
+
+    def test_graph(self):
+        """Check graphviz representation of the tree."""
+        X, y = load_wine(return_X_y=True)
+        clf = Stree(random_state=self._random_state)
+
+        expected_head = (
+            "digraph STree {\nlabel=<STree >\nfontsize=30\n"
+            "fontcolor=blue\nlabelloc=t\n"
+        )
+        expected_tail = (
+            ' [shape=box style=filled label="class=1 impurity=0.000 '
+            'counts=[0 1 0]"];\n}\n'
+        )
+        self.assertEqual(clf.graph(), expected_head + "}\n")
+        clf.fit(X, y)
+        computed = clf.graph()
+        computed_head = computed[: len(expected_head)]
+        num = -len(expected_tail)
+        computed_tail = computed[num:]
+        self.assertEqual(computed_head, expected_head)
+        self.assertEqual(computed_tail, expected_tail)
+
+    def test_graph_title(self):
+        X, y = load_wine(return_X_y=True)
+        clf = Stree(random_state=self._random_state)
+        expected_head = (
+            "digraph STree {\nlabel=<STree Sample title>\nfontsize=30\n"
+            "fontcolor=blue\nlabelloc=t\n"
+        )
+        expected_tail = (
+            ' [shape=box style=filled label="class=1 impurity=0.000 '
+            'counts=[0 1 0]"];\n}\n'
+        )
+        self.assertEqual(clf.graph("Sample title"), expected_head + "}\n")
+        clf.fit(X, y)
+        computed = clf.graph("Sample title")
+        computed_head = computed[: len(expected_head)]
+        num = -len(expected_tail)
+        computed_tail = computed[num:]
+        self.assertEqual(computed_head, expected_head)
+        self.assertEqual(computed_tail, expected_tail)
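End of the test-suite hunks. Taken together they encode the new contract: the `liblinear` kernel must be paired with `multiclass_strategy="ovr"`, the SVC kernels (`linear`, `rbf`, `poly`, `sigmoid`) use `"ovo"`, and an invalid pairing raises `ValueError` (see `test_incompatible_hyperparameters`). A short sketch of the accepted combinations, assuming only what the tests above assert:

from sklearn.datasets import load_wine
from stree import Stree

X, y = load_wine(return_X_y=True)
Stree(kernel="liblinear", multiclass_strategy="ovr").fit(X, y)  # accepted
Stree(kernel="rbf", multiclass_strategy="ovo").fit(X, y)        # accepted
try:
    Stree(kernel="liblinear", multiclass_strategy="ovo").fit(X, y)
except ValueError:
    pass  # rejected pairing, per test_incompatible_hyperparameters

The remaining hunks modify the helper module that generates the test datasets (its file name is not shown in this capture):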
@@ -1,11 +1,14 @@
 from sklearn.datasets import make_classification
+import numpy as np


-def load_dataset(random_state=0, n_classes=2, n_features=3):
+def load_dataset(
+    random_state=0, n_classes=2, n_features=3, n_samples=1500, n_informative=3
+):
     X, y = make_classification(
-        n_samples=1500,
+        n_samples=n_samples,
         n_features=n_features,
-        n_informative=3,
+        n_informative=n_informative,
         n_redundant=0,
         n_repeated=0,
         n_classes=n_classes,
@@ -15,3 +18,12 @@ def load_dataset(random_state=0, n_classes=2, n_features=3):
         random_state=random_state,
     )
     return X, y
+
+
+def load_disc_dataset(
+    random_state=0, n_classes=2, n_features=3, n_samples=1500
+):
+    np.random.seed(random_state)
+    X = np.random.randint(1, 17, size=(n_samples, n_features)).astype(float)
+    y = np.random.randint(low=0, high=n_classes, size=(n_samples), dtype=int)
+    return X, y
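The widened `load_dataset` signature exposes `n_samples` and `n_informative`, and the new `load_disc_dataset` produces discrete integer features in [1, 16] cast to float. A usage sketch with illustrative argument values (the helpers are the ones defined just above, within the tests package):

# hypothetical calls, mirroring how the new tests parameterize the data
X, y = load_dataset(random_state=1, n_classes=3, n_features=5, n_samples=500)
Xd, yd = load_disc_dataset(random_state=1, n_features=4, n_samples=100)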