Mirror of https://github.com/Doctorado-ML/STree.git (synced 2025-08-17 16:36:01 +00:00)

Compare commits: add_multic ... 1.1 (39 commits)
Commit SHA1s:

d46f544466, 79190ef2e1, 4f04e72670, 5cef0f4875, 28c7558f01, e19d10f6a7,
02de394c96, a4aac9d310, 8a18c998df, b55f59a3ec, 783d105099, c36f685263,
0f89b044f1, 6ba973dfe1, 460c63a6d0, f438124057, 147dad684c, 3bdac9bd60,
e4ac5075e5, 36816074ff, 475ad7e752, 1c869e154e, f5706c3159, be552fdd6c,
5e3a8e3ec5, 554ec03c32, 4b7e4a3fb0, 76723993fd, ecd0b86f4d, 3e52a4746c,
a20e45e8e7, 9334951d1b, 736ab7ef20, c94bc068bd, 502ee72799, f1ee4de37b,
ae1c199e21, 1bfe273a70, 647d21bdb5
Coverage configuration file (name not captured):

```diff
@@ -10,5 +10,4 @@ exclude_lines =
     if __name__ == .__main__.:
 ignore_errors = True
 omit =
     stree/tests/*
     stree/__init__.py
```
.github/workflows/codeql-analysis.yml (vendored, new file, 56 lines)

@@ -0,0 +1,56 @@

```yaml
name: "CodeQL"

on:
  push:
    branches: [ master ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ master ]
  schedule:
    - cron: '16 17 * * 3'

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        language: [ 'python' ]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
        # Learn more:
        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed

    steps:
      - name: Checkout repository
        uses: actions/checkout@v2

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v1
        with:
          languages: ${{ matrix.language }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.
          # queries: ./path/to/local/query, your-org/your-repo/queries@main

      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
      # If this step fails, then you should remove it and run the build manually (see below)
      - name: Autobuild
        uses: github/codeql-action/autobuild@v1

      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 https://git.io/JvXDl

      # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
      # and modify them (or add more) to build your code if your project
      # uses a compiled language

      #- run: |
      #    make bootstrap
      #    make release

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v1
```
.github/workflows/main.yml (vendored, new file, 47 lines)

@@ -0,0 +1,47 @@

```yaml
name: CI

on:
  push:
    branches: [master]
  pull_request:
    branches: [master]
  workflow_dispatch:

jobs:
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [macos-latest, ubuntu-latest]
        python: [3.8]

    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python }}
      - name: Install dependencies
        run: |
          pip install -q --upgrade pip
          pip install -q -r requirements.txt
          pip install -q --upgrade codecov coverage black flake8 codacy-coverage
      - name: Lint
        run: |
          black --check --diff stree
          flake8 --count stree
      - name: Tests
        run: |
          coverage run -m unittest -v stree.tests
          coverage xml
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v1
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./coverage.xml
      - name: Run codacy-coverage-reporter
        if: runner.os == 'Linux'
        uses: codacy/codacy-coverage-reporter-action@master
        with:
          project-token: ${{ secrets.CODACY_PROJECT_TOKEN }}
          coverage-reports: coverage.xml
```
.gitignore (vendored, 5 changes)

```diff
@@ -130,4 +130,7 @@ dmypy.json

 .idea
 .vscode
 .pre-commit-config.yaml
+
+**.csv
+.virtual_documents
```
LICENSE (2 changes)

```diff
@@ -1,6 +1,6 @@
 MIT License

-Copyright (c) 2020 Doctorado-ML
+Copyright (c) 2020-2021, Ricardo Montañana Gómez

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
```
Makefile (new file, 49 lines)

@@ -0,0 +1,49 @@

```makefile
SHELL := /bin/bash
.DEFAULT_GOAL := help
.PHONY: coverage deps help lint push test doc build

coverage:  ## Run tests with coverage
	coverage erase
	coverage run -m unittest -v stree.tests
	coverage report -m

deps:  ## Install dependencies
	pip install -r requirements.txt

lint:  ## Lint and static-check
	black stree
	flake8 stree
	mypy stree

push:  ## Push code with tags
	git push && git push --tags

test:  ## Run tests
	python -m unittest -v stree.tests

doc:  ## Update documentation
	make -C docs --makefile=Makefile html

build:  ## Build package
	rm -fr dist/*
	python setup.py sdist bdist_wheel

doc-clean:  ## Clean documentation build
	make -C docs --makefile=Makefile clean

help:  ## Show help message
	@IFS=$$'\n' ; \
	help_lines=(`fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/##/:/'`); \
	printf "%s\n\n" "Usage: make [task]"; \
	printf "%-20s %s\n" "task" "help" ; \
	printf "%-20s %s\n" "------" "----" ; \
	for help_line in $${help_lines[@]}; do \
		IFS=$$':' ; \
		help_split=($$help_line) ; \
		help_command=`echo $${help_split[0]} | sed -e 's/^ *//' -e 's/ *$$//'` ; \
		help_info=`echo $${help_split[2]} | sed -e 's/^ *//' -e 's/ *$$//'` ; \
		printf '\033[36m'; \
		printf "%-20s %s" $$help_command ; \
		printf '\033[0m'; \
		printf "%s\n" $$help_info; \
	done
```
README.md (57 changes)

```diff
@@ -1,8 +1,9 @@
-[](https://app.codeship.com/projects/399170)
+
 [](https://codecov.io/gh/doctorado-ml/stree)
 [](https://www.codacy.com/gh/Doctorado-ML/STree?utm_source=github.com&utm_medium=referral&utm_content=Doctorado-ML/STree&utm_campaign=Badge_Grade)
+[](https://lgtm.com/projects/g/Doctorado-ML/STree/context:python)

-# Stree
+# STree

 Oblique Tree classifier based on SVM nodes. The nodes are built and split with sklearn SVC models. Stree is a sklearn estimator and can be integrated in pipelines, grid searches, etc.
```
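The intro paragraph above claims Stree behaves as a standard sklearn estimator. A minimal sketch of what that buys you (assumed usage derived only from that claim, not code from this repository; parameter names come from the hyperparameter table further down):

```python
# Dropping Stree into a Pipeline and tuning it with GridSearchCV,
# exactly as one would with any sklearn classifier.
from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from stree import Stree

X, y = load_iris(return_X_y=True)
pipe = Pipeline([("scale", StandardScaler()), ("tree", Stree(random_state=1))])
param_grid = {"tree__C": [1, 7], "tree__kernel": ["linear", "rbf"]}
search = GridSearchCV(pipe, param_grid, cv=3)
search.fit(X, y)
print(search.best_params_, search.best_score_)
```

The README diff continues: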
````diff
@@ -14,30 +15,60 @@
 pip install git+https://github.com/doctorado-ml/stree
 ```

 ## Documentation

 Can be found in

 ## Examples

 ### Jupyter notebooks

-* [](https://mybinder.org/v2/gh/Doctorado-ML/STree/master?urlpath=lab/tree/notebooks/benchmark.ipynb) Benchmark
+- [](https://mybinder.org/v2/gh/Doctorado-ML/STree/master?urlpath=lab/tree/notebooks/benchmark.ipynb) Benchmark

-* [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/benchmark.ipynb) Benchmark
+- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/benchmark.ipynb) Benchmark

-* [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/features.ipynb) Test features
+- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/features.ipynb) Some features

-* [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/adaboost.ipynb) Adaboost
+- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/gridsearch.ipynb) Gridsearch

-* [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/gridsearch.ipynb) Gridsearch
+- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/ensemble.ipynb) Ensembles

-* [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/test_graphs.ipynb) Test Graphics

 ## Hyperparameters

 ### Command line

 | | **Hyperparameter** | **Type/Values** | **Default** | **Meaning** |
 | --- | --- | --- | --- | --- |
 | \* | C | \<float\> | 1.0 | Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. |
 | \* | kernel | {"liblinear", "linear", "poly", "rbf", "sigmoid"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of 'liblinear', 'linear', 'poly' or 'rbf'. liblinear uses the [liblinear](https://www.csie.ntu.edu.tw/~cjlin/liblinear/) library and the rest use the [libsvm](https://www.csie.ntu.edu.tw/~cjlin/libsvm/) library through scikit-learn |
 | \* | max_iter | \<int\> | 1e5 | Hard limit on iterations within solver, or -1 for no limit. |
 | \* | random_state | \<int\> | None | Controls the pseudo random number generation for shuffling the data for probability estimates. Ignored when probability is False.<br>Pass an int for reproducible output across multiple function calls |
 | | max_depth | \<int\> | None | Specifies the maximum depth of the tree |
 | \* | tol | \<float\> | 1e-4 | Tolerance for stopping criterion. |
 | \* | degree | \<int\> | 3 | Degree of the polynomial kernel function ('poly'). Ignored by all other kernels. |
 | \* | gamma | {"scale", "auto"} or \<float\> | scale | Kernel coefficient for 'rbf' and 'poly'.<br>If gamma='scale' (default) is passed then it uses 1 / (n_features \* X.var()) as the value of gamma;<br>if 'auto', it uses 1 / n_features. |
 | | split_criteria | {"impurity", "max_samples"} | impurity | Decides (only in multiclass classification) which column (class) to use to split the dataset in a node\*\*. max_samples is incompatible with 'ovo' multiclass_strategy |
 | | criterion | {"gini", "entropy"} | entropy | The function to measure the quality of a split (only used if max_features != num_features).<br>Supported criteria are "gini" for the Gini impurity and "entropy" for the information gain. |
 | | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) for any |
 | | max_features | \<int\>, \<float\><br><br>or {"auto", "sqrt", "log2"} | None | The number of features to consider when looking for the split:<br>If int, then consider max_features features at each split.<br>If float, then max_features is a fraction and int(max_features \* n_features) features are considered at each split.<br>If "auto", then max_features=sqrt(n_features).<br>If "sqrt", then max_features=sqrt(n_features).<br>If "log2", then max_features=log2(n_features).<br>If None, then max_features=n_features. |
 | | splitter | {"best", "random", "mutual"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features). Supported strategies are: **"best"**: the sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **"random"**: the algorithm generates 5 candidates and chooses one randomly. **"mutual"**: chooses the best features w.r.t. their mutual info with the label |
 | | normalize | \<bool\> | False | Whether standardization of features should be applied in each node with the samples that reach it |
 | \* | multiclass_strategy | {"ovo", "ovr"} | "ovo" | Strategy to use with multiclass datasets: **"ovo"**: one versus one; **"ovr"**: one versus rest |

 ```bash
 python main.py
 ```

 \* Hyperparameter used by the support vector classifier of every node

 \*\* **Splitting in a STree node**

 The decision function is applied to the dataset and the distances from the samples to the hyperplanes are computed in a matrix. This matrix has as many columns as classes the samples belong to (if more than two, i.e. multiclass classification), or one column if it is a binary dataset. In binary classification only one hyperplane is computed, and therefore only one column is needed to store the distances of the samples to it. If three or more classes are present, we need as many hyperplanes as there are classes, and therefore one column per hyperplane.

 In multiclass classification we have to decide which column to take into account to make the split; this depends on the hyperparameter _split_criteria_: if "impurity" is chosen, STree computes the information gain of every split candidate using each column and chooses the one that maximizes it; otherwise STree chooses the column with the most samples with a predicted class (the column with the most positive numbers in it).

 Once we have the column to take into account for the split, the algorithm separates the samples with positive distances to the hyperplane from the rest.
````
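A sketch of the column-selection logic described in the paragraphs above (my own illustration of the described behavior, not code from the repository; the actual logic lives in stree's Splitter class):

```python
import numpy as np

def entropy(y: np.ndarray) -> float:
    """Shannon entropy of a label vector."""
    _, counts = np.unique(y, return_counts=True)
    p = counts / counts.sum()
    return float(-(p * np.log2(p)).sum())

def choose_split_column(distances: np.ndarray, y: np.ndarray, split_criteria: str) -> int:
    """distances: (n_samples, n_columns) output of the decision function;
    one column per class in multiclass, a single column in binary."""
    n_cols = distances.shape[1]
    if n_cols == 1:                      # binary: one hyperplane, nothing to choose
        return 0
    if split_criteria == "max_samples":  # column with the most positive distances
        return int(np.argmax((distances > 0).sum(axis=0)))
    # "impurity": the column whose positive/negative partition maximizes
    # information gain over the node's labels
    gains = []
    for i in range(n_cols):
        mask = distances[:, i] > 0
        if mask.all() or not mask.any():
            gains.append(0.0)            # degenerate split, no gain
            continue
        gain = entropy(y) - (mask.mean() * entropy(y[mask])
                             + (1 - mask.mean()) * entropy(y[~mask]))
        gains.append(gain)
    return int(np.argmax(gains))
```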
````diff
 ## Tests

 ```bash
 python -m unittest -v stree.tests
 ```

 ## License

 STree is [MIT](https://github.com/doctorado-ml/stree/blob/master/LICENSE) licensed
````
codecov.yml (10 changes)

```diff
@@ -1,12 +1,12 @@
-overage:
+coverage:
   status:
     project:
       default:
-        target: 90%
+        target: 100%
 comment:
   layout: "reach, diff, flags, files"
   behavior: default
   require_changes: false
   require_base: yes
   require_head: yes
   branches: null
```
docs/Makefile (new file, 20 lines)

@@ -0,0 +1,20 @@

```makefile
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS    ?=
SPHINXBUILD   ?= sphinx-build
SOURCEDIR     = source
BUILDDIR      = build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
```
docs/requirements.txt (new file, 4 lines)

@@ -0,0 +1,4 @@

```text
sphinx
sphinx-rtd-theme
myst-parser
git+https://github.com/doctorado-ml/stree
```
docs/source/api/Siterator.rst (new file, 9 lines)

@@ -0,0 +1,9 @@

```rst
Siterator
=========

.. automodule:: stree
.. autoclass:: Siterator
   :members:
   :undoc-members:
   :private-members:
   :show-inheritance:
```

docs/source/api/Snode.rst (new file, 9 lines)

@@ -0,0 +1,9 @@

```rst
Snode
=====

.. automodule:: stree
.. autoclass:: Snode
   :members:
   :undoc-members:
   :private-members:
   :show-inheritance:
```

docs/source/api/Splitter.rst (new file, 9 lines)

@@ -0,0 +1,9 @@

```rst
Splitter
========

.. automodule:: stree
.. autoclass:: Splitter
   :members:
   :undoc-members:
   :private-members:
   :show-inheritance:
```

docs/source/api/Stree.rst (new file, 9 lines)

@@ -0,0 +1,9 @@

```rst
Stree
=====

.. automodule:: stree
.. autoclass:: Stree
   :members:
   :undoc-members:
   :private-members:
   :show-inheritance:
```

docs/source/api/index.rst (new file, 11 lines)

@@ -0,0 +1,11 @@

```rst
API index
=========

.. toctree::
   :maxdepth: 2
   :caption: Contents:

   Stree
   Splitter
   Snode
   Siterator
```
docs/source/conf.py (new file, 57 lines)

@@ -0,0 +1,57 @@

```python
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys

import stree

sys.path.insert(0, os.path.abspath("../../stree/"))


# -- Project information -----------------------------------------------------

project = "STree"
copyright = "2020 - 2021, Ricardo Montañana Gómez"
author = "Ricardo Montañana Gómez"

# The full version, including alpha/beta/rc tags
version = stree.__version__
release = version


# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["myst_parser", "sphinx.ext.autodoc", "sphinx.ext.viewcode"]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
```
docs/source/example.md (new file, 44 lines)

@@ -0,0 +1,44 @@

````markdown
# Examples

## Notebooks

- [](https://mybinder.org/v2/gh/Doctorado-ML/STree/master?urlpath=lab/tree/notebooks/benchmark.ipynb) Benchmark

- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/benchmark.ipynb) Benchmark

- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/features.ipynb) Some features

- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/gridsearch.ipynb) Gridsearch

- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/ensemble.ipynb) Ensembles

## Sample Code

```python
import time
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from stree import Stree

random_state = 1
X, y = load_iris(return_X_y=True)
Xtrain, Xtest, ytrain, ytest = train_test_split(
    X, y, test_size=0.2, random_state=random_state
)
now = time.time()
print("Predicting with max_features=sqrt(n_features)")
clf = Stree(random_state=random_state, max_features="auto")
clf.fit(Xtrain, ytrain)
print(f"Took {time.time() - now:.2f} seconds to train")
print(clf)
print(f"Classifier's accuracy (train): {clf.score(Xtrain, ytrain):.4f}")
print(f"Classifier's accuracy (test) : {clf.score(Xtest, ytest):.4f}")
print("=" * 40)
print("Predicting with max_features=n_features")
now = time.time()
clf = Stree(random_state=random_state)
clf.fit(Xtrain, ytrain)
print(f"Took {time.time() - now:.2f} seconds to train")
print(clf)
print(f"Classifier's accuracy (train): {clf.score(Xtrain, ytrain):.4f}")
print(f"Classifier's accuracy (test) : {clf.score(Xtest, ytest):.4f}")
```
````
docs/source/example.png (new binary file, 3.1 MiB; not shown)
docs/source/hyperparameters.md (new file, 29 lines)

@@ -0,0 +1,29 @@

```markdown
## Hyperparameters

| | **Hyperparameter** | **Type/Values** | **Default** | **Meaning** |
| --- | --- | --- | --- | --- |
| \* | C | \<float\> | 1.0 | Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. |
| \* | kernel | {"liblinear", "linear", "poly", "rbf", "sigmoid"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of 'liblinear', 'linear', 'poly' or 'rbf'. liblinear uses the [liblinear](https://www.csie.ntu.edu.tw/~cjlin/liblinear/) library and the rest use the [libsvm](https://www.csie.ntu.edu.tw/~cjlin/libsvm/) library through scikit-learn |
| \* | max_iter | \<int\> | 1e5 | Hard limit on iterations within solver, or -1 for no limit. |
| \* | random_state | \<int\> | None | Controls the pseudo random number generation for shuffling the data for probability estimates. Ignored when probability is False.<br>Pass an int for reproducible output across multiple function calls |
| | max_depth | \<int\> | None | Specifies the maximum depth of the tree |
| \* | tol | \<float\> | 1e-4 | Tolerance for stopping criterion. |
| \* | degree | \<int\> | 3 | Degree of the polynomial kernel function ('poly'). Ignored by all other kernels. |
| \* | gamma | {"scale", "auto"} or \<float\> | scale | Kernel coefficient for 'rbf' and 'poly'.<br>If gamma='scale' (default) is passed then it uses 1 / (n_features \* X.var()) as the value of gamma;<br>if 'auto', it uses 1 / n_features. |
| | split_criteria | {"impurity", "max_samples"} | impurity | Decides (only in multiclass classification) which column (class) to use to split the dataset in a node\*\*. max_samples is incompatible with 'ovo' multiclass_strategy |
| | criterion | {"gini", "entropy"} | entropy | The function to measure the quality of a split (only used if max_features != num_features).<br>Supported criteria are "gini" for the Gini impurity and "entropy" for the information gain. |
| | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) for any |
| | max_features | \<int\>, \<float\><br><br>or {"auto", "sqrt", "log2"} | None | The number of features to consider when looking for the split:<br>If int, then consider max_features features at each split.<br>If float, then max_features is a fraction and int(max_features \* n_features) features are considered at each split.<br>If "auto", then max_features=sqrt(n_features).<br>If "sqrt", then max_features=sqrt(n_features).<br>If "log2", then max_features=log2(n_features).<br>If None, then max_features=n_features. |
| | splitter | {"best", "random", "mutual"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features). Supported strategies are: **"best"**: the sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **"random"**: the algorithm generates 5 candidates and chooses one randomly. **"mutual"**: chooses the best features w.r.t. their mutual info with the label |
| | normalize | \<bool\> | False | Whether standardization of features should be applied in each node with the samples that reach it |
| \* | multiclass_strategy | {"ovo", "ovr"} | "ovo" | Strategy to use with multiclass datasets: **"ovo"**: one versus one; **"ovr"**: one versus rest |

\* Hyperparameter used by the support vector classifier of every node

\*\* **Splitting in a STree node**

The decision function is applied to the dataset and the distances from the samples to the hyperplanes are computed in a matrix. This matrix has as many columns as classes the samples belong to (if more than two, i.e. multiclass classification), or one column if it is a binary dataset. In binary classification only one hyperplane is computed, and therefore only one column is needed to store the distances of the samples to it. If three or more classes are present, we need as many hyperplanes as there are classes, and therefore one column per hyperplane.

In multiclass classification we have to decide which column to take into account to make the split; this depends on the hyperparameter _split_criteria_: if "impurity" is chosen, STree computes the information gain of every split candidate using each column and chooses the one that maximizes it; otherwise STree chooses the column with the most samples with a predicted class (the column with the most positive numbers in it).

Once we have the column to take into account for the split, the algorithm separates the samples with positive distances to the hyperplane from the rest.
```
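For reference, the information gain that the "impurity" criterion maximizes is the standard entropy-based gain (a textbook formulation, not copied from the repo). For a node with samples S split by one column into S⁺ (positive distances) and S⁻ (the rest):

```latex
H(S) = -\sum_{c=1}^{K} p_c \log_2 p_c,
\qquad
IG(S) = H(S) - \frac{|S^{+}|}{|S|}\, H(S^{+}) - \frac{|S^{-}|}{|S|}\, H(S^{-})
```

where p_c is the proportion of samples of class c in the node.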
docs/source/index.rst (new file, 15 lines)

@@ -0,0 +1,15 @@

```rst
Welcome to STree's documentation!
=================================

.. toctree::
   :caption: Contents:
   :titlesonly:


   stree
   install
   hyperparameters
   example
   api/index

* :ref:`genindex`
```
docs/source/install.rst (new file, 16 lines)

@@ -0,0 +1,16 @@

```rst
Install
=======

The main stable release:

``pip install stree``

or the latest development version:

``pip install git+https://github.com/doctorado-ml/stree``

Tests
*****

``python -m unittest -v stree.tests``
```
docs/source/stree.md (new file, 14 lines)

```markdown
# STree

[](https://app.codeship.com/projects/399170)
[](https://codecov.io/gh/doctorado-ml/stree)
[](https://www.codacy.com/gh/Doctorado-ML/STree?utm_source=github.com&utm_medium=referral&utm_content=Doctorado-ML/STree&utm_campaign=Badge_Grade)
[](https://lgtm.com/projects/g/Doctorado-ML/STree/context:python)

Oblique Tree classifier based on SVM nodes. The nodes are built and split with sklearn SVC models. Stree is a sklearn estimator and can be integrated in pipelines, grid searches, etc.

![](example.png)

## License

STree is [MIT](https://github.com/doctorado-ml/stree/blob/master/LICENSE) licensed
```
main.py (deleted, 77 lines)

@@ -1,77 +0,0 @@

```python
import time
from sklearn.model_selection import train_test_split
from stree import Stree

random_state = 1


def load_creditcard(n_examples=0):
    import pandas as pd
    import numpy as np
    import random

    df = pd.read_csv("data/creditcard.csv")
    print(
        "Fraud: {0:.3f}% {1}".format(
            df.Class[df.Class == 1].count() * 100 / df.shape[0],
            df.Class[df.Class == 1].count(),
        )
    )
    print(
        "Valid: {0:.3f}% {1}".format(
            df.Class[df.Class == 0].count() * 100 / df.shape[0],
            df.Class[df.Class == 0].count(),
        )
    )
    y = np.expand_dims(df.Class.values, axis=1)
    X = df.drop(["Class", "Time", "Amount"], axis=1).values
    if n_examples > 0:
        # Take first n_examples samples
        X = X[:n_examples, :]
        y = y[:n_examples, :]
    else:
        # Take all the positive samples with a number of random negatives
        if n_examples < 0:
            Xt = X[(y == 1).ravel()]
            yt = y[(y == 1).ravel()]
            indices = random.sample(range(X.shape[0]), -1 * n_examples)
            X = np.append(Xt, X[indices], axis=0)
            y = np.append(yt, y[indices], axis=0)
    print("X.shape", X.shape, " y.shape", y.shape)
    print(
        "Fraud: {0:.3f}% {1}".format(
            len(y[y == 1]) * 100 / X.shape[0], len(y[y == 1])
        )
    )
    print(
        "Valid: {0:.3f}% {1}".format(
            len(y[y == 0]) * 100 / X.shape[0], len(y[y == 0])
        )
    )
    Xtrain, Xtest, ytrain, ytest = train_test_split(
        X,
        y,
        train_size=0.7,
        shuffle=True,
        random_state=random_state,
        stratify=y,
    )
    return Xtrain, Xtest, ytrain, ytest


# data = load_creditcard(-5000)  # Take all true samples + 5000 of the others
# data = load_creditcard(5000)  # Take the first 5000 samples
data = load_creditcard()  # Take all the samples

Xtrain = data[0]
Xtest = data[1]
ytrain = data[2]
ytest = data[3]

now = time.time()
clf = Stree(C=0.01, random_state=random_state)
clf.fit(Xtrain, ytrain)
print(f"Took {time.time() - now:.2f} seconds to train")
print(clf)
print(f"Classifier's accuracy (train): {clf.score(Xtrain, ytrain):.4f}")
print(f"Classifier's accuracy (test) : {clf.score(Xtest, ytest):.4f}")
```
File diff suppressed because one or more lines are too long

(notebook diff; file name not captured)

```diff
@@ -4,7 +4,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "# Test AdaBoost with different configurations"
+    "# Test Stree with AdaBoost and Bagging with different configurations"
    ]
   },
   {
@@ -17,38 +17,43 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "#\n",
     "# Google Colab setup\n",
     "#\n",
-    "#!pip install git+https://github.com/doctorado-ml/stree"
+    "#!pip install git+https://github.com/doctorado-ml/stree\n",
+    "!pip install pandas"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "import time\n",
-    "from sklearn.ensemble import AdaBoostClassifier\n",
-    "from sklearn.tree import DecisionTreeClassifier\n",
-    "from sklearn.svm import LinearSVC, SVC\n",
-    "from sklearn.model_selection import GridSearchCV, train_test_split\n",
-    "from sklearn.datasets import load_iris\n",
-    "from stree import Stree"
+    "import os\n",
+    "import random\n",
+    "import warnings\n",
+    "import pandas as pd\n",
+    "import numpy as np\n",
+    "from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier\n",
+    "from sklearn.model_selection import train_test_split\n",
+    "from sklearn.exceptions import ConvergenceWarning\n",
+    "from stree import Stree\n",
+    "\n",
+    "warnings.filterwarnings(\"ignore\", category=ConvergenceWarning)"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "import os\n",
     "if not os.path.isfile('data/creditcard.csv'):\n",
     "    !wget --no-check-certificate --content-disposition http://nube.jccm.es/index.php/s/Zs7SYtZQJ3RQ2H2/download\n",
     "    !tar xzf creditcard.tgz"
@@ -56,22 +61,15 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
-   "metadata": {},
-   "outputs": [
-    {
-     "output_type": "stream",
-     "name": "stdout",
-     "text": "Fraud: 0.173% 492\nValid: 99.827% 284315\nX.shape (100492, 28) y.shape (100492,)\nFraud: 0.659% 662\nValid: 99.341% 99830\n"
-    }
-   ],
+   "execution_count": null,
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
    "source": [
     "random_state=1\n",
     "\n",
     "def load_creditcard(n_examples=0):\n",
     "    import pandas as pd\n",
     "    import numpy as np\n",
     "    import random\n",
     "    df = pd.read_csv('data/creditcard.csv')\n",
     "    print(\"Fraud: {0:.3f}% {1}\".format(df.Class[df.Class == 1].count()*100/df.shape[0], df.Class[df.Class == 1].count()))\n",
     "    print(\"Valid: {0:.3f}% {1}\".format(df.Class[df.Class == 0].count()*100/df.shape[0], df.Class[df.Class == 0].count()))\n",
@@ -117,23 +115,19 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## STree alone on the whole dataset and linear kernel"
+    "## STree alone with 100.000 samples and linear kernel"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {},
-   "outputs": [
-    {
-     "output_type": "stream",
-     "name": "stdout",
-     "text": "Score Train: 0.9985499829409757\nScore Test: 0.998407854584052\nTook 39.45 seconds\n"
-    }
-   ],
+   "execution_count": null,
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
    "source": [
     "now = time.time()\n",
-    "clf = Stree(max_depth=3, random_state=random_state)\n",
+    "clf = Stree(max_depth=3, random_state=random_state, max_iter=1e3)\n",
     "clf.fit(Xtrain, ytrain)\n",
     "print(\"Score Train: \", clf.score(Xtrain, ytrain))\n",
     "print(\"Score Test: \", clf.score(Xtest, ytest))\n",
@@ -144,12 +138,12 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## Different kernels with different configuations"
+    "## Adaboost"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -160,19 +154,15 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
-   "metadata": {},
-   "outputs": [
-    {
-     "output_type": "stream",
-     "name": "stdout",
-     "text": "Kernel: linear\tTime: 87.00 seconds\tScore Train: 0.9982372\tScore Test: 0.9981425\nKernel: rbf\tTime: 60.60 seconds\tScore Train: 0.9934181\tScore Test: 0.9933992\nKernel: poly\tTime: 88.08 seconds\tScore Train: 0.9937450\tScore Test: 0.9938968\n"
-    }
-   ],
+   "execution_count": null,
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
    "source": [
     "for kernel in ['linear', 'rbf', 'poly']:\n",
     "    now = time.time()\n",
-    "    clf = AdaBoostClassifier(Stree(C=7, kernel=kernel, max_depth=max_depth, random_state=random_state), n_estimators=n_estimators, random_state=random_state)\n",
+    "    clf = AdaBoostClassifier(base_estimator=Stree(C=C, kernel=kernel, max_depth=max_depth, random_state=random_state, max_iter=1e3), algorithm=\"SAMME\", n_estimators=n_estimators, random_state=random_state)\n",
     "    clf.fit(Xtrain, ytrain)\n",
     "    score_train = clf.score(Xtrain, ytrain)\n",
     "    score_test = clf.score(Xtest, ytest)\n",
```
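One detail of the change above worth spelling out (my reading of it, not stated in the diff): AdaBoost's default SAMME.R algorithm requires the base estimator to expose predict_proba, while the discrete "SAMME" variant only needs predict, which suits an SVC-based tree. A self-contained sketch of the same pattern, with parameters mirroring the notebook but a small stand-in dataset:

```python
# Boosting an SVM-node oblique tree with the discrete SAMME algorithm.
# Assumes the Stree estimator from this repo; the dataset is only a demo.
from sklearn.datasets import load_wine
from sklearn.ensemble import AdaBoostClassifier
from stree import Stree

X, y = load_wine(return_X_y=True)
clf = AdaBoostClassifier(
    base_estimator=Stree(C=7, kernel="linear", max_depth=3, random_state=1),
    algorithm="SAMME",  # discrete boosting: only predict() is needed
    n_estimators=10,
    random_state=1,
)
print(f"Train accuracy: {clf.fit(X, y).score(X, y):.4f}")
```

The notebook diff continues: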
```diff
@@ -183,24 +173,31 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## Test algorithm SAMME in AdaBoost to check speed/accuracy"
+    "## Bagging"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "output_type": "stream",
-     "name": "stdout",
-     "text": "Kernel: linear\tTime: 58.75 seconds\tScore Train: 0.9980524\tScore Test: 0.9978771\nKernel: rbf\tTime: 12.49 seconds\tScore Train: 0.9934181\tScore Test: 0.9933992\nKernel: poly\tTime: 97.85 seconds\tScore Train: 0.9972137\tScore Test: 0.9971806\n"
-    }
-   ],
+   "outputs": [],
    "source": [
+    "n_estimators = 10\n",
+    "C = 7\n",
+    "max_depth = 3"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
+   "source": [
     "for kernel in ['linear', 'rbf', 'poly']:\n",
     "    now = time.time()\n",
-    "    clf = AdaBoostClassifier(Stree(C=7, kernel=kernel, max_depth=max_depth, random_state=random_state), n_estimators=n_estimators, random_state=random_state, algorithm=\"SAMME\")\n",
+    "    clf = BaggingClassifier(base_estimator=Stree(C=C, kernel=kernel, max_depth=max_depth, random_state=random_state, max_iter=1e3), n_estimators=n_estimators, random_state=random_state)\n",
     "    clf.fit(Xtrain, ytrain)\n",
     "    score_train = clf.score(Xtrain, ytrain)\n",
     "    score_test = clf.score(Xtest, ytest)\n",
@@ -209,6 +206,11 @@
   }
  ],
  "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
@@ -219,14 +221,9 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.7.6-final"
-  },
-  "orig_nbformat": 2,
-  "kernelspec": {
-   "name": "python37664bitgeneralvenvfbd0a23e74cf4e778460f5ffc6761f39",
-   "display_name": "Python 3.7.6 64-bit ('general': venv)"
+   "version": "3.8.2-final"
   }
  },
  "nbformat": 4,
- "nbformat_minor": 2
+ "nbformat_minor": 4
 }
```
(second notebook diff; file name not captured)

```diff
@@ -4,7 +4,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "# Test smple_weight, kernels, C, sklearn estimator"
+    "# Test sample_weight, kernels, C, sklearn estimator"
    ]
   },
   {
@@ -17,22 +17,27 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "#\n",
     "# Google Colab setup\n",
     "#\n",
-    "#!pip install git+https://github.com/doctorado-ml/stree"
+    "#!pip install git+https://github.com/doctorado-ml/stree\n",
+    "!pip install pandas"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "import time\n",
+    "import random\n",
+    "import warnings\n",
+    "import os\n",
     "import numpy as np\n",
     "import pandas as pd\n",
     "from sklearn.svm import SVC\n",
@@ -40,17 +45,20 @@
     "from sklearn.utils.estimator_checks import check_estimator\n",
     "from sklearn.datasets import make_classification, load_iris, load_wine\n",
     "from sklearn.model_selection import train_test_split\n",
+    "from sklearn.utils.class_weight import compute_sample_weight\n",
+    "from sklearn.exceptions import ConvergenceWarning\n",
     "from stree import Stree\n",
-    "import time"
+    "warnings.filterwarnings(\"ignore\", category=ConvergenceWarning)"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
-   "metadata": {},
+   "execution_count": null,
+   "metadata": {
+    "tags": []
+   },
    "outputs": [],
    "source": [
-    "import os\n",
     "if not os.path.isfile('data/creditcard.csv'):\n",
     "    !wget --no-check-certificate --content-disposition http://nube.jccm.es/index.php/s/Zs7SYtZQJ3RQ2H2/download\n",
     "    !tar xzf creditcard.tgz"
@@ -58,22 +66,15 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
-   "metadata": {},
-   "outputs": [
-    {
-     "output_type": "stream",
-     "name": "stdout",
-     "text": "Fraud: 0.173% 492\nValid: 99.827% 284315\nX.shape (1492, 28) y.shape (1492,)\nFraud: 33.110% 494\nValid: 66.890% 998\n"
-    }
-   ],
+   "execution_count": null,
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
    "source": [
     "random_state=1\n",
     "\n",
     "def load_creditcard(n_examples=0):\n",
     "    import pandas as pd\n",
     "    import numpy as np\n",
     "    import random\n",
     "    df = pd.read_csv('data/creditcard.csv')\n",
     "    print(\"Fraud: {0:.3f}% {1}\".format(df.Class[df.Class == 1].count()*100/df.shape[0], df.Class[df.Class == 1].count()))\n",
     "    print(\"Valid: {0:.3f}% {1}\".format(df.Class[df.Class == 0].count()*100/df.shape[0], df.Class[df.Class == 0].count()))\n",
@@ -94,22 +95,20 @@
     "    print(\"X.shape\", X.shape, \" y.shape\", y.shape)\n",
     "    print(\"Fraud: {0:.3f}% {1}\".format(len(y[y == 1])*100/X.shape[0], len(y[y == 1])))\n",
     "    print(\"Valid: {0:.3f}% {1}\".format(len(y[y == 0]) * 100 / X.shape[0], len(y[y == 0])))\n",
-    "    Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, train_size=0.7, shuffle=True, random_state=random_state, stratify=y)\n",
+    "    Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, train_size=0.7, shuffle=True, random_state=random_state)\n",
     "    return Xtrain, Xtest, ytrain, ytest\n",
     "\n",
-    "# data = load_creditcard(-5000) # Take all true samples + 5000 of the others\n",
+    "data = load_creditcard(-5000) # Take all true samples with up to 5000 of the others\n",
     "# data = load_creditcard(5000) # Take the first 5000 samples\n",
-    "data = load_creditcard(-1000) # Take all the samples\n",
+    "# data = load_creditcard(-1000) # Take 1000 samples\n",
     "\n",
     "Xtrain = data[0]\n",
     "Xtest = data[1]\n",
     "ytrain = data[2]\n",
     "ytest = data[3]\n",
     "# Set weights inverse to its count class in dataset\n",
-    "weights = np.ones(Xtrain.shape[0],) * 1.00244\n",
-    "weights[ytrain==1] = 1.99755\n",
-    "weights_test = np.ones(Xtest.shape[0],) * 1.00244\n",
-    "weights_test[ytest==1] = 1.99755 "
+    "weights = compute_sample_weight(\"balanced\", ytrain)\n",
+    "weights_test = compute_sample_weight(\"balanced\", ytest)\n",
+    "print(weights[:4], weights_test[:4])"
    ]
   },
   {
```
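The replacement of the hand-set weights with `compute_sample_weight` above keeps the same "inverse to class count" intent; the scaling is worth spelling out (a quick sketch, not from the notebook):

```python
# "balanced" sample weights are n_samples / (n_classes * count(class of sample)),
# which reproduces the removed hand-coded inverse-frequency values automatically.
import numpy as np
from sklearn.utils.class_weight import compute_sample_weight

y = np.array([0, 0, 0, 1])
print(compute_sample_weight("balanced", y))
# approx [0.6667 0.6667 0.6667 2.0]: 4 / (2 * 3) for class 0, 4 / (2 * 1) for class 1
```

The rest of this notebook's diff: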
```diff
@@ -123,21 +122,17 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## Test smple_weights\n",
+    "## Test sample_weights\n",
     "Compute accuracy with weights in samples. The weights are set based on the inverse of the number of samples of each class"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {},
-   "outputs": [
-    {
-     "output_type": "stream",
-     "name": "stdout",
-     "text": "Accuracy of Train without weights 0.9789272030651341\nAccuracy of Train with weights 0.9952107279693486\nAccuracy of Tests without weights 0.9598214285714286\nAccuracy of Tests with weights 0.9508928571428571\n"
-    }
-   ],
+   "execution_count": null,
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
    "source": [
     "C = 23\n",
     "print(\"Accuracy of Train without weights\", Stree(C=C, random_state=1).fit(Xtrain, ytrain).score(Xtrain, ytrain))\n",
@@ -156,15 +151,11 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
-   "metadata": {},
-   "outputs": [
-    {
-     "output_type": "stream",
-     "name": "stdout",
-     "text": "Time: 0.27s\tKernel: linear\tAccuracy_train: 0.9683908045977011\tAccuracy_test: 0.953125\nTime: 0.09s\tKernel: rbf\tAccuracy_train: 0.9875478927203065\tAccuracy_test: 0.9598214285714286\nTime: 0.06s\tKernel: poly\tAccuracy_train: 0.9885057471264368\tAccuracy_test: 0.9464285714285714\n"
-    }
-   ],
+   "execution_count": null,
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
    "source": [
     "random_state=1\n",
     "for kernel in ['linear', 'rbf', 'poly']:\n",
@@ -185,19 +176,11 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": null,
    "metadata": {
-    "tags": [
-     "outputPrepend"
-    ]
+    "tags": []
    },
-   "outputs": [
-    {
-     "output_type": "stream",
-     "name": "stdout",
-     "text": "************** C=0.001 ****************************\nClassifier's accuracy (train): 0.9531\nClassifier's accuracy (test) : 0.9621\nroot\nroot - Down, <cgaf> - Leaf class=1 belief= 0.983713 counts=(array([0, 1]), array([ 5, 302]))\nroot - Up, <cgaf> - Leaf class=0 belief= 0.940299 counts=(array([0, 1]), array([693, 44]))\n\n**************************************************\n************** C=0.01 ****************************\nClassifier's accuracy (train): 0.9569\nClassifier's accuracy (test) : 0.9621\nroot\nroot - Down, <cgaf> - Leaf class=1 belief= 0.990228 counts=(array([0, 1]), array([ 3, 304]))\nroot - Up, <cgaf> - Leaf class=0 belief= 0.943012 counts=(array([0, 1]), array([695, 42]))\n\n**************************************************\n************** C=1 ****************************\nClassifier's accuracy (train): 0.9655\nClassifier's accuracy (test) : 0.9643\nroot\nroot - Down\nroot - Down - Down, <pure> - Leaf class=1 belief= 1.000000 counts=(array([1]), array([310]))\nroot - Down - Up, <pure> - Leaf class=0 belief= 1.000000 counts=(array([0]), array([5]))\nroot - Up, <cgaf> - Leaf class=0 belief= 0.950617 counts=(array([0, 1]), array([693, 36]))\n\n**************************************************\n************** C=5 ****************************\nClassifier's accuracy (train): 0.9684\nClassifier's accuracy (test) : 0.9598\nroot\nroot - Down\nroot - Down - Down, <pure> - Leaf class=1 belief= 1.000000 counts=(array([1]), array([311]))\nroot - Down - Up, <pure> - Leaf class=0 belief= 1.000000 counts=(array([0]), array([8]))\nroot - Up\nroot - Up - Down\nroot - Up - Down - Down, <pure> - Leaf class=1 belief= 1.000000 counts=(array([1]), array([1]))\nroot - Up - Down - Up, <pure> - Leaf class=0 belief= 1.000000 counts=(array([0]), array([2]))\nroot - Up - Up\nroot - Up - Up - Down, <pure> - Leaf class=0 belief= 1.000000 counts=(array([0]), array([2]))\nroot - Up - Up - Up\nroot - Up - Up - Up - Down\nroot - Up - Up - Up - Down - Down, <pure> - Leaf class=1 belief= 1.000000 counts=(array([1]), array([1]))\nroot - Up - Up - Up - Down - Up, <pure> - Leaf class=0 belief= 1.000000 counts=(array([0]), array([1]))\nroot - Up - Up - Up - Up, <cgaf> - Leaf class=0 belief= 0.954039 counts=(array([0, 1]), array([685, 33]))\n\n**************************************************\n************** C=17 ****************************\nClassifier's accuracy (train): 0.9751\nClassifier's accuracy (test) : 0.9464\nroot\nroot - Down\nroot - Down - Down, <pure> - Leaf class=1 belief= 1.000000 counts=(array([1]), array([304]))\nroot - Down - Up, <pure> - Leaf class=0 belief= 1.000000 counts=(array([0]), array([8]))\nroot - Up\nroot - Up - Down\nroot - Up - Down - Down, <pure> - Leaf class=1 belief= 1.000000 counts=(array([1]), array([4]))\nroot - Up - Down - Up, <pure> - Leaf class=0 belief= 1.000000 counts=(array([0]), array([3]))\nroot - Up - Up\nroot - Up - Up - Down\nroot - Up - Up - Down - Down, <pure> - Leaf class=1 belief= 1.000000 counts=(array([1]), array([4]))\nroot - Up - Up - Down - Up, <pure> - Leaf class=0 belief= 1.000000 counts=(array([0]), array([2]))\nroot - Up - Up - Up\nroot - Up - Up - Up - Down\nroot - Up - Up - Up - Down - Down, <pure> - Leaf class=1 belief= 1.000000 counts=(array([1]), array([3]))\nroot - Up - Up - Up - Down - Up, <pure> - Leaf class=0 belief= 1.000000 counts=(array([0]), array([1]))\nroot - Up - Up - Up - Up\nroot - Up - Up - Up - Up - Down\nroot - Up - Up - Up - Up - Down - Down, <pure> - Leaf class=1 belief= 1.000000 counts=(array([1]), array([3]))\nroot - Up - Up - Up - Up - Down - Up, <pure> - Leaf class=0 belief= 1.000000 counts=(array([0]), array([3]))\nroot - Up - Up - Up - Up - Up\nroot - Up - Up - Up - Up - Up - Down, <pure> - Leaf class=1 belief= 1.000000 counts=(array([1]), array([2]))\nroot - Up - Up - Up - Up - Up - Up, <cgaf> - Leaf class=0 belief= 0.963225 counts=(array([0, 1]), array([681, 26]))\n\n**************************************************\n0.6869 secs\n"
-    }
-   ],
+   "outputs": [],
    "source": [
     "t = time.time()\n",
     "for C in (.001, .01, 1, 5, 17):\n",
@@ -216,20 +199,16 @@
    "metadata": {},
    "source": [
     "## Test iterator\n",
-    "Check different weays of using the iterator"
+    "Check different ways of using the iterator"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
-   "metadata": {},
-   "outputs": [
-    {
-     "output_type": "stream",
-     "name": "stdout",
-     "text": "root\nroot - Down\nroot - Down - Down, <pure> - Leaf class=1 belief= 1.000000 counts=(array([1]), array([304]))\nroot - Down - Up, <pure> - Leaf class=0 belief= 1.000000 counts=(array([0]), array([8]))\nroot - Up\nroot - Up - Down\nroot - Up - Down - Down, <pure> - Leaf class=1 belief= 1.000000 counts=(array([1]), array([4]))\nroot - Up - Down - Up, <pure> - Leaf class=0 belief= 1.000000 counts=(array([0]), array([3]))\nroot - Up - Up\nroot - Up - Up - Down\nroot - Up - Up - Down - Down, <pure> - Leaf class=1 belief= 1.000000 counts=(array([1]), array([4]))\nroot - Up - Up - Down - Up, <pure> - Leaf class=0 belief= 1.000000 counts=(array([0]), array([2]))\nroot - Up - Up - Up\nroot - Up - Up - Up - Down\nroot - Up - Up - Up - Down - Down, <pure> - Leaf class=1 belief= 1.000000 counts=(array([1]), array([3]))\nroot - Up - Up - Up - Down - Up, <pure> - Leaf class=0 belief= 1.000000 counts=(array([0]), array([1]))\nroot - Up - Up - Up - Up\nroot - Up - Up - Up - Up - Down\nroot - Up - Up - Up - Up - Down - Down, <pure> - Leaf class=1 belief= 1.000000 counts=(array([1]), array([3]))\nroot - Up - Up - Up - Up - Down - Up, <pure> - Leaf class=0 belief= 1.000000 counts=(array([0]), array([3]))\nroot - Up - Up - Up - Up - Up\nroot - Up - Up - Up - Up - Up - Down, <pure> - Leaf class=1 belief= 1.000000 counts=(array([1]), array([2]))\nroot - Up - Up - Up - Up - Up - Up, <cgaf> - Leaf class=0 belief= 0.963225 counts=(array([0, 1]), array([681, 26]))\n"
-    }
-   ],
+   "execution_count": null,
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
    "source": [
     "#check iterator\n",
     "for i in list(clf):\n",
@@ -238,15 +217,11 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 9,
-   "metadata": {},
-   "outputs": [
-    {
-     "output_type": "stream",
-     "name": "stdout",
-     "text": "root\nroot - Down\nroot - Down - Down, <pure> - Leaf class=1 belief= 1.000000 counts=(array([1]), array([304]))\nroot - Down - Up, <pure> - Leaf class=0 belief= 1.000000 counts=(array([0]), array([8]))\nroot - Up\nroot - Up - Down\nroot - Up - Down - Down, <pure> - Leaf class=1 belief= 1.000000 counts=(array([1]), array([4]))\nroot - Up - Down - Up, <pure> - Leaf class=0 belief= 1.000000 counts=(array([0]), array([3]))\nroot - Up - Up\nroot - Up - Up - Down\nroot - Up - Up - Down - Down, <pure> - Leaf class=1 belief= 1.000000 counts=(array([1]), array([4]))\nroot - Up - Up - Down - Up, <pure> - Leaf class=0 belief= 1.000000 counts=(array([0]), array([2]))\nroot - Up - Up - Up\nroot - Up - Up - Up - Down\nroot - Up - Up - Up - Down - Down, <pure> - Leaf class=1 belief= 1.000000 counts=(array([1]), array([3]))\nroot - Up - Up - Up - Down - Up, <pure> - Leaf class=0 belief= 1.000000 counts=(array([0]), array([1]))\nroot - Up - Up - Up - Up\nroot - Up - Up - Up - Up - Down\nroot - Up - Up - Up - Up - Down - Down, <pure> - Leaf class=1 belief= 1.000000 counts=(array([1]), array([3]))\nroot - Up - Up - Up - Up - Down - Up, <pure> - Leaf class=0 belief= 1.000000 counts=(array([0]), array([3]))\nroot - Up - Up - Up - Up - Up\nroot - Up - Up - Up - Up - Up - Down, <pure> - Leaf class=1 belief= 1.000000 counts=(array([1]), array([2]))\nroot - Up - Up - Up - Up - Up - Up, <cgaf> - Leaf class=0 belief= 0.963225 counts=(array([0, 1]), array([681, 26]))\n"
-    }
-   ],
+   "execution_count": null,
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
    "source": [
     "#check iterator again\n",
     "for i in clf:\n",
@@ -262,15 +237,11 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
-   "metadata": {},
-   "outputs": [
-    {
-     "output_type": "stream",
-     "name": "stdout",
-     "text": "1 functools.partial(<function check_no_attributes_set_in_init at 0x1254f13b0>, 'Stree')\n2 functools.partial(<function check_estimators_dtypes at 0x1254e84d0>, 'Stree')\n3 functools.partial(<function check_fit_score_takes_y at 0x1254e83b0>, 'Stree')\n4 functools.partial(<function check_sample_weights_pandas_series at 0x1254e0cb0>, 'Stree')\n5 functools.partial(<function check_sample_weights_not_an_array at 0x1254e0dd0>, 'Stree')\n6 functools.partial(<function check_sample_weights_list at 0x1254e0ef0>, 'Stree')\n7 functools.partial(<function check_sample_weights_shape at 0x1254e2050>, 'Stree')\n8 functools.partial(<function check_sample_weights_invariance at 0x1254e2170>, 'Stree')\n9 functools.partial(<function check_estimators_fit_returns_self at 0x1254eb4d0>, 'Stree')\n10 functools.partial(<function check_estimators_fit_returns_self at 0x1254eb4d0>, 'Stree', readonly_memmap=True)\n11 functools.partial(<function check_complex_data at 0x1254e2320>, 'Stree')\n12 functools.partial(<function check_dtype_object at 0x1254e2290>, 'Stree')\n13 functools.partial(<function check_estimators_empty_data_messages at 0x1254e85f0>, 'Stree')\n14 functools.partial(<function check_pipeline_consistency at 0x1254e8290>, 'Stree')\n15 functools.partial(<function check_estimators_nan_inf at 0x1254e8710>, 'Stree')\n16 functools.partial(<function check_estimators_overwrite_params at 0x1254f1290>, 'Stree')\n17 functools.partial(<function check_estimator_sparse_data at 0x1254e0b90>, 'Stree')\n18 functools.partial(<function check_estimators_pickle at 0x1254e8950>, 'Stree')\n19 functools.partial(<function check_classifier_data_not_an_array at 0x1254f15f0>, 'Stree')\n20 functools.partial(<function check_classifiers_one_label at 0x1254eb050>, 'Stree')\n21 functools.partial(<function check_classifiers_classes at 0x1254eba70>, 'Stree')\n22 functools.partial(<function check_estimators_partial_fit_n_features at 0x1254e8a70>, 'Stree')\n23 functools.partial(<function check_classifiers_train at 0x1254eb170>, 'Stree')\n24 functools.partial(<function check_classifiers_train at 0x1254eb170>, 'Stree', readonly_memmap=True)\n25 functools.partial(<function check_classifiers_train at 0x1254eb170>, 'Stree', readonly_memmap=True, X_dtype='float32')\n26 functools.partial(<function check_classifiers_regression_target at 0x1254f40e0>, 'Stree')\n27 functools.partial(<function check_supervised_y_no_nan at 0x1254da9e0>, 'Stree')\n28 functools.partial(<function check_supervised_y_2d at 0x1254eb710>, 'Stree')\n29 functools.partial(<function check_estimators_unfitted at 0x1254eb5f0>, 'Stree')\n30 functools.partial(<function check_non_transformer_estimators_n_iter at 0x1254f1c20>, 'Stree')\n31 functools.partial(<function check_decision_proba_consistency at 0x1254f4200>, 'Stree')\n32 functools.partial(<function check_fit2d_predict1d at 0x1254e2830>, 'Stree')\n33 functools.partial(<function check_methods_subset_invariance at 0x1254e29e0>, 'Stree')\n34 functools.partial(<function check_fit2d_1sample at 0x1254e2b00>, 'Stree')\n35 functools.partial(<function check_fit2d_1feature at 0x1254e2c20>, 'Stree')\n36 functools.partial(<function check_fit1d at 0x1254e2d40>, 'Stree')\n37 functools.partial(<function check_get_params_invariance at 0x1254f1e60>, 'Stree')\n38 functools.partial(<function check_set_params at 0x1254f1f80>, 'Stree')\n39 functools.partial(<function check_dict_unchanged at 0x1254e2440>, 'Stree')\n40 functools.partial(<function check_dont_overwrite_parameters at 0x1254e2710>, 'Stree')\n41 functools.partial(<function check_fit_idempotent at 0x1254f43b0>, 'Stree')\n42 functools.partial(<function check_n_features_in at 0x1254f4440>, 'Stree')\n43 functools.partial(<function check_requires_y_none at 0x1254f44d0>, 'Stree')\n"
-    }
-   ],
+   "execution_count": null,
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
    "source": [
     "# Make checks one by one\n",
     "c = 0\n",
@@ -283,7 +254,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 11,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -300,15 +271,11 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 12,
-   "metadata": {},
-   "outputs": [
-    {
-     "output_type": "stream",
-     "name": "stdout",
-     "text": "== Not Weighted ===\nSVC train score ..: 0.9521072796934866\nSTree train score : 0.9578544061302682\nSVC test score ...: 0.9553571428571429\nSTree test score .: 0.9575892857142857\n==== Weighted =====\nSVC train score ..: 0.9616858237547893\nSTree train score : 0.9616858237547893\nSVC test score ...: 0.9642857142857143\nSTree test score .: 0.9598214285714286\n*SVC test score ..: 0.951413553411694\n*STree test score : 0.9480517444389333\n"
-    }
-   ],
+   "execution_count": null,
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
    "source": [
     "svc = SVC(C=7, kernel='rbf', gamma=.001, random_state=random_state)\n",
     "clf = Stree(C=17, kernel='rbf', gamma=.001, random_state=random_state)\n",
@@ -332,25 +299,47 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 13,
-   "metadata": {},
-   "outputs": [
-    {
-     "output_type": "stream",
-     "name": "stdout",
-     "text": "root\nroot - Down\nroot - Down - Down, <cgaf> - Leaf class=1 belief= 0.969325 counts=(array([0, 1]), array([ 10, 316]))\nroot - Down - Up, <pure> - Leaf class=0 belief= 1.000000 counts=(array([0]), array([1]))\nroot - Up, <cgaf> - Leaf class=0 belief= 0.958159 counts=(array([0, 1]), array([687, 30]))\n\n"
-    }
-   ],
+   "execution_count": null,
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
    "source": [
     "print(clf)"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Test max_features"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
+   "source": [
+    "for max_features in [None, \"auto\", \"log2\", 7, .5, .1, .7]:\n",
+    "    now = time.time()\n",
+    "    print(\"*\"*40)\n",
+    "    clf = Stree(random_state=random_state, max_features=max_features)\n",
+    "    clf.fit(Xtrain, ytrain)\n",
+    "    print(f\"max_features {max_features} = {clf.max_features_}\")\n",
+    "    print(\"Train score :\", clf.score(Xtrain, ytrain))\n",
+    "    print(\"Test score .:\", clf.score(Xtest, ytest))\n",
+    "    print(f\"Took {time.time() - now:.2f} seconds\")"
+   ]
+  }
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3.7.6 64-bit ('general': venv)",
+   "display_name": "Python 3",
    "language": "python",
-   "name": "python37664bitgeneralvenvfbd0a23e74cf4e778460f5ffc6761f39"
+   "name": "python3"
   },
   "language_info": {
    "codemirror_mode": {
@@ -362,9 +351,9 @@
    "name": "python",
    "nbconvert_exporter": "python",
```
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.7.6-final"
|
||||
"version": "3.8.2-final"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"nbformat_minor": 4
|
||||
}
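For reference, a minimal sketch of the loop the "Make checks one by one" cell above drives; it assumes scikit-learn >= 0.23, where check_estimator accepts generate_only=True and yields the (estimator, check) pairs listed in the output, and it is not part of this changeset.

# Sketch only: run scikit-learn's estimator checks against Stree one at a
# time, numbering them as in the notebook output above.
from sklearn.utils.estimator_checks import check_estimator
from stree import Stree

c = 0
for estimator, check in check_estimator(Stree(), generate_only=True):
    c += 1
    print(c, check)
    check(estimator)  # raises an AssertionError if the check fails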
@@ -1,244 +1,253 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Test Gridsearch\n",
|
||||
"with different kernels and different configurations"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Setup\n",
|
||||
"Uncomment the next cell if STree is not already installed"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#\n",
|
||||
"# Google Colab setup\n",
|
||||
"#\n",
|
||||
"#!pip install git+https://github.com/doctorado-ml/stree"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"id": "zIHKVxthDZEa",
|
||||
"colab_type": "code",
|
||||
"colab": {}
|
||||
},
|
||||
"source": [
|
||||
"from sklearn.ensemble import AdaBoostClassifier\n",
|
||||
"from sklearn.svm import LinearSVC\n",
|
||||
"from sklearn.model_selection import GridSearchCV, train_test_split\n",
|
||||
"from stree import Stree"
|
||||
],
|
||||
"execution_count": 2,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"id": "IEmq50QgDZEi",
|
||||
"colab_type": "code",
|
||||
"colab": {}
|
||||
},
|
||||
"source": [
|
||||
"import os\n",
|
||||
"if not os.path.isfile('data/creditcard.csv'):\n",
|
||||
" !wget --no-check-certificate --content-disposition http://nube.jccm.es/index.php/s/Zs7SYtZQJ3RQ2H2/download\n",
|
||||
" !tar xzf creditcard.tgz"
|
||||
],
|
||||
"execution_count": 3,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"id": "z9Q-YUfBDZEq",
|
||||
"colab_type": "code",
|
||||
"colab": {},
|
||||
"outputId": "afc822fb-f16a-4302-8a67-2b9e2880159b"
|
||||
},
|
||||
"source": [
|
||||
"random_state=1\n",
|
||||
"\n",
|
||||
"def load_creditcard(n_examples=0):\n",
|
||||
" import pandas as pd\n",
|
||||
" import numpy as np\n",
|
||||
" import random\n",
|
||||
" df = pd.read_csv('data/creditcard.csv')\n",
|
||||
" print(\"Fraud: {0:.3f}% {1}\".format(df.Class[df.Class == 1].count()*100/df.shape[0], df.Class[df.Class == 1].count()))\n",
|
||||
" print(\"Valid: {0:.3f}% {1}\".format(df.Class[df.Class == 0].count()*100/df.shape[0], df.Class[df.Class == 0].count()))\n",
|
||||
" y = df.Class\n",
|
||||
" X = df.drop(['Class', 'Time', 'Amount'], axis=1).values\n",
|
||||
" if n_examples > 0:\n",
|
||||
" # Take first n_examples samples\n",
|
||||
" X = X[:n_examples, :]\n",
|
||||
" y = y[:n_examples, :]\n",
|
||||
" else:\n",
|
||||
" # Take all the positive samples with a number of random negatives\n",
|
||||
" if n_examples < 0:\n",
|
||||
" Xt = X[(y == 1).ravel()]\n",
|
||||
" yt = y[(y == 1).ravel()]\n",
|
||||
" indices = random.sample(range(X.shape[0]), -1 * n_examples)\n",
|
||||
" X = np.append(Xt, X[indices], axis=0)\n",
|
||||
" y = np.append(yt, y[indices], axis=0)\n",
|
||||
" print(\"X.shape\", X.shape, \" y.shape\", y.shape)\n",
|
||||
" print(\"Fraud: {0:.3f}% {1}\".format(len(y[y == 1])*100/X.shape[0], len(y[y == 1])))\n",
|
||||
" print(\"Valid: {0:.3f}% {1}\".format(len(y[y == 0]) * 100 / X.shape[0], len(y[y == 0])))\n",
|
||||
" Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, train_size=0.7, shuffle=True, random_state=random_state, stratify=y)\n",
|
||||
" return Xtrain, Xtest, ytrain, ytest\n",
|
||||
"\n",
|
||||
"data = load_creditcard(-1000) # Take all true samples + 1000 of the others\n",
|
||||
"# data = load_creditcard(5000) # Take the first 5000 samples\n",
|
||||
"# data = load_creditcard(0) # Take all the samples\n",
|
||||
"\n",
|
||||
"Xtrain = data[0]\n",
|
||||
"Xtest = data[1]\n",
|
||||
"ytrain = data[2]\n",
|
||||
"ytest = data[3]"
|
||||
],
|
||||
"execution_count": 4,
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": "Fraud: 0.173% 492\nValid: 99.827% 284315\nX.shape (1492, 28) y.shape (1492,)\nFraud: 33.244% 496\nValid: 66.756% 996\n"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Tests"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"id": "HmX3kR4PDZEw",
|
||||
"colab_type": "code",
|
||||
"colab": {}
|
||||
},
|
||||
"source": [
|
||||
"parameters = {\n",
|
||||
" 'base_estimator': [Stree()],\n",
|
||||
" 'n_estimators': [10, 25],\n",
|
||||
" 'learning_rate': [.5, 1],\n",
|
||||
" 'base_estimator__tol': [.1, 1e-02],\n",
|
||||
" 'base_estimator__max_depth': [3, 5],\n",
|
||||
" 'base_estimator__C': [1, 3],\n",
|
||||
" 'base_estimator__kernel': ['linear', 'poly', 'rbf']\n",
|
||||
"}"
|
||||
],
|
||||
"execution_count": 9,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "execute_result",
|
||||
"data": {
|
||||
"text/plain": "{'C': 1.0,\n 'degree': 3,\n 'gamma': 'scale',\n 'kernel': 'linear',\n 'max_depth': None,\n 'max_iter': 1000,\n 'min_samples_split': 0,\n 'random_state': None,\n 'tol': 0.0001}"
|
||||
},
|
||||
"metadata": {},
|
||||
"execution_count": 14
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"Stree().get_params()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"id": "CrcB8o6EDZE5",
|
||||
"colab_type": "code",
|
||||
"colab": {},
|
||||
"outputId": "7703413a-d563-4289-a13b-532f38f82762"
|
||||
},
|
||||
"source": [
|
||||
"random_state=2020\n",
|
||||
"clf = AdaBoostClassifier(random_state=random_state)\n",
|
||||
"grid = GridSearchCV(clf, parameters, verbose=10, n_jobs=-1, return_train_score=True)\n",
|
||||
"grid.fit(Xtrain, ytrain)"
|
||||
],
|
||||
"execution_count": 11,
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": "Fitting 5 folds for each of 96 candidates, totalling 480 fits\n[Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers.\n[Parallel(n_jobs=-1)]: Done 2 tasks | elapsed: 3.6s\n[Parallel(n_jobs=-1)]: Done 9 tasks | elapsed: 4.2s\n[Parallel(n_jobs=-1)]: Done 16 tasks | elapsed: 4.8s\n[Parallel(n_jobs=-1)]: Done 25 tasks | elapsed: 5.3s\n[Parallel(n_jobs=-1)]: Done 34 tasks | elapsed: 6.2s\n[Parallel(n_jobs=-1)]: Done 45 tasks | elapsed: 7.2s\n[Parallel(n_jobs=-1)]: Done 56 tasks | elapsed: 8.9s\n[Parallel(n_jobs=-1)]: Done 69 tasks | elapsed: 10.7s\n[Parallel(n_jobs=-1)]: Done 82 tasks | elapsed: 12.7s\n[Parallel(n_jobs=-1)]: Done 97 tasks | elapsed: 16.7s\n[Parallel(n_jobs=-1)]: Done 112 tasks | elapsed: 19.4s\n[Parallel(n_jobs=-1)]: Done 129 tasks | elapsed: 24.4s\n[Parallel(n_jobs=-1)]: Done 146 tasks | elapsed: 29.3s\n[Parallel(n_jobs=-1)]: Done 165 tasks | elapsed: 32.7s\n[Parallel(n_jobs=-1)]: Done 184 tasks | elapsed: 36.4s\n[Parallel(n_jobs=-1)]: Done 205 tasks | elapsed: 39.7s\n[Parallel(n_jobs=-1)]: Done 226 tasks | elapsed: 43.7s\n[Parallel(n_jobs=-1)]: Done 249 tasks | elapsed: 46.6s\n[Parallel(n_jobs=-1)]: Done 272 tasks | elapsed: 48.8s\n[Parallel(n_jobs=-1)]: Done 297 tasks | elapsed: 52.0s\n[Parallel(n_jobs=-1)]: Done 322 tasks | elapsed: 55.9s\n[Parallel(n_jobs=-1)]: Done 349 tasks | elapsed: 1.0min\n[Parallel(n_jobs=-1)]: Done 376 tasks | elapsed: 1.2min\n[Parallel(n_jobs=-1)]: Done 405 tasks | elapsed: 1.3min\n[Parallel(n_jobs=-1)]: Done 434 tasks | elapsed: 1.3min\n[Parallel(n_jobs=-1)]: Done 465 tasks | elapsed: 1.4min\n[Parallel(n_jobs=-1)]: Done 480 out of 480 | elapsed: 1.5min finished\n"
|
||||
},
|
||||
{
|
||||
"output_type": "execute_result",
|
||||
"data": {
|
||||
"text/plain": "GridSearchCV(estimator=AdaBoostClassifier(random_state=2020), n_jobs=-1,\n param_grid={'base_estimator': [Stree(C=1, max_depth=3, tol=0.1)],\n 'base_estimator__C': [1, 3],\n 'base_estimator__kernel': ['linear', 'poly', 'rbf'],\n 'base_estimator__max_depth': [3, 5],\n 'base_estimator__tol': [0.1, 0.01],\n 'learning_rate': [0.5, 1], 'n_estimators': [10, 25]},\n return_train_score=True, verbose=10)"
|
||||
},
|
||||
"metadata": {},
|
||||
"execution_count": 11
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"id": "ZjX88NoYDZE8",
|
||||
"colab_type": "code",
|
||||
"colab": {},
|
||||
"outputId": "285163c8-fa33-4915-8ae7-61c4f7844344"
|
||||
},
|
||||
"source": [
|
||||
"print(\"Best estimator: \", grid.best_estimator_)\n",
|
||||
"print(\"Best hyperparameters: \", grid.best_params_)\n",
|
||||
"print(\"Best accuracy: \", grid.best_score_)"
|
||||
],
|
||||
"execution_count": 16,
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": "Best estimator: AdaBoostClassifier(base_estimator=Stree(C=1, max_depth=3, tol=0.1),\n learning_rate=0.5, n_estimators=10, random_state=2020)\nBest hyperparameters: {'base_estimator': Stree(C=1, max_depth=3, tol=0.1), 'base_estimator__C': 1, 'base_estimator__kernel': 'linear', 'base_estimator__max_depth': 3, 'base_estimator__tol': 0.1, 'learning_rate': 0.5, 'n_estimators': 10}\nBest accuracy: 0.9492316893632683\n"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.7.6-final"
|
||||
},
|
||||
"orig_nbformat": 2,
|
||||
"kernelspec": {
|
||||
"name": "python37664bitgeneralvenvfbd0a23e74cf4e778460f5ffc6761f39",
|
||||
"display_name": "Python 3.7.6 64-bit ('general': venv)"
|
||||
},
|
||||
"colab": {
|
||||
"name": "gridsearch.ipynb",
|
||||
"provenance": []
|
||||
}
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Test Gridsearch\n",
|
||||
"with different kernels and different configurations"
|
||||
]
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Setup\n",
|
||||
"Uncomment the next cell if STree is not already installed"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#\n",
|
||||
"# Google Colab setup\n",
|
||||
"#\n",
|
||||
"#!pip install git+https://github.com/doctorado-ml/stree\n",
|
||||
"!pip install pandas"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {},
|
||||
"colab_type": "code",
|
||||
"id": "zIHKVxthDZEa"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import random\n",
|
||||
"import os\n",
|
||||
"import pandas as pd\n",
|
||||
"import numpy as np\n",
|
||||
"from sklearn.ensemble import AdaBoostClassifier\n",
|
||||
"from sklearn.svm import LinearSVC\n",
|
||||
"from sklearn.model_selection import GridSearchCV, train_test_split\n",
|
||||
"from stree import Stree"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {},
|
||||
"colab_type": "code",
|
||||
"id": "IEmq50QgDZEi"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"if not os.path.isfile('data/creditcard.csv'):\n",
|
||||
" !wget --no-check-certificate --content-disposition http://nube.jccm.es/index.php/s/Zs7SYtZQJ3RQ2H2/download\n",
|
||||
" !tar xzf creditcard.tgz"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {},
|
||||
"colab_type": "code",
|
||||
"id": "z9Q-YUfBDZEq",
|
||||
"outputId": "afc822fb-f16a-4302-8a67-2b9e2880159b",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"random_state=1\n",
|
||||
"\n",
|
||||
"def load_creditcard(n_examples=0):\n",
|
||||
" df = pd.read_csv('data/creditcard.csv')\n",
|
||||
" print(\"Fraud: {0:.3f}% {1}\".format(df.Class[df.Class == 1].count()*100/df.shape[0], df.Class[df.Class == 1].count()))\n",
|
||||
" print(\"Valid: {0:.3f}% {1}\".format(df.Class[df.Class == 0].count()*100/df.shape[0], df.Class[df.Class == 0].count()))\n",
|
||||
" y = df.Class\n",
|
||||
" X = df.drop(['Class', 'Time', 'Amount'], axis=1).values\n",
|
||||
" if n_examples > 0:\n",
|
||||
" # Take first n_examples samples\n",
|
||||
" X = X[:n_examples, :]\n",
|
||||
" y = y[:n_examples, :]\n",
|
||||
" else:\n",
|
||||
" # Take all the positive samples with a number of random negatives\n",
|
||||
" if n_examples < 0:\n",
|
||||
" Xt = X[(y == 1).ravel()]\n",
|
||||
" yt = y[(y == 1).ravel()]\n",
|
||||
" indices = random.sample(range(X.shape[0]), -1 * n_examples)\n",
|
||||
" X = np.append(Xt, X[indices], axis=0)\n",
|
||||
" y = np.append(yt, y[indices], axis=0)\n",
|
||||
" print(\"X.shape\", X.shape, \" y.shape\", y.shape)\n",
|
||||
" print(\"Fraud: {0:.3f}% {1}\".format(len(y[y == 1])*100/X.shape[0], len(y[y == 1])))\n",
|
||||
" print(\"Valid: {0:.3f}% {1}\".format(len(y[y == 0]) * 100 / X.shape[0], len(y[y == 0])))\n",
|
||||
" Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, train_size=0.7, shuffle=True, random_state=random_state, stratify=y)\n",
|
||||
" return Xtrain, Xtest, ytrain, ytest\n",
|
||||
"\n",
|
||||
"data = load_creditcard(-1000) # Take all true samples + 1000 of the others\n",
|
||||
"# data = load_creditcard(5000) # Take the first 5000 samples\n",
|
||||
"# data = load_creditcard(0) # Take all the samples\n",
|
||||
"\n",
|
||||
"Xtrain = data[0]\n",
|
||||
"Xtest = data[1]\n",
|
||||
"ytrain = data[2]\n",
|
||||
"ytest = data[3]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Tests"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {},
|
||||
"colab_type": "code",
|
||||
"id": "HmX3kR4PDZEw"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"parameters = [{\n",
|
||||
" 'base_estimator': [Stree(random_state=random_state)],\n",
|
||||
" 'n_estimators': [10, 25],\n",
|
||||
" 'learning_rate': [.5, 1],\n",
|
||||
" 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
|
||||
" 'base_estimator__tol': [.1, 1e-02],\n",
|
||||
" 'base_estimator__max_depth': [3, 5, 7],\n",
|
||||
" 'base_estimator__C': [1, 7, 55],\n",
|
||||
" 'base_estimator__kernel': ['linear']\n",
|
||||
"},\n",
|
||||
"{\n",
|
||||
" 'base_estimator': [Stree(random_state=random_state)],\n",
|
||||
" 'n_estimators': [10, 25],\n",
|
||||
" 'learning_rate': [.5, 1],\n",
|
||||
" 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
|
||||
" 'base_estimator__tol': [.1, 1e-02],\n",
|
||||
" 'base_estimator__max_depth': [3, 5, 7],\n",
|
||||
" 'base_estimator__C': [1, 7, 55],\n",
|
||||
" 'base_estimator__degree': [3, 5, 7],\n",
|
||||
" 'base_estimator__kernel': ['poly']\n",
|
||||
"},\n",
|
||||
"{\n",
|
||||
" 'base_estimator': [Stree(random_state=random_state)],\n",
|
||||
" 'n_estimators': [10, 25],\n",
|
||||
" 'learning_rate': [.5, 1],\n",
|
||||
" 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
|
||||
" 'base_estimator__tol': [.1, 1e-02],\n",
|
||||
" 'base_estimator__max_depth': [3, 5, 7],\n",
|
||||
" 'base_estimator__C': [1, 7, 55],\n",
|
||||
" 'base_estimator__gamma': [.1, 1, 10],\n",
|
||||
" 'base_estimator__kernel': ['rbf']\n",
|
||||
"}]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"Stree().get_params()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {},
|
||||
"colab_type": "code",
|
||||
"id": "CrcB8o6EDZE5",
|
||||
"outputId": "7703413a-d563-4289-a13b-532f38f82762",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"clf = AdaBoostClassifier(random_state=random_state, algorithm=\"SAMME\")\n",
|
||||
"grid = GridSearchCV(clf, parameters, verbose=5, n_jobs=-1, return_train_score=True)\n",
|
||||
"grid.fit(Xtrain, ytrain)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {},
|
||||
"colab_type": "code",
|
||||
"id": "ZjX88NoYDZE8",
|
||||
"outputId": "285163c8-fa33-4915-8ae7-61c4f7844344",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"Best estimator: \", grid.best_estimator_)\n",
|
||||
"print(\"Best hyperparameters: \", grid.best_params_)\n",
|
||||
"print(\"Best accuracy: \", grid.best_score_)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Best estimator: AdaBoostClassifier(algorithm='SAMME',\n",
|
||||
" base_estimator=Stree(C=55, max_depth=7, random_state=1,\n",
|
||||
" split_criteria='max_samples', tol=0.1),\n",
|
||||
" learning_rate=0.5, n_estimators=25, random_state=1)\n",
|
||||
"Best hyperparameters: {'base_estimator': Stree(C=55, max_depth=7, random_state=1, split_criteria='max_samples', tol=0.1), 'base_estimator__C': 55, 'base_estimator__kernel': 'linear', 'base_estimator__max_depth': 7, 'base_estimator__split_criteria': 'max_samples', 'base_estimator__tol': 0.1, 'learning_rate': 0.5, 'n_estimators': 25}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Best accuracy: 0.9511777695988222"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"name": "gridsearch.ipynb",
|
||||
"provenance": []
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.2-final"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
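As a follow-up to the grid search above, a minimal sketch of refitting the best configuration it reports; the hyperparameter values are copied from the grid.best_params_ markdown cell, while Xtrain/ytrain/Xtest/ytest are assumed to come from the notebook's load_creditcard split.

# Sketch only: rebuild and refit the best model reported by the grid search.
from sklearn.ensemble import AdaBoostClassifier
from stree import Stree

best = AdaBoostClassifier(
    algorithm="SAMME",
    base_estimator=Stree(C=55, kernel="linear", max_depth=7,
                         split_criteria="max_samples", tol=0.1,
                         random_state=1),
    learning_rate=0.5,
    n_estimators=25,
    random_state=1,
)
best.fit(Xtrain, ytrain)
print("Test accuracy:", best.score(Xtest, ytest))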
File diff suppressed because one or more lines are too long
@@ -1,5 +1 @@
-numpy
-scikit-learn
-pandas
-matplotlib
-ipympl
+scikit-learn>0.24
1
runtime.txt
Normal file
@@ -0,0 +1 @@
+python-3.8
38
setup.py
@@ -1,36 +1,50 @@
 import setuptools

-__version__ = "0.9rc4"
-__author__ = "Ricardo Montañana Gómez"
-

 def readme():
     with open("README.md") as f:
         return f.read()


+def get_data(field):
+    item = ""
+    with open("stree/__init__.py") as f:
+        for line in f.readlines():
+            if line.startswith(f"__{field}__"):
+                delim = '"' if '"' in line else "'"
+                item = line.split(delim)[1]
+                break
+        else:
+            raise RuntimeError(f"Unable to find {field} string.")
+    return item
+
+
 setuptools.setup(
     name="STree",
-    version=__version__,
-    license="MIT License",
+    version=get_data("version"),
+    license=get_data("license"),
     description="Oblique decision tree with svm nodes",
     long_description=readme(),
     long_description_content_type="text/markdown",
     packages=setuptools.find_packages(),
-    url="https://github.com/doctorado-ml/stree",
-    author=__author__,
-    author_email="ricardo.montanana@alu.uclm.es",
+    url="https://github.com/Doctorado-ML/STree#stree",
+    project_urls={
+        "Code": "https://github.com/Doctorado-ML/STree",
+        "Documentation": "https://stree.readthedocs.io/en/latest/index.html",
+    },
+    author=get_data("author"),
+    author_email=get_data("author_email"),
     keywords="scikit-learn oblique-classifier oblique-decision-tree decision-\
 tree svm svc",
     classifiers=[
-        "Development Status :: 4 - Beta",
-        "License :: OSI Approved :: MIT License",
-        "Programming Language :: Python :: 3.7",
+        "Development Status :: 5 - Production/Stable",
+        "License :: OSI Approved :: " + get_data("license"),
+        "Programming Language :: Python :: 3.8",
         "Natural Language :: English",
         "Topic :: Scientific/Engineering :: Artificial Intelligence",
         "Intended Audience :: Science/Research",
     ],
-    install_requires=["scikit-learn>=0.23.0", "numpy", "matplotlib", "ipympl"],
+    install_requires=["scikit-learn", "numpy"],
     test_suite="stree.tests",
     zip_safe=False,
 )
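The get_data helper introduced above makes stree/__init__.py the single source of truth for the package metadata; a quick sketch of what it resolves to, given the __init__.py added later in this changeset.

# Sketch only: get_data() scans stree/__init__.py for "__<field>__" lines.
print(get_data("version"))  # "1.1"
print(get_data("license"))  # "MIT License"
print(get_data("author"))   # "Ricardo Montañana Gómez"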
10
stree/.readthedocs.yaml
Normal file
@@ -0,0 +1,10 @@
+version: 2
+
+sphinx:
+  configuration: docs/source/conf.py
+
+python:
+  version: 3.8
+  install:
+    - requirements: requirements.txt
+    - requirements: docs/requirements.txt
860
stree/Strees.py
File diff suppressed because it is too large
@@ -1,205 +0,0 @@
|
||||
"""
|
||||
__author__ = "Ricardo Montañana Gómez"
|
||||
__copyright__ = "Copyright 2020, Ricardo Montañana Gómez"
|
||||
__license__ = "MIT"
|
||||
__version__ = "0.9"
|
||||
Plot 3D views of nodes in Stree
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
from sklearn.decomposition import PCA
|
||||
from mpl_toolkits.mplot3d import Axes3D
|
||||
|
||||
from .Strees import Stree, Snode, Siterator
|
||||
|
||||
|
||||
class Snode_graph(Snode):
|
||||
def __init__(self, node: Stree):
|
||||
self._plot_size = (8, 8)
|
||||
self._xlimits = (None, None)
|
||||
self._ylimits = (None, None)
|
||||
self._zlimits = (None, None)
|
||||
n = Snode.copy(node)
|
||||
super().__init__(n._clf, n._X, n._y, n._title)
|
||||
|
||||
def set_plot_size(self, size: tuple):
|
||||
self._plot_size = size
|
||||
|
||||
def get_plot_size(self) -> tuple:
|
||||
return self._plot_size
|
||||
|
||||
def _is_pure(self) -> bool:
|
||||
"""is considered pure a leaf node with one label
|
||||
"""
|
||||
if self.is_leaf():
|
||||
return self._belief == 1.0
|
||||
return False
|
||||
|
||||
def set_axis_limits(self, limits: tuple):
|
||||
self._xlimits, self._ylimits, self._zlimits = limits
|
||||
|
||||
def get_axis_limits(self) -> tuple:
|
||||
return self._xlimits, self._ylimits, self._zlimits
|
||||
|
||||
def _set_graphics_axis(self, ax: Axes3D):
|
||||
ax.set_xlim(self._xlimits)
|
||||
ax.set_ylim(self._ylimits)
|
||||
ax.set_zlim(self._zlimits)
|
||||
|
||||
def save_hyperplane(
|
||||
self, save_folder: str = "./", save_prefix: str = "", save_seq: int = 1
|
||||
):
|
||||
_, fig = self.plot_hyperplane()
|
||||
name = os.path.join(save_folder, f"{save_prefix}STnode{save_seq}.png")
|
||||
fig.savefig(name, bbox_inches="tight")
|
||||
plt.close(fig)
|
||||
|
||||
def _get_cmap(self):
|
||||
cmap = "jet"
|
||||
if self._is_pure() and self._class == 1:
|
||||
cmap = "jet_r"
|
||||
return cmap
|
||||
|
||||
def _graph_title(self):
|
||||
n_class, card = np.unique(self._y, return_counts=True)
|
||||
return f"{self._title} {n_class} {card}"
|
||||
|
||||
def plot_hyperplane(self, plot_distribution: bool = True):
|
||||
fig = plt.figure(figsize=self._plot_size)
|
||||
ax = fig.add_subplot(1, 1, 1, projection="3d")
|
||||
if not self._is_pure():
|
||||
            # Can't plot the hyperplane of a pure leaf because it has no
            # classifier
|
||||
# get the splitting hyperplane
|
||||
def hyperplane(x, y):
|
||||
return (
|
||||
-self._clf.intercept_
|
||||
- self._clf.coef_[0][0] * x
|
||||
- self._clf.coef_[0][1] * y
|
||||
) / self._clf.coef_[0][2]
|
||||
|
||||
tmpx = np.linspace(self._X[:, 0].min(), self._X[:, 0].max())
|
||||
tmpy = np.linspace(self._X[:, 1].min(), self._X[:, 1].max())
|
||||
xx, yy = np.meshgrid(tmpx, tmpy)
|
||||
ax.plot_surface(
|
||||
xx,
|
||||
yy,
|
||||
hyperplane(xx, yy),
|
||||
alpha=0.5,
|
||||
antialiased=True,
|
||||
rstride=1,
|
||||
cstride=1,
|
||||
cmap="seismic",
|
||||
)
|
||||
self._set_graphics_axis(ax)
|
||||
if plot_distribution:
|
||||
self.plot_distribution(ax)
|
||||
else:
|
||||
plt.title(self._graph_title())
|
||||
plt.show()
|
||||
return ax, fig
|
||||
|
||||
def plot_distribution(self, ax: Axes3D = None):
|
||||
if ax is None:
|
||||
fig = plt.figure(figsize=self._plot_size)
|
||||
ax = fig.add_subplot(1, 1, 1, projection="3d")
|
||||
plt.title(self._graph_title())
|
||||
cmap = self._get_cmap()
|
||||
ax.scatter(
|
||||
self._X[:, 0], self._X[:, 1], self._X[:, 2], c=self._y, cmap=cmap
|
||||
)
|
||||
ax.set_xlabel("X0")
|
||||
ax.set_ylabel("X1")
|
||||
ax.set_zlabel("X2")
|
||||
plt.show()
|
||||
|
||||
|
||||
class Stree_grapher(Stree):
|
||||
"""Build 3d graphs of any dataset, if it's more than 3 features PCA shall
|
||||
make its magic
|
||||
"""
|
||||
|
||||
def __init__(self, params: dict):
|
||||
self._plot_size = (8, 8)
|
||||
self._tree_gr = None
|
||||
# make Snode store X's
|
||||
os.environ["TESTING"] = "1"
|
||||
self._fitted = False
|
||||
self._pca = None
|
||||
super().__init__(**params)
|
||||
|
||||
def __del__(self):
|
||||
try:
|
||||
os.environ.pop("TESTING")
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
def _copy_tree(self, node: Snode) -> Snode_graph:
|
||||
mirror = Snode_graph(node)
|
||||
# clone node
|
||||
mirror._class = node._class
|
||||
mirror._belief = node._belief
|
||||
if node.get_down() is not None:
|
||||
mirror.set_down(self._copy_tree(node.get_down()))
|
||||
if node.get_up() is not None:
|
||||
mirror.set_up(self._copy_tree(node.get_up()))
|
||||
return mirror
|
||||
|
||||
def fit(
|
||||
self, X: np.array, y: np.array, sample_weight: np.array = None
|
||||
) -> "Stree_grapher":
|
||||
"""Fit the Stree and copy the tree in a Snode_graph tree
|
||||
|
||||
:param X: Dataset
|
||||
:type X: np.array
|
||||
:param y: Labels
|
||||
:type y: np.array
|
||||
:return: Stree model
|
||||
:rtype: Stree
|
||||
"""
|
||||
if X.shape[1] != 3:
|
||||
self._pca = PCA(n_components=3)
|
||||
X = self._pca.fit_transform(X)
|
||||
super().fit(X, y, sample_weight=sample_weight)
|
||||
self._tree_gr = self._copy_tree(self.tree_)
|
||||
self._fitted = True
|
||||
return self
|
||||
|
||||
def score(self, X: np.array, y: np.array) -> float:
|
||||
self._check_fitted()
|
||||
if X.shape[1] != 3:
|
||||
X = self._pca.transform(X)
|
||||
return super().score(X, y)
|
||||
|
||||
def _check_fitted(self):
|
||||
if not self._fitted:
|
||||
raise Exception("Have to fit the grapher first!")
|
||||
|
||||
def save_all(self, save_folder: str = "./", save_prefix: str = ""):
|
||||
"""Save all the node plots in png format, each with a sequence number
|
||||
|
||||
:param save_folder: folder where the plots are saved, defaults to './'
|
||||
:type save_folder: str, optional
|
||||
"""
|
||||
self._check_fitted()
|
||||
if not os.path.isdir(save_folder):
|
||||
os.mkdir(save_folder)
|
||||
seq = 1
|
||||
for node in self:
|
||||
node.save_hyperplane(
|
||||
save_folder=save_folder, save_prefix=save_prefix, save_seq=seq
|
||||
)
|
||||
seq += 1
|
||||
|
||||
def plot_all(self):
|
||||
"""Plots all the nodes
|
||||
"""
|
||||
self._check_fitted()
|
||||
for node in self:
|
||||
node.plot_hyperplane()
|
||||
|
||||
def __iter__(self):
|
||||
return Siterator(self._tree_gr)
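The removed plot_hyperplane method draws the separating plane of a linear node by solving w·x + b = 0 for the third coordinate, exactly as its inner hyperplane() helper does. A standalone sketch with illustrative coefficients follows.

# Sketch only: z-coordinate of the plane w.x + b = 0 over an (x, y) grid.
# w and b stand in for clf.coef_[0] and clf.intercept_; values are made up.
import numpy as np

w = np.array([1.0, -2.0, 0.5])
b = 0.3

def hyperplane_z(x, y):
    return (-b - w[0] * x - w[1] * y) / w[2]

print(hyperplane_z(0.0, 0.0))  # -0.6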
@@ -1,4 +1,10 @@
-from .Strees import Stree, Snode, Siterator
-from .Strees_grapher import Stree_grapher, Snode_graph
+from .Strees import Stree, Snode, Siterator, Splitter

-__all__ = ["Stree", "Snode", "Siterator", "Stree_grapher", "Snode_graph"]
+__version__ = "1.1"
+
+__author__ = "Ricardo Montañana Gómez"
+__copyright__ = "Copyright 2020-2021, Ricardo Montañana Gómez"
+__license__ = "MIT License"
+__author_email__ = "ricardo.montanana@alu.uclm.es"
+
+__all__ = ["Stree", "Snode", "Siterator", "Splitter"]
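A minimal usage sketch of the reshaped package surface (the training data here is illustrative only):

# Sketch only: the package now exports Splitter and module-level metadata.
import stree
from stree import Stree, Splitter

print(stree.__version__)  # "1.1"
clf = Stree().fit([[0.0], [1.0], [2.0], [3.0]], [0, 0, 1, 1])
print(clf.predict([[2.5]]))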
125
stree/tests/Snode_test.py
Normal file
@@ -0,0 +1,125 @@
|
||||
import os
|
||||
import unittest
|
||||
import numpy as np
|
||||
from stree import Stree, Snode
|
||||
from .utils import load_dataset
|
||||
|
||||
|
||||
class Snode_test(unittest.TestCase):
|
||||
def __init__(self, *args, **kwargs):
|
||||
self._random_state = 1
|
||||
self._clf = Stree(
|
||||
random_state=self._random_state,
|
||||
kernel="liblinear",
|
||||
multiclass_strategy="ovr",
|
||||
)
|
||||
self._clf.fit(*load_dataset(self._random_state))
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
@classmethod
|
||||
def setUp(cls):
|
||||
os.environ["TESTING"] = "1"
|
||||
|
||||
def test_attributes_in_leaves(self):
|
||||
"""Check if the attributes in leaves have correct values so they form a
|
||||
predictor
|
||||
"""
|
||||
|
||||
def check_leave(node: Snode):
|
||||
if not node.is_leaf():
|
||||
check_leave(node.get_down())
|
||||
check_leave(node.get_up())
|
||||
return
|
||||
            # Check belief in leaf
|
||||
classes, card = np.unique(node._y, return_counts=True)
|
||||
max_card = max(card)
|
||||
min_card = min(card)
|
||||
if len(classes) > 1:
|
||||
belief = max_card / (max_card + min_card)
|
||||
else:
|
||||
belief = 1
|
||||
self.assertEqual(belief, node._belief)
|
||||
# Check Class
|
||||
class_computed = classes[card == max_card]
|
||||
self.assertEqual(class_computed, node._class)
|
||||
# Check Partition column
|
||||
self.assertEqual(node._partition_column, -1)
|
||||
|
||||
check_leave(self._clf.tree_)
|
||||
|
||||
def test_nodes_coefs(self):
|
||||
"""Check if the nodes of the tree have the right attributes filled"""
|
||||
|
||||
def run_tree(node: Snode):
|
||||
if node._belief < 1:
|
||||
# only exclude pure leaves
|
||||
self.assertIsNotNone(node._clf)
|
||||
self.assertIsNotNone(node._clf.coef_)
|
||||
if node.is_leaf():
|
||||
return
|
||||
run_tree(node.get_up())
|
||||
run_tree(node.get_down())
|
||||
|
||||
model = Stree(self._random_state)
|
||||
model.fit(*load_dataset(self._random_state, 3, 4))
|
||||
run_tree(model.tree_)
|
||||
|
||||
def test_make_predictor_on_leaf(self):
|
||||
test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
|
||||
test.make_predictor()
|
||||
self.assertEqual(1, test._class)
|
||||
self.assertEqual(0.75, test._belief)
|
||||
self.assertEqual(-1, test._partition_column)
|
||||
|
||||
def test_set_title(self):
|
||||
test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
|
||||
self.assertEqual("test", test.get_title())
|
||||
test.set_title("another")
|
||||
self.assertEqual("another", test.get_title())
|
||||
|
||||
def test_set_classifier(self):
|
||||
test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
|
||||
clf = Stree()
|
||||
self.assertIsNone(test.get_classifier())
|
||||
test.set_classifier(clf)
|
||||
self.assertEqual(clf, test.get_classifier())
|
||||
|
||||
def test_set_impurity(self):
|
||||
test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
|
||||
self.assertEqual(0.0, test.get_impurity())
|
||||
test.set_impurity(54.7)
|
||||
self.assertEqual(54.7, test.get_impurity())
|
||||
|
||||
def test_set_features(self):
|
||||
test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [0, 1], 0.0, "test")
|
||||
self.assertListEqual([0, 1], test.get_features())
|
||||
test.set_features([1, 2])
|
||||
self.assertListEqual([1, 2], test.get_features())
|
||||
|
||||
def test_make_predictor_on_not_leaf(self):
|
||||
test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
|
||||
test.set_up(Snode(None, [1], [1], [], 0.0, "another_test"))
|
||||
test.make_predictor()
|
||||
self.assertIsNone(test._class)
|
||||
self.assertEqual(0, test._belief)
|
||||
self.assertEqual(-1, test._partition_column)
|
||||
self.assertEqual(-1, test.get_up()._partition_column)
|
||||
|
||||
def test_make_predictor_on_leaf_bogus_data(self):
|
||||
test = Snode(None, [1, 2, 3, 4], [], [], 0.0, "test")
|
||||
test.make_predictor()
|
||||
self.assertIsNone(test._class)
|
||||
self.assertEqual(-1, test._partition_column)
|
||||
|
||||
def test_copy_node(self):
|
||||
px = [1, 2, 3, 4]
|
||||
py = [1]
|
||||
test = Snode(Stree(), px, py, [], 0.0, "test")
|
||||
computed = Snode.copy(test)
|
||||
self.assertListEqual(computed._X, px)
|
||||
self.assertListEqual(computed._y, py)
|
||||
self.assertEqual("test", computed._title)
|
||||
self.assertIsInstance(computed._clf, Stree)
|
||||
self.assertEqual(test._partition_column, computed._partition_column)
|
||||
self.assertEqual(test._sample_weight, computed._sample_weight)
|
||||
self.assertEqual(test._scaler, computed._scaler)
|
246
stree/tests/Splitter_test.py
Normal file
@@ -0,0 +1,246 @@
|
||||
import os
|
||||
import unittest
|
||||
import random
|
||||
|
||||
import numpy as np
|
||||
from sklearn.svm import SVC
|
||||
from sklearn.datasets import load_wine, load_iris
|
||||
from stree import Splitter
|
||||
from .utils import load_dataset
|
||||
|
||||
|
||||
class Splitter_test(unittest.TestCase):
|
||||
def __init__(self, *args, **kwargs):
|
||||
self._random_state = 1
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
@staticmethod
|
||||
def build(
|
||||
clf=SVC,
|
||||
min_samples_split=0,
|
||||
feature_select="random",
|
||||
criterion="gini",
|
||||
criteria="max_samples",
|
||||
random_state=None,
|
||||
):
|
||||
return Splitter(
|
||||
clf=clf(random_state=random_state, kernel="rbf"),
|
||||
min_samples_split=min_samples_split,
|
||||
feature_select=feature_select,
|
||||
criterion=criterion,
|
||||
criteria=criteria,
|
||||
random_state=random_state,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def setUp(cls):
|
||||
os.environ["TESTING"] = "1"
|
||||
|
||||
def test_init(self):
|
||||
with self.assertRaises(ValueError):
|
||||
self.build(criterion="duck")
|
||||
with self.assertRaises(ValueError):
|
||||
self.build(feature_select="duck")
|
||||
with self.assertRaises(ValueError):
|
||||
self.build(criteria="duck")
|
||||
with self.assertRaises(ValueError):
|
||||
_ = Splitter(clf=None)
|
||||
for feature_select in ["best", "random"]:
|
||||
for criterion in ["gini", "entropy"]:
|
||||
for criteria in ["max_samples", "impurity"]:
|
||||
tcl = self.build(
|
||||
feature_select=feature_select,
|
||||
criterion=criterion,
|
||||
criteria=criteria,
|
||||
)
|
||||
self.assertEqual(feature_select, tcl._feature_select)
|
||||
self.assertEqual(criterion, tcl._criterion)
|
||||
self.assertEqual(criteria, tcl._criteria)
|
||||
|
||||
def test_gini(self):
|
||||
expected_values = [
|
||||
([0, 1, 1, 1, 1, 1, 0, 0, 0, 1], 0.48),
|
||||
([0, 1, 1, 2, 2, 3, 4, 5, 3, 2, 1, 1], 0.7777777777777778),
|
||||
([0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2], 0.520408163265306),
|
||||
([0, 0, 1, 1, 1, 1, 0, 0], 0.5),
|
||||
([0, 0, 1, 1, 2, 2, 3, 3], 0.75),
|
||||
([0, 0, 1, 1, 1, 1, 1, 1], 0.375),
|
||||
([0], 0),
|
||||
([1, 1, 1, 1], 0),
|
||||
]
|
||||
for labels, expected in expected_values:
|
||||
self.assertAlmostEqual(expected, Splitter._gini(labels))
|
||||
tcl = self.build(criterion="gini")
|
||||
self.assertAlmostEqual(expected, tcl.criterion_function(labels))
|
||||
|
||||
def test_entropy(self):
|
||||
expected_values = [
|
||||
([0, 1, 1, 1, 1, 1, 0, 0, 0, 1], 0.9709505944546686),
|
||||
([0, 1, 1, 2, 2, 3, 4, 5, 3, 2, 1, 1], 0.9111886696810589),
|
||||
([0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2], 0.8120406807940999),
|
||||
([0, 0, 1, 1, 1, 1, 0, 0], 1),
|
||||
([0, 0, 1, 1, 2, 2, 3, 3], 1),
|
||||
([0, 0, 1, 1, 1, 1, 1, 1], 0.8112781244591328),
|
||||
([1], 0),
|
||||
([0, 0, 0, 0], 0),
|
||||
]
|
||||
for labels, expected in expected_values:
|
||||
self.assertAlmostEqual(expected, Splitter._entropy(labels))
|
||||
tcl = self.build(criterion="entropy")
|
||||
self.assertAlmostEqual(expected, tcl.criterion_function(labels))
|
||||
|
||||
def test_information_gain(self):
|
||||
expected_values = [
|
||||
(
|
||||
[0, 1, 1, 1, 1, 1],
|
||||
[0, 0, 0, 1],
|
||||
0.16333333333333333,
|
||||
0.25642589168200297,
|
||||
),
|
||||
(
|
||||
[0, 1, 1, 2, 2, 3, 4, 5, 3, 2, 1, 1],
|
||||
[5, 3, 2, 1, 1],
|
||||
0.007381776239907684,
|
||||
-0.03328610916207225,
|
||||
),
|
||||
([], [], 0.0, 0.0),
|
||||
([1], [], 0.0, 0.0),
|
||||
([], [1], 0.0, 0.0),
|
||||
([0, 0, 0, 0], [0, 0], 0.0, 0.0),
|
||||
([], [1, 1, 1, 2], 0.0, 0.0),
|
||||
(None, [1, 2, 3], 0.0, 0.0),
|
||||
([1, 2, 3], None, 0.0, 0.0),
|
||||
]
|
||||
for yu, yd, expected_gini, expected_entropy in expected_values:
|
||||
yu = np.array(yu, dtype=np.int32) if yu is not None else None
|
||||
yd = np.array(yd, dtype=np.int32) if yd is not None else None
|
||||
if yu is not None and yd is not None:
|
||||
complete = np.append(yu, yd)
|
||||
elif yd is not None:
|
||||
complete = yd
|
||||
else:
|
||||
complete = yu
|
||||
tcl = self.build(criterion="gini")
|
||||
computed = tcl.information_gain(complete, yu, yd)
|
||||
self.assertAlmostEqual(expected_gini, computed)
|
||||
tcl = self.build(criterion="entropy")
|
||||
computed = tcl.information_gain(complete, yu, yd)
|
||||
self.assertAlmostEqual(expected_entropy, computed)
|
||||
|
||||
def test_max_samples(self):
|
||||
tcl = self.build(criteria="max_samples")
|
||||
data = np.array(
|
||||
[
|
||||
[-0.1, 0.2, -0.3],
|
||||
[0.7, 0.01, -0.1],
|
||||
[0.7, -0.9, 0.5],
|
||||
[0.1, 0.2, 0.3],
|
||||
[-0.1, 0.2, 0.3],
|
||||
[-0.1, 0.2, 0.3],
|
||||
]
|
||||
)
|
||||
expected = data[:, 0]
|
||||
y = [1, 2, 1, 0, 0, 0]
|
||||
computed = tcl._max_samples(data, y)
|
||||
self.assertEqual(0, computed)
|
||||
computed_data = data[:, computed]
|
||||
self.assertEqual((6,), computed_data.shape)
|
||||
self.assertListEqual(expected.tolist(), computed_data.tolist())
|
||||
|
||||
def test_impurity(self):
|
||||
tcl = self.build(criteria="impurity")
|
||||
data = np.array(
|
||||
[
|
||||
[-0.1, 0.2, -0.3],
|
||||
[0.7, 0.01, -0.1],
|
||||
[0.7, -0.9, 0.5],
|
||||
[0.1, 0.2, 0.3],
|
||||
[-0.1, 0.2, 0.3],
|
||||
[-0.1, 0.2, 0.3],
|
||||
]
|
||||
)
|
||||
expected = data[:, 2]
|
||||
y = np.array([1, 2, 1, 0, 0, 0])
|
||||
computed = tcl._impurity(data, y)
|
||||
self.assertEqual(2, computed)
|
||||
computed_data = data[:, computed]
|
||||
self.assertEqual((6,), computed_data.shape)
|
||||
self.assertListEqual(expected.tolist(), computed_data.tolist())
|
||||
|
||||
def test_generate_subspaces(self):
|
||||
features = 250
|
||||
for max_features in range(2, features):
|
||||
num = len(Splitter._generate_spaces(features, max_features))
|
||||
self.assertEqual(5, num)
|
||||
self.assertEqual(3, len(Splitter._generate_spaces(3, 2)))
|
||||
self.assertEqual(4, len(Splitter._generate_spaces(4, 3)))
|
||||
|
||||
def test_best_splitter_few_sets(self):
|
||||
X, y = load_iris(return_X_y=True)
|
||||
X = np.delete(X, 3, 1)
|
||||
tcl = self.build(
|
||||
feature_select="best", random_state=self._random_state
|
||||
)
|
||||
dataset, computed = tcl.get_subspace(X, y, max_features=2)
|
||||
self.assertListEqual([0, 2], list(computed))
|
||||
self.assertListEqual(X[:, computed].tolist(), dataset.tolist())
|
||||
|
||||
def test_splitter_parameter(self):
|
||||
expected_values = [
|
||||
[0, 6, 11, 12], # best entropy max_samples
|
||||
[0, 6, 11, 12], # best entropy impurity
|
||||
[0, 6, 11, 12], # best gini max_samples
|
||||
[0, 6, 11, 12], # best gini impurity
|
||||
[0, 3, 8, 12], # random entropy max_samples
|
||||
[0, 3, 7, 12], # random entropy impurity
|
||||
[1, 7, 9, 12], # random gini max_samples
|
||||
[1, 5, 8, 12], # random gini impurity
|
||||
[6, 9, 11, 12], # mutual entropy max_samples
|
||||
[6, 9, 11, 12], # mutual entropy impurity
|
||||
[6, 9, 11, 12], # mutual gini max_samples
|
||||
[6, 9, 11, 12], # mutual gini impurity
|
||||
]
|
||||
X, y = load_wine(return_X_y=True)
|
||||
rn = 0
|
||||
for feature_select in ["best", "random", "mutual"]:
|
||||
for criterion in ["entropy", "gini"]:
|
||||
for criteria in [
|
||||
"max_samples",
|
||||
"impurity",
|
||||
]:
|
||||
tcl = self.build(
|
||||
feature_select=feature_select,
|
||||
criterion=criterion,
|
||||
criteria=criteria,
|
||||
)
|
||||
expected = expected_values.pop(0)
|
||||
random.seed(rn)
|
||||
rn += 1
|
||||
dataset, computed = tcl.get_subspace(X, y, max_features=4)
|
||||
# print(
|
||||
# "{}, # {:7s}{:8s}{:15s}".format(
|
||||
# list(computed),
|
||||
# feature_select,
|
||||
# criterion,
|
||||
# criteria,
|
||||
# )
|
||||
# )
|
||||
self.assertListEqual(expected, sorted(list(computed)))
|
||||
self.assertListEqual(
|
||||
X[:, computed].tolist(), dataset.tolist()
|
||||
)
|
||||
|
||||
def test_get_best_subspaces(self):
|
||||
results = [
|
||||
(4, [3, 4, 11, 13]),
|
||||
(7, [1, 3, 4, 5, 11, 13, 16]),
|
||||
(9, [1, 3, 4, 5, 7, 10, 11, 13, 16]),
|
||||
]
|
||||
X, y = load_dataset(n_features=20)
|
||||
for k, expected in results:
|
||||
tcl = self.build(
|
||||
feature_select="best",
|
||||
)
|
||||
Xs, computed = tcl.get_subspace(X, y, k)
|
||||
self.assertListEqual(expected, list(computed))
|
||||
self.assertListEqual(X[:, expected].tolist(), Xs.tolist())
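For orientation, a standalone sketch of the Splitter API these tests exercise, mirroring the build() helper above; the printed columns are only an example of what get_subspace may select.

# Sketch only: pick a 2-feature subspace of iris with a Splitter.
from sklearn.datasets import load_iris
from sklearn.svm import SVC
from stree import Splitter

X, y = load_iris(return_X_y=True)
splitter = Splitter(
    clf=SVC(random_state=0, kernel="rbf"),
    min_samples_split=0,
    feature_select="best",
    criterion="gini",
    criteria="max_samples",
    random_state=0,
)
Xs, cols = splitter.get_subspace(X, y, max_features=2)
print(list(cols), Xs.shape)  # e.g. [0, 2] (150, 2)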
662
stree/tests/Stree_test.py
Normal file
@@ -0,0 +1,662 @@
|
||||
import os
|
||||
import unittest
|
||||
import warnings
|
||||
|
||||
import numpy as np
|
||||
from sklearn.datasets import load_iris, load_wine
|
||||
from sklearn.exceptions import ConvergenceWarning
|
||||
from sklearn.svm import LinearSVC
|
||||
|
||||
from stree import Stree, Snode
|
||||
from .utils import load_dataset
|
||||
|
||||
|
||||
class Stree_test(unittest.TestCase):
|
||||
def __init__(self, *args, **kwargs):
|
||||
self._random_state = 1
|
||||
self._kernels = ["liblinear", "linear", "rbf", "poly", "sigmoid"]
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
@classmethod
|
||||
def setUp(cls):
|
||||
os.environ["TESTING"] = "1"
|
||||
|
||||
def test_valid_kernels(self):
|
||||
X, y = load_dataset()
|
||||
for kernel in self._kernels:
|
||||
clf = Stree(kernel=kernel, multiclass_strategy="ovr")
|
||||
clf.fit(X, y)
|
||||
self.assertIsNotNone(clf.tree_)
|
||||
|
||||
def test_bogus_kernel(self):
|
||||
kernel = "other"
|
||||
X, y = load_dataset()
|
||||
clf = Stree(kernel=kernel)
|
||||
with self.assertRaises(ValueError):
|
||||
clf.fit(X, y)
|
||||
|
||||
def _check_tree(self, node: Snode):
|
||||
"""Check recursively that the nodes that are not leaves have the
|
||||
        correct number of labels and that their children have the right
        number of samples in their datasets
|
||||
|
||||
Parameters
|
||||
----------
|
||||
node : Snode
|
||||
node to check
|
||||
"""
|
||||
if node.is_leaf():
|
||||
return
|
||||
y_prediction = node._clf.predict(node._X)
|
||||
y_down = node.get_down()._y
|
||||
y_up = node.get_up()._y
|
||||
        # Is it a correct partition in terms of cardinality?
|
||||
# i.e. The partition algorithm didn't forget any sample
|
||||
self.assertEqual(node._y.shape[0], y_down.shape[0] + y_up.shape[0])
|
||||
unique_y, count_y = np.unique(node._y, return_counts=True)
|
||||
labels_d, count_d = np.unique(y_down, return_counts=True)
|
||||
labels_u, count_u = np.unique(y_up, return_counts=True)
|
||||
dict_d = {label: count_d[i] for i, label in enumerate(labels_d)}
|
||||
dict_u = {label: count_u[i] for i, label in enumerate(labels_u)}
|
||||
#
|
||||
for i in unique_y:
|
||||
try:
|
||||
number_up = dict_u[i]
|
||||
except KeyError:
|
||||
number_up = 0
|
||||
try:
|
||||
number_down = dict_d[i]
|
||||
except KeyError:
|
||||
number_down = 0
|
||||
self.assertEqual(count_y[i], number_down + number_up)
|
||||
# Is the partition made the same as the prediction?
|
||||
# as the node is not a leaf...
|
||||
_, count_yp = np.unique(y_prediction, return_counts=True)
|
||||
self.assertEqual(count_yp[1], y_up.shape[0])
|
||||
self.assertEqual(count_yp[0], y_down.shape[0])
|
||||
self._check_tree(node.get_down())
|
||||
self._check_tree(node.get_up())
|
||||
|
||||
def test_build_tree(self):
|
||||
"""Check if the tree is built the same way as predictions of models"""
|
||||
warnings.filterwarnings("ignore")
|
||||
for kernel in self._kernels:
|
||||
clf = Stree(
|
||||
kernel="sigmoid",
|
||||
multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
|
||||
random_state=self._random_state,
|
||||
)
|
||||
clf.fit(*load_dataset(self._random_state))
|
||||
self._check_tree(clf.tree_)
|
||||
|
||||
def test_single_prediction(self):
|
||||
X, y = load_dataset(self._random_state)
|
||||
for kernel in self._kernels:
|
||||
clf = Stree(
|
||||
kernel=kernel,
|
||||
multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
|
||||
random_state=self._random_state,
|
||||
)
|
||||
yp = clf.fit(X, y).predict((X[0, :].reshape(-1, X.shape[1])))
|
||||
self.assertEqual(yp[0], y[0])
|
||||
|
||||
def test_multiple_prediction(self):
|
||||
        # For the first 27 elements the predictions are the same as the truth
|
||||
num = 27
|
||||
X, y = load_dataset(self._random_state)
|
||||
for kernel in ["liblinear", "linear", "rbf", "poly"]:
|
||||
clf = Stree(
|
||||
kernel=kernel,
|
||||
multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
|
||||
random_state=self._random_state,
|
||||
)
|
||||
yp = clf.fit(X, y).predict(X[:num, :])
|
||||
self.assertListEqual(y[:num].tolist(), yp.tolist())
|
||||
|
||||
def test_single_vs_multiple_prediction(self):
|
||||
"""Check if predicting sample by sample gives the same result as
|
||||
predicting all samples at once
|
||||
"""
|
||||
X, y = load_dataset(self._random_state)
|
||||
for kernel in self._kernels:
|
||||
clf = Stree(
|
||||
kernel=kernel,
|
||||
multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
|
||||
random_state=self._random_state,
|
||||
)
|
||||
clf.fit(X, y)
|
||||
# Compute prediction line by line
|
||||
yp_line = np.array([], dtype=int)
|
||||
for xp in X:
|
||||
yp_line = np.append(
|
||||
yp_line, clf.predict(xp.reshape(-1, X.shape[1]))
|
||||
)
|
||||
# Compute prediction at once
|
||||
yp_once = clf.predict(X)
|
||||
self.assertListEqual(yp_line.tolist(), yp_once.tolist())
|
||||
|
||||
def test_iterator_and_str(self):
|
||||
"""Check preorder iterator"""
|
||||
expected = [
|
||||
"root feaures=(0, 1, 2) impurity=1.0000 counts=(array([0, 1]), "
|
||||
"array([750, 750]))",
|
||||
"root - Down(2), <cgaf> - Leaf class=0 belief= 0.928297 impurity="
|
||||
"0.3722 counts=(array([0, 1]), array([725, 56]))",
|
||||
"root - Up(2) feaures=(0, 1, 2) impurity=0.2178 counts=(array([0, "
|
||||
"1]), array([ 25, 694]))",
|
||||
"root - Up(2) - Down(3) feaures=(0, 1, 2) impurity=0.8454 counts="
|
||||
"(array([0, 1]), array([8, 3]))",
|
||||
"root - Up(2) - Down(3) - Down(4), <pure> - Leaf class=0 belief= "
|
||||
"1.000000 impurity=0.0000 counts=(array([0]), array([7]))",
|
||||
"root - Up(2) - Down(3) - Up(4), <cgaf> - Leaf class=1 belief= "
|
||||
"0.750000 impurity=0.8113 counts=(array([0, 1]), array([1, 3]))",
|
||||
"root - Up(2) - Up(3), <cgaf> - Leaf class=1 belief= 0.975989 "
|
||||
"impurity=0.1634 counts=(array([0, 1]), array([ 17, 691]))",
|
||||
]
|
||||
computed = []
|
||||
expected_string = ""
|
||||
clf = Stree(
|
||||
kernel="liblinear",
|
||||
multiclass_strategy="ovr",
|
||||
random_state=self._random_state,
|
||||
)
|
||||
clf.fit(*load_dataset(self._random_state))
|
||||
for node in iter(clf):
|
||||
computed.append(str(node))
|
||||
expected_string += str(node) + "\n"
|
||||
self.assertListEqual(expected, computed)
|
||||
self.assertEqual(expected_string, str(clf))
|
||||
|
||||
@staticmethod
|
||||
def test_is_a_sklearn_classifier():
|
||||
warnings.filterwarnings("ignore", category=ConvergenceWarning)
|
||||
warnings.filterwarnings("ignore", category=RuntimeWarning)
|
||||
from sklearn.utils.estimator_checks import check_estimator
|
||||
|
||||
check_estimator(Stree())
|
||||
|
||||
def test_exception_if_C_is_negative(self):
|
||||
tclf = Stree(C=-1)
|
||||
with self.assertRaises(ValueError):
|
||||
tclf.fit(*load_dataset(self._random_state))
|
||||
|
||||
def test_exception_if_bogus_split_criteria(self):
|
||||
tclf = Stree(split_criteria="duck")
|
||||
with self.assertRaises(ValueError):
|
||||
tclf.fit(*load_dataset(self._random_state))
|
||||
|
||||
def test_check_max_depth_is_positive_or_None(self):
|
||||
tcl = Stree()
|
||||
self.assertIsNone(tcl.max_depth)
|
||||
tcl = Stree(max_depth=1)
|
||||
self.assertGreaterEqual(1, tcl.max_depth)
|
||||
with self.assertRaises(ValueError):
|
||||
tcl = Stree(max_depth=-1)
|
||||
tcl.fit(*load_dataset(self._random_state))
|
||||
|
||||
def test_check_max_depth(self):
|
||||
depths = (3, 4)
|
||||
for depth in depths:
|
||||
tcl = Stree(
|
||||
kernel="liblinear",
|
||||
multiclass_strategy="ovr",
|
||||
random_state=self._random_state,
|
||||
max_depth=depth,
|
||||
)
|
||||
tcl.fit(*load_dataset(self._random_state))
|
||||
self.assertEqual(depth, tcl.depth_)
|
||||
|
||||
def test_unfitted_tree_is_iterable(self):
|
||||
tcl = Stree()
|
||||
self.assertEqual(0, len(list(tcl)))
|
||||
|
||||
def test_min_samples_split(self):
|
||||
dataset = [[1], [2], [3]], [1, 1, 0]
|
||||
tcl_split = Stree(min_samples_split=3).fit(*dataset)
|
||||
self.assertIsNotNone(tcl_split.tree_.get_down())
|
||||
self.assertIsNotNone(tcl_split.tree_.get_up())
|
||||
tcl_nosplit = Stree(min_samples_split=4).fit(*dataset)
|
||||
self.assertIsNone(tcl_nosplit.tree_.get_down())
|
||||
self.assertIsNone(tcl_nosplit.tree_.get_up())
|
||||
|
||||
def test_simple_muticlass_dataset(self):
|
||||
for kernel in self._kernels:
|
||||
clf = Stree(
|
||||
kernel=kernel,
|
||||
multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
|
||||
random_state=self._random_state,
|
||||
)
|
||||
px = [[1, 2], [5, 6], [9, 10]]
|
||||
py = [0, 1, 2]
|
||||
clf.fit(px, py)
|
||||
self.assertEqual(1.0, clf.score(px, py))
|
||||
self.assertListEqual(py, clf.predict(px).tolist())
|
||||
self.assertListEqual(py, clf.classes_.tolist())
|
||||
|
||||
def test_muticlass_dataset(self):
|
||||
warnings.filterwarnings("ignore", category=ConvergenceWarning)
|
||||
warnings.filterwarnings("ignore", category=RuntimeWarning)
|
||||
datasets = {
|
||||
"Synt": load_dataset(random_state=self._random_state, n_classes=3),
|
||||
"Iris": load_wine(return_X_y=True),
|
||||
}
|
||||
outcomes = {
|
||||
"Synt": {
|
||||
"max_samples liblinear": 0.9493333333333334,
|
||||
"max_samples linear": 0.9426666666666667,
|
||||
"max_samples rbf": 0.9606666666666667,
|
||||
"max_samples poly": 0.9373333333333334,
|
||||
"max_samples sigmoid": 0.824,
|
||||
"impurity liblinear": 0.9493333333333334,
|
||||
"impurity linear": 0.9426666666666667,
|
||||
"impurity rbf": 0.9606666666666667,
|
||||
"impurity poly": 0.9373333333333334,
|
||||
"impurity sigmoid": 0.824,
|
||||
},
|
||||
"Iris": {
|
||||
"max_samples liblinear": 0.9550561797752809,
|
||||
"max_samples linear": 1.0,
|
||||
"max_samples rbf": 0.6685393258426966,
|
||||
"max_samples poly": 0.6853932584269663,
|
||||
"max_samples sigmoid": 0.6404494382022472,
|
||||
"impurity liblinear": 0.9550561797752809,
|
||||
"impurity linear": 1.0,
|
||||
"impurity rbf": 0.6685393258426966,
|
||||
"impurity poly": 0.6853932584269663,
|
||||
"impurity sigmoid": 0.6404494382022472,
|
||||
},
|
||||
}
|
||||
|
||||
for name, dataset in datasets.items():
|
||||
px, py = dataset
|
||||
for criteria in ["max_samples", "impurity"]:
|
||||
for kernel in self._kernels:
|
||||
clf = Stree(
|
||||
max_iter=1e4,
|
||||
multiclass_strategy="ovr"
|
||||
if kernel == "liblinear"
|
||||
else "ovo",
|
||||
kernel=kernel,
|
||||
random_state=self._random_state,
|
||||
)
|
||||
clf.fit(px, py)
|
||||
outcome = outcomes[name][f"{criteria} {kernel}"]
|
||||
# print(f'"{criteria} {kernel}": {clf.score(px, py)},')
|
||||
self.assertAlmostEqual(
|
||||
outcome,
|
||||
clf.score(px, py),
|
||||
5,
|
||||
f"{name} - {criteria} - {kernel}",
|
||||
)
|
||||
|
||||
def test_max_features(self):
|
||||
n_features = 16
|
||||
expected_values = [
|
||||
("auto", 4),
|
||||
("log2", 4),
|
||||
("sqrt", 4),
|
||||
(0.5, 8),
|
||||
(3, 3),
|
||||
(None, 16),
|
||||
]
|
||||
clf = Stree()
|
||||
clf.n_features_ = n_features
|
||||
for max_features, expected in expected_values:
|
||||
clf.set_params(**dict(max_features=max_features))
|
||||
computed = clf._initialize_max_features()
|
||||
self.assertEqual(expected, computed)
|
||||
# Check bogus max_features
|
||||
values = ["duck", -0.1, 0.0]
|
||||
for max_features in values:
|
||||
clf.set_params(**dict(max_features=max_features))
|
||||
with self.assertRaises(ValueError):
|
||||
_ = clf._initialize_max_features()
|
||||
|
||||
    def test_wrong_max_features(self):
        X, y = load_dataset(n_features=15)
        clf = Stree(max_features=16)
        with self.assertRaises(ValueError):
            clf.fit(X, y)

    def test_get_subspaces(self):
        dataset = np.random.random((10, 16))
        y = np.random.randint(0, 2, 10)
        expected_values = [
            ("auto", 4),
            ("log2", 4),
            ("sqrt", 4),
            (0.5, 8),
            (3, 3),
            (None, 16),
        ]
        clf = Stree()
        for max_features, expected in expected_values:
            clf.set_params(**dict(max_features=max_features))
            clf.fit(dataset, y)
            computed, indices = clf.splitter_.get_subspace(
                dataset, y, clf.max_features_
            )
            self.assertListEqual(
                dataset[:, indices].tolist(), computed.tolist()
            )
            self.assertEqual(expected, len(indices))

    def test_bogus_criterion(self):
        clf = Stree(criterion="duck")
        with self.assertRaises(ValueError):
            clf.fit(*load_dataset())

    def test_predict_feature_dimensions(self):
        X = np.random.rand(10, 5)
        y = np.random.randint(0, 2, 10)
        clf = Stree()
        clf.fit(X, y)
        with self.assertRaises(ValueError):
            clf.predict(X[:, :3])

    # Tests of score
    def test_score_binary(self):
        X, y = load_dataset(self._random_state)
        accuracies = [
            0.9506666666666667,
            0.9493333333333334,
            0.9606666666666667,
            0.9433333333333334,
            0.9153333333333333,
        ]
        for kernel, accuracy_expected in zip(self._kernels, accuracies):
            clf = Stree(
                random_state=self._random_state,
                multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
                kernel=kernel,
            )
            clf.fit(X, y)
            accuracy_score = clf.score(X, y)
            yp = clf.predict(X)
            accuracy_computed = np.mean(yp == y)
            self.assertEqual(accuracy_score, accuracy_computed)
            self.assertAlmostEqual(accuracy_expected, accuracy_score)

    def test_score_max_features(self):
        X, y = load_dataset(self._random_state)
        clf = Stree(
            kernel="liblinear",
            multiclass_strategy="ovr",
            random_state=self._random_state,
            max_features=2,
        )
        clf.fit(X, y)
        self.assertAlmostEqual(0.9453333333333334, clf.score(X, y))

    def test_bogus_splitter_parameter(self):
        clf = Stree(splitter="duck")
        with self.assertRaises(ValueError):
            clf.fit(*load_dataset())

    def test_multiclass_classifier_integrity(self):
        """Checks that the multiclass operation is done right"""
        X, y = load_iris(return_X_y=True)
        clf = Stree(
            kernel="liblinear", multiclass_strategy="ovr", random_state=0
        )
        clf.fit(X, y)
        score = clf.score(X, y)
        # Check accuracy of the whole model
        self.assertAlmostEqual(0.98, score, 5)
        svm = LinearSVC(random_state=0)
        svm.fit(X, y)
        self.assertAlmostEqual(0.9666666666666667, svm.score(X, y), 5)
        data = svm.decision_function(X)
        expected = [
            0.4444444444444444,
            0.35777777777777775,
            0.4569777777777778,
        ]
        ty = data.copy()
        ty[data <= 0] = 0
        ty[data > 0] = 1
        ty = ty.astype(int)
        for i in range(3):
            self.assertAlmostEqual(
                expected[i],
                clf.splitter_._gini(ty[:, i]),
            )
        # 1st Branch
        # up should have 53 samples of classes [1, 2] -> [3, 50]
        # down should have 97 samples of classes [0, 1] -> [50, 47]
        up = data[:, 2] > 0
        resup = np.unique(y[up], return_counts=True)
        resdn = np.unique(y[~up], return_counts=True)
        self.assertListEqual([1, 2], resup[0].tolist())
        self.assertListEqual([3, 50], resup[1].tolist())
        self.assertListEqual([0, 1], resdn[0].tolist())
        self.assertListEqual([50, 47], resdn[1].tolist())
        # 2nd Branch
        # up should have 53 samples of classes [1, 2] -> [3, 50]
        # down should have 47 samples of class 1
        node_up = clf.tree_.get_down().get_up()
        node_dn = clf.tree_.get_down().get_down()
        resup = np.unique(node_up._y, return_counts=True)
        resdn = np.unique(node_dn._y, return_counts=True)
        self.assertListEqual([1, 2], resup[0].tolist())
        self.assertListEqual([3, 50], resup[1].tolist())
        self.assertListEqual([1], resdn[0].tolist())
        self.assertListEqual([47], resdn[1].tolist())

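    # A minimal sketch of the Gini impurity the assertions above rely on,
    # assuming clf.splitter_._gini implements the standard 1 - sum(p_i^2);
    # e.g. a 50/100 label split gives 1 - (1/3)^2 - (2/3)^2 = 4/9 = 0.4444...
    @staticmethod
    def _gini_sketch(y):
        # standard Gini impurity: one minus the sum of squared class shares
        _, counts = np.unique(y, return_counts=True)
        proportions = counts / counts.sum()
        return 1.0 - float(np.sum(proportions**2))
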
    def test_score_multiclass_rbf(self):
        X, y = load_dataset(
            random_state=self._random_state,
            n_classes=3,
            n_features=5,
            n_samples=500,
        )
        clf = Stree(kernel="rbf", random_state=self._random_state)
        clf2 = Stree(
            kernel="rbf", random_state=self._random_state, normalize=True
        )
        self.assertEqual(0.966, clf.fit(X, y).score(X, y))
        self.assertEqual(0.964, clf2.fit(X, y).score(X, y))
        X, y = load_wine(return_X_y=True)
        self.assertEqual(0.6685393258426966, clf.fit(X, y).score(X, y))
        self.assertEqual(1.0, clf2.fit(X, y).score(X, y))

    def test_score_multiclass_poly(self):
        X, y = load_dataset(
            random_state=self._random_state,
            n_classes=3,
            n_features=5,
            n_samples=500,
        )
        clf = Stree(
            kernel="poly", random_state=self._random_state, C=10, degree=5
        )
        clf2 = Stree(
            kernel="poly",
            random_state=self._random_state,
            normalize=True,
        )
        self.assertEqual(0.946, clf.fit(X, y).score(X, y))
        self.assertEqual(0.972, clf2.fit(X, y).score(X, y))
        X, y = load_wine(return_X_y=True)
        self.assertEqual(0.7808988764044944, clf.fit(X, y).score(X, y))
        self.assertEqual(1.0, clf2.fit(X, y).score(X, y))

    def test_score_multiclass_liblinear(self):
        X, y = load_dataset(
            random_state=self._random_state,
            n_classes=3,
            n_features=5,
            n_samples=500,
        )
        clf = Stree(
            kernel="liblinear",
            multiclass_strategy="ovr",
            random_state=self._random_state,
            C=10,
        )
        clf2 = Stree(
            kernel="liblinear",
            multiclass_strategy="ovr",
            random_state=self._random_state,
            normalize=True,
        )
        self.assertEqual(0.968, clf.fit(X, y).score(X, y))
        self.assertEqual(0.97, clf2.fit(X, y).score(X, y))
        X, y = load_wine(return_X_y=True)
        self.assertEqual(1.0, clf.fit(X, y).score(X, y))
        self.assertEqual(1.0, clf2.fit(X, y).score(X, y))

    def test_score_multiclass_sigmoid(self):
        X, y = load_dataset(
            random_state=self._random_state,
            n_classes=3,
            n_features=5,
            n_samples=500,
        )
        clf = Stree(kernel="sigmoid", random_state=self._random_state, C=10)
        clf2 = Stree(
            kernel="sigmoid",
            random_state=self._random_state,
            normalize=True,
            C=10,
        )
        self.assertEqual(0.796, clf.fit(X, y).score(X, y))
        self.assertEqual(0.952, clf2.fit(X, y).score(X, y))
        X, y = load_wine(return_X_y=True)
        self.assertEqual(0.6910112359550562, clf.fit(X, y).score(X, y))
        self.assertEqual(0.9662921348314607, clf2.fit(X, y).score(X, y))

    def test_score_multiclass_linear(self):
        warnings.filterwarnings("ignore", category=ConvergenceWarning)
        warnings.filterwarnings("ignore", category=RuntimeWarning)
        X, y = load_dataset(
            random_state=self._random_state,
            n_classes=3,
            n_features=5,
            n_samples=1500,
        )
        clf = Stree(
            kernel="liblinear",
            multiclass_strategy="ovr",
            random_state=self._random_state,
        )
        self.assertEqual(0.9533333333333334, clf.fit(X, y).score(X, y))
        # Check with context-based standardization
        clf2 = Stree(
            kernel="liblinear",
            multiclass_strategy="ovr",
            random_state=self._random_state,
            normalize=True,
        )
        self.assertEqual(0.9526666666666667, clf2.fit(X, y).score(X, y))
        X, y = load_wine(return_X_y=True)
        self.assertEqual(0.9831460674157303, clf.fit(X, y).score(X, y))
        self.assertEqual(1.0, clf2.fit(X, y).score(X, y))

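    # A hedged sketch of the "context-based standardization" checked above,
    # assuming normalize=True standardizes the samples that reach each node
    # before that node's SVM is fitted (StandardScaler-style semantics; the
    # exact per-node mechanics are Stree internals):
    @staticmethod
    def _normalize_sketch(X_node):
        mu = X_node.mean(axis=0)
        std = X_node.std(axis=0)
        std[std == 0.0] = 1.0  # guard constant features against division by zero
        return (X_node - mu) / std
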
    def test_zero_all_sample_weights(self):
        X, y = load_dataset(self._random_state)
        with self.assertRaises(ValueError):
            Stree().fit(X, y, np.zeros(len(y)))

    def test_mask_samples_weighted_zero(self):
        X = np.array(
            [
                [1, 1],
                [1, 1],
                [1, 1],
                [2, 2],
                [2, 2],
                [2, 2],
                [3, 3],
                [3, 3],
                [3, 3],
            ]
        )
        y = np.array([1, 1, 1, 2, 2, 2, 5, 5, 5])
        yw = np.array([1, 1, 1, 1, 1, 1, 5, 5, 5])
        w = [1, 1, 1, 0, 0, 0, 1, 1, 1]
        model1 = Stree().fit(X, y)
        model2 = Stree().fit(X, y, w)
        predict1 = model1.predict(X)
        predict2 = model2.predict(X)
        self.assertListEqual(y.tolist(), predict1.tolist())
        self.assertListEqual(yw.tolist(), predict2.tolist())
        self.assertEqual(model1.score(X, y), 1)
        self.assertAlmostEqual(model2.score(X, y), 0.66666667)
        self.assertEqual(model2.score(X, y, w), 1)

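    # A minimal sketch of why score(X, y, w) == 1 while the unweighted score
    # is 2/3 above: zero-weight samples simply do not count in the weighted
    # accuracy (ClassifierMixin-style scoring is a weighted mean of hits):
    @staticmethod
    def _weighted_accuracy_sketch(y_true, y_pred, sample_weight=None):
        hits = np.array(y_true) == np.array(y_pred)
        return np.average(hits, weights=sample_weight)
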
    def test_depth(self):
        X, y = load_dataset(
            random_state=self._random_state,
            n_classes=3,
            n_features=5,
            n_samples=1500,
        )
        clf = Stree(random_state=self._random_state)
        clf.fit(X, y)
        self.assertEqual(6, clf.depth_)
        X, y = load_wine(return_X_y=True)
        clf = Stree(random_state=self._random_state)
        clf.fit(X, y)
        self.assertEqual(4, clf.depth_)

    def test_nodes_leaves(self):
        X, y = load_dataset(
            random_state=self._random_state,
            n_classes=3,
            n_features=5,
            n_samples=1500,
        )
        clf = Stree(random_state=self._random_state)
        clf.fit(X, y)
        nodes, leaves = clf.nodes_leaves()
        self.assertEqual(31, nodes)
        self.assertEqual(16, leaves)
        X, y = load_wine(return_X_y=True)
        clf = Stree(random_state=self._random_state)
        clf.fit(X, y)
        nodes, leaves = clf.nodes_leaves()
        self.assertEqual(11, nodes)
        self.assertEqual(6, leaves)

    def test_nodes_leaves_artificial(self):
        n1 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test1")
        n2 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test2")
        n3 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test3")
        n4 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test4")
        n5 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test5")
        n6 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test6")
        n1.set_up(n2)
        n2.set_up(n3)
        n2.set_down(n4)
        n3.set_up(n5)
        n4.set_down(n6)
        clf = Stree(random_state=self._random_state)
        clf.tree_ = n1
        nodes, leaves = clf.nodes_leaves()
        self.assertEqual(6, nodes)
        self.assertEqual(2, leaves)

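    # A minimal sketch of the recursion nodes_leaves() is expected to perform
    # on the hand-built tree above, assuming a node with neither child set
    # counts as a leaf (hence 6 nodes and 2 leaves: n5 and n6):
    @staticmethod
    def _count_nodes_leaves_sketch(node):
        def walk(n):
            if n is None:
                return 0, 0
            nodes_d, leaves_d = walk(n.get_down())
            nodes_u, leaves_u = walk(n.get_up())
            is_leaf = n.get_down() is None and n.get_up() is None
            return 1 + nodes_d + nodes_u, int(is_leaf) + leaves_d + leaves_u

        return walk(node)
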
    def test_bogus_multiclass_strategy(self):
        clf = Stree(multiclass_strategy="other")
        X, y = load_wine(return_X_y=True)
        with self.assertRaises(ValueError):
            clf.fit(X, y)

    def test_multiclass_strategy(self):
        X, y = load_wine(return_X_y=True)
        clf_o = Stree(multiclass_strategy="ovo")
        clf_r = Stree(multiclass_strategy="ovr")
        score_o = clf_o.fit(X, y).score(X, y)
        score_r = clf_r.fit(X, y).score(X, y)
        self.assertEqual(1.0, score_o)
        self.assertEqual(0.9269662921348315, score_r)

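    # A minimal sketch of the difference exercised above, assuming standard
    # one-vs-one / one-vs-rest semantics: for the k = 3 classes of the wine
    # data, "ovo" solves k * (k - 1) / 2 = 3 pairwise binary problems while
    # "ovr" solves k = 3 one-vs-rest problems:
    @staticmethod
    def _n_binary_problems_sketch(n_classes, strategy):
        if strategy == "ovo":
            return n_classes * (n_classes - 1) // 2
        if strategy == "ovr":
            return n_classes
        raise ValueError(f"Unknown multiclass strategy: {strategy!r}")
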
    def test_incompatible_hyperparameters(self):
        X, y = load_wine(return_X_y=True)
        clf = Stree(kernel="liblinear", multiclass_strategy="ovo")
        with self.assertRaises(ValueError):
            clf.fit(X, y)
        clf = Stree(multiclass_strategy="ovo", split_criteria="max_samples")
        with self.assertRaises(ValueError):
            clf.fit(X, y)

stree/tests/Strees_grapher_test.py
@@ -1,226 +0,0 @@
import os
import imghdr
import unittest

import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import warnings
from sklearn.datasets import make_classification

from stree import Stree_grapher, Snode_graph, Snode


def get_dataset(random_state=0, n_features=3):
    X, y = make_classification(
        n_samples=1500,
        n_features=n_features,
        n_informative=3,
        n_redundant=0,
        n_repeated=0,
        n_classes=2,
        n_clusters_per_class=2,
        class_sep=1.5,
        flip_y=0,
        weights=[0.5, 0.5],
        random_state=random_state,
    )
    return X, y


class Stree_grapher_test(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        self._random_state = 1
        self._clf = Stree_grapher(dict(random_state=self._random_state))
        self._clf.fit(*get_dataset(self._random_state, n_features=4))
        super().__init__(*args, **kwargs)

    @classmethod
    def setUp(cls):
        os.environ["TESTING"] = "1"

    def test_iterator(self):
        """Check the preorder iterator"""
        expected = [
            "root",
            "root - Down",
            "root - Down - Down, <cgaf> - Leaf class=1 belief= 0.976023 counts"
            "=(array([0, 1]), array([ 17, 692]))",
            "root - Down - Up",
            "root - Down - Up - Down, <cgaf> - Leaf class=0 belief= 0.500000 "
            "counts=(array([0, 1]), array([1, 1]))",
            "root - Down - Up - Up, <cgaf> - Leaf class=0 belief= 0.888889 "
            "counts=(array([0, 1]), array([8, 1]))",
            "root - Up, <cgaf> - Leaf class=0 belief= 0.928205 counts=(array("
            "[0, 1]), array([724, 56]))",
        ]
        computed = []
        for node in self._clf:
            computed.append(str(node))
        self.assertListEqual(expected, computed)

    def test_score(self):
        X, y = get_dataset(self._random_state)
        accuracy_score = self._clf.score(X, y)
        yp = self._clf.predict(X)
        accuracy_computed = np.mean(yp == y)
        self.assertEqual(accuracy_score, accuracy_computed)
        self.assertGreater(accuracy_score, 0.86)

    def test_score_4dims(self):
        X, y = get_dataset(self._random_state, n_features=4)
        accuracy_score = self._clf.score(X, y)
        self.assertEqual(accuracy_score, 0.95)

    def test_save_all(self):
        folder_name = os.path.join(os.sep, "tmp", "stree")
        if os.path.isdir(folder_name):
            os.rmdir(folder_name)
        file_names = [
            os.path.join(folder_name, f"STnode{i}.png") for i in range(1, 8)
        ]
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            matplotlib.use("Agg")
            self._clf.save_all(save_folder=folder_name)
        for file_name in file_names:
            self.assertTrue(os.path.exists(file_name))
            self.assertEqual("png", imghdr.what(file_name))
            os.remove(file_name)
        os.rmdir(folder_name)

    def test_plot_all(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            matplotlib.use("Agg")
            num_figures_before = plt.gcf().number
            self._clf.plot_all()
            num_figures_after = plt.gcf().number
        self.assertEqual(7, num_figures_after - num_figures_before)


class Snode_graph_test(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        self._random_state = 1
        self._clf = Stree_grapher(dict(random_state=self._random_state))
        self._clf.fit(*get_dataset(self._random_state))
        super().__init__(*args, **kwargs)

    @classmethod
    def setUp(cls):
        os.environ["TESTING"] = "1"

    def test_plot_size(self):
        default = self._clf._tree_gr.get_plot_size()
        expected = (17, 3)
        self._clf._tree_gr.set_plot_size(expected)
        self.assertEqual(expected, self._clf._tree_gr.get_plot_size())
        self._clf._tree_gr.set_plot_size(default)
        self.assertEqual(default, self._clf._tree_gr.get_plot_size())

    def test_attributes_in_leaves_graph(self):
        """Check that the attributes in the leaves have correct values so
        they form a predictor
        """

        def check_leave(node: Snode_graph):
            if not node.is_leaf():
                check_leave(node.get_down())
                check_leave(node.get_up())
                return
            # Check the belief in the leaf
            classes, card = np.unique(node._y, return_counts=True)
            max_card = max(card)
            min_card = min(card)
            if len(classes) > 1:
                try:
                    belief = max_card / (max_card + min_card)
                except ZeroDivisionError:
                    belief = 0.0
            else:
                belief = 1
            self.assertEqual(belief, node._belief)
            # Check the class
            class_computed = classes[card == max_card]
            self.assertEqual(class_computed, node._class)

        check_leave(self._clf._tree_gr)

    def test_nodes_graph_coefs(self):
        """Check that the nodes of the tree have the right attributes filled"""

        def run_tree(node: Snode_graph):
            if node._belief < 1:
                # only exclude pure leaves
                self.assertIsNotNone(node._clf)
                self.assertIsNotNone(node._clf.coef_)
            if node.is_leaf():
                return
            run_tree(node.get_down())
            run_tree(node.get_up())

        run_tree(self._clf._tree_gr)

    def test_save_hyperplane(self):
        folder_name = "/tmp/"
        file_name = os.path.join(folder_name, "STnode1.png")
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            matplotlib.use("Agg")
            self._clf._tree_gr.save_hyperplane(folder_name)
        self.assertTrue(os.path.exists(file_name))
        self.assertEqual("png", imghdr.what(file_name))
        os.remove(file_name)

    def test_plot_hyperplane_with_distribution(self):
        plt.close()
        # select a pure node
        node = self._clf._tree_gr.get_down().get_up().get_up()
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            matplotlib.use("Agg")
            num_figures_before = plt.gcf().number
            node.plot_hyperplane(plot_distribution=True)
            num_figures_after = plt.gcf().number
        self.assertEqual(1, num_figures_after - num_figures_before)

    def test_plot_hyperplane_without_distribution(self):
        plt.close()
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            matplotlib.use("Agg")
            num_figures_before = plt.gcf().number
            self._clf._tree_gr.plot_hyperplane(plot_distribution=False)
            num_figures_after = plt.gcf().number
        self.assertEqual(1, num_figures_after - num_figures_before)

    def test_plot_distribution(self):
        plt.close()
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            matplotlib.use("Agg")
            num_figures_before = plt.gcf().number
            self._clf._tree_gr.plot_distribution()
            num_figures_after = plt.gcf().number
        self.assertEqual(1, num_figures_after - num_figures_before)

    def test_set_axis_limits(self):
        node = Snode_graph(Snode(None, None, None, "test"))
        limits = (-2, 2), (-3, 3), (-4, 4)
        node.set_axis_limits(limits)
        computed = node.get_axis_limits()
        x, y, z = limits
        xx, yy, zz = computed
        self.assertEqual(x, xx)
        self.assertEqual(y, yy)
        self.assertEqual(z, zz)

    def test_cmap_change(self):
        node = Snode_graph(Snode(None, None, None, "test"))
        self.assertEqual("jet", node._get_cmap())
        # make the node pure
        node._belief = 1.0
        node._class = 1
        self.assertEqual("jet_r", node._get_cmap())

stree/tests/Strees_test.py
@@ -1,355 +0,0 @@
import os
import unittest

import numpy as np
from sklearn.datasets import make_classification, load_iris

from stree import Stree, Snode


def get_dataset(random_state=0, n_classes=2):
    X, y = make_classification(
        n_samples=1500,
        n_features=3,
        n_informative=3,
        n_redundant=0,
        n_repeated=0,
        n_classes=n_classes,
        n_clusters_per_class=2,
        class_sep=1.5,
        flip_y=0,
        random_state=random_state,
    )
    return X, y


class Stree_test(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        self._random_state = 1
        self._kernels = ["linear", "rbf", "poly"]
        super().__init__(*args, **kwargs)

    @classmethod
    def setUp(cls):
        os.environ["TESTING"] = "1"

    def _check_tree(self, node: Snode):
        """Check recursively that the nodes that are not leaves have the
        correct number of labels and that their sons have the right number of
        elements in their dataset

        Arguments:
            node {Snode} -- node to check
        """
        if node.is_leaf():
            return
        y_prediction = node._clf.predict(node._X)
        y_down = node.get_down()._y
        y_up = node.get_up()._y
        # Is this a correct partition in terms of cardinality?
        # i.e. the partition algorithm didn't forget any sample
        self.assertEqual(node._y.shape[0], y_down.shape[0] + y_up.shape[0])
        unique_y, count_y = np.unique(node._y, return_counts=True)
        _, count_d = np.unique(y_down, return_counts=True)
        _, count_u = np.unique(y_up, return_counts=True)
        for i in unique_y:
            try:
                number_down = count_d[i]
            except IndexError:
                number_down = 0
            try:
                number_up = count_u[i]
            except IndexError:
                number_up = 0
            self.assertEqual(count_y[i], number_down + number_up)
        # Is the partition the same as the prediction?
        # (as the node is not a leaf)
        _, count_yp = np.unique(y_prediction, return_counts=True)
        self.assertEqual(count_yp[0], y_up.shape[0])
        self.assertEqual(count_yp[1], y_down.shape[0])
        self._check_tree(node.get_down())
        self._check_tree(node.get_up())

    def test_build_tree(self):
        """Check that the tree is built the same way as the predictions of
        its models
        """
        import warnings

        warnings.filterwarnings("ignore")
        for kernel in self._kernels:
            clf = Stree(kernel=kernel, random_state=self._random_state)
            clf.fit(*get_dataset(self._random_state))
            self._check_tree(clf.tree_)

    def _find_out(
        self, px: np.array, x_original: np.array, y_original
    ) -> list:
        """Find the original values of y for a given array of samples

        Arguments:
            px {np.array} -- array of samples to search for
            x_original {np.array} -- original dataset
            y_original {[type]} -- original classes

        Returns:
            np.array -- classes of the given samples
        """
        res = []
        for needle in px:
            for row in range(x_original.shape[0]):
                if all(x_original[row, :] == needle):
                    res.append(y_original[row])
        return res

    def test_single_prediction(self):
        X, y = get_dataset(self._random_state)
        for kernel in self._kernels:
            clf = Stree(kernel=kernel, random_state=self._random_state)
            yp = clf.fit(X, y).predict(X[0, :].reshape(-1, X.shape[1]))
            self.assertEqual(yp[0], y[0])

    def test_multiple_prediction(self):
        # For the first 27 elements the predictions equal the ground truth
        num = 27
        X, y = get_dataset(self._random_state)
        for kernel in self._kernels:
            clf = Stree(kernel=kernel, random_state=self._random_state)
            yp = clf.fit(X, y).predict(X[:num, :])
            self.assertListEqual(y[:num].tolist(), yp.tolist())

    def test_score(self):
        X, y = get_dataset(self._random_state)
        accuracies = [
            0.9506666666666667,
            0.9606666666666667,
            0.9433333333333334,
        ]
        for kernel, accuracy_expected in zip(self._kernels, accuracies):
            clf = Stree(random_state=self._random_state, kernel=kernel)
            clf.fit(X, y)
            accuracy_score = clf.score(X, y)
            yp = clf.predict(X)
            accuracy_computed = np.mean(yp == y)
            self.assertEqual(accuracy_score, accuracy_computed)
            self.assertAlmostEqual(accuracy_expected, accuracy_score)

    def test_single_vs_multiple_prediction(self):
        """Check that predicting sample by sample gives the same result as
        predicting all samples at once
        """
        X, y = get_dataset(self._random_state)
        for kernel in self._kernels:
            clf = Stree(kernel=kernel, random_state=self._random_state)
            clf.fit(X, y)
            # Compute the prediction line by line
            yp_line = np.array([], dtype=int)
            for xp in X:
                yp_line = np.append(
                    yp_line, clf.predict(xp.reshape(-1, X.shape[1]))
                )
            # Compute the prediction at once
            yp_once = clf.predict(X)
            self.assertListEqual(yp_line.tolist(), yp_once.tolist())

    def test_iterator_and_str(self):
        """Check the preorder iterator"""
        expected = [
            "root",
            "root - Down",
            "root - Down - Down, <cgaf> - Leaf class=1 belief= 0.975989 counts"
            "=(array([0, 1]), array([ 17, 691]))",
            "root - Down - Up",
            "root - Down - Up - Down, <cgaf> - Leaf class=1 belief= 0.750000 "
            "counts=(array([0, 1]), array([1, 3]))",
            "root - Down - Up - Up, <pure> - Leaf class=0 belief= 1.000000 "
            "counts=(array([0]), array([7]))",
            "root - Up, <cgaf> - Leaf class=0 belief= 0.928297 counts=(array("
            "[0, 1]), array([725, 56]))",
        ]
        computed = []
        expected_string = ""
        clf = Stree(kernel="linear", random_state=self._random_state)
        clf.fit(*get_dataset(self._random_state))
        for node in clf:
            computed.append(str(node))
            expected_string += str(node) + "\n"
        self.assertListEqual(expected, computed)
        self.assertEqual(expected_string, str(clf))

    def test_is_a_sklearn_classifier(self):
        import warnings
        from sklearn.exceptions import ConvergenceWarning

        warnings.filterwarnings("ignore", category=ConvergenceWarning)
        warnings.filterwarnings("ignore", category=RuntimeWarning)
        from sklearn.utils.estimator_checks import check_estimator

        check_estimator(Stree())

    def test_exception_if_C_is_negative(self):
        tclf = Stree(C=-1)
        with self.assertRaises(ValueError):
            tclf.fit(*get_dataset(self._random_state))

    def test_exception_if_bogus_split_criteria(self):
        tclf = Stree(split_criteria="duck")
        with self.assertRaises(ValueError):
            tclf.fit(*get_dataset(self._random_state))

    def test_check_max_depth_is_positive_or_None(self):
        tcl = Stree()
        self.assertIsNone(tcl.max_depth)
        tcl = Stree(max_depth=1)
        self.assertGreaterEqual(1, tcl.max_depth)
        with self.assertRaises(ValueError):
            tcl = Stree(max_depth=-1)
            tcl.fit(*get_dataset(self._random_state))

    def test_check_max_depth(self):
        depths = (3, 4)
        for depth in depths:
            tcl = Stree(random_state=self._random_state, max_depth=depth)
            tcl.fit(*get_dataset(self._random_state))
            self.assertEqual(depth, tcl.depth_)

    def test_unfitted_tree_is_iterable(self):
        tcl = Stree()
        self.assertEqual(0, len(list(tcl)))

    def test_min_samples_split(self):
        tcl_split = Stree(min_samples_split=3)
        tcl_nosplit = Stree(min_samples_split=4)
        dataset = [[1], [2], [3]], [1, 1, 0]
        tcl_split.fit(*dataset)
        self.assertIsNotNone(tcl_split.tree_.get_down())
        self.assertIsNotNone(tcl_split.tree_.get_up())
        tcl_nosplit.fit(*dataset)
        self.assertIsNone(tcl_nosplit.tree_.get_down())
        self.assertIsNone(tcl_nosplit.tree_.get_up())

    def test_simple_muticlass_dataset(self):
        for kernel in self._kernels:
            clf = Stree(
                kernel=kernel,
                split_criteria="max_samples",
                random_state=self._random_state,
            )
            px = [[1, 2], [5, 6], [9, 10]]
            py = [0, 1, 2]
            clf.fit(px, py)
            self.assertEqual(1.0, clf.score(px, py))
            self.assertListEqual(py, clf.predict(px).tolist())
            self.assertListEqual(py, clf.classes_.tolist())

    def test_muticlass_dataset(self):
        datasets = {
            "Synt": get_dataset(random_state=self._random_state, n_classes=3),
            "Iris": load_iris(return_X_y=True),
        }
        outcomes = {
            "Synt": {
                "max_samples linear": 0.9533333333333334,
                "max_samples rbf": 0.836,
                "max_samples poly": 0.9473333333333334,
                "min_distance linear": 0.9533333333333334,
                "min_distance rbf": 0.836,
                "min_distance poly": 0.9473333333333334,
            },
            "Iris": {
                "max_samples linear": 0.98,
                "max_samples rbf": 1.0,
                "max_samples poly": 1.0,
                "min_distance linear": 0.98,
                "min_distance rbf": 1.0,
                "min_distance poly": 1.0,
            },
        }
        for name, dataset in datasets.items():
            px, py = dataset
            for criteria in ["max_samples", "min_distance"]:
                for kernel in self._kernels:
                    clf = Stree(
                        C=1e4,
                        max_iter=1e4,
                        kernel=kernel,
                        random_state=self._random_state,
                    )
                    clf.fit(px, py)
                    outcome = outcomes[name][f"{criteria} {kernel}"]
                    self.assertAlmostEqual(outcome, clf.score(px, py))


class Snode_test(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        self._random_state = 1
        self._clf = Stree(random_state=self._random_state)
        self._clf.fit(*get_dataset(self._random_state))
        super().__init__(*args, **kwargs)

    @classmethod
    def setUp(cls):
        os.environ["TESTING"] = "1"

    def test_attributes_in_leaves(self):
        """Check that the attributes in the leaves have correct values so
        they form a predictor
        """

        def check_leave(node: Snode):
            if not node.is_leaf():
                check_leave(node.get_down())
                check_leave(node.get_up())
                return
            # Check the belief in the leaf
            classes, card = np.unique(node._y, return_counts=True)
            max_card = max(card)
            min_card = min(card)
            if len(classes) > 1:
                try:
                    belief = max_card / (max_card + min_card)
                except ZeroDivisionError:
                    belief = 0.0
            else:
                belief = 1
            self.assertEqual(belief, node._belief)
            # Check the class
            class_computed = classes[card == max_card]
            self.assertEqual(class_computed, node._class)

        check_leave(self._clf.tree_)

    def test_nodes_coefs(self):
        """Check that the nodes of the tree have the right attributes filled"""

        def run_tree(node: Snode):
            if node._belief < 1:
                # only exclude pure leaves
                self.assertIsNotNone(node._clf)
                self.assertIsNotNone(node._clf.coef_)
            if node.is_leaf():
                return
            run_tree(node.get_down())
            run_tree(node.get_up())

        run_tree(self._clf.tree_)

    def test_make_predictor_on_leaf(self):
        test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], "test")
        test.make_predictor()
        self.assertEqual(1, test._class)
        self.assertEqual(0.75, test._belief)

    def test_make_predictor_on_not_leaf(self):
        test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], "test")
        test.set_up(Snode(None, [1], [1], "another_test"))
        test.make_predictor()
        self.assertIsNone(test._class)
        self.assertEqual(0, test._belief)

    def test_make_predictor_on_leaf_bogus_data(self):
        test = Snode(None, [1, 2, 3, 4], [], "test")
        test.make_predictor()
        self.assertIsNone(test._class)

stree/tests/__init__.py
@@ -1,9 +1,5 @@
-from .Strees_test import Stree_test, Snode_test
-from .Strees_grapher_test import Stree_grapher_test, Snode_graph_test
+from .Stree_test import Stree_test
+from .Snode_test import Snode_test
+from .Splitter_test import Splitter_test
 
-__all__ = [
-    "Stree_test",
-    "Snode_test",
-    "Stree_grapher_test",
-    "Snode_graph_test",
-]
+__all__ = ["Stree_test", "Snode_test", "Splitter_test"]
17
stree/tests/utils.py
Normal file
@@ -0,0 +1,17 @@
from sklearn.datasets import make_classification


def load_dataset(random_state=0, n_classes=2, n_features=3, n_samples=1500):
    X, y = make_classification(
        n_samples=n_samples,
        n_features=n_features,
        n_informative=3,
        n_redundant=0,
        n_repeated=0,
        n_classes=n_classes,
        n_clusters_per_class=2,
        class_sep=1.5,
        flip_y=0,
        random_state=random_state,
    )
    return X, y
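

# Example usage (a hypothetical quick check, assuming the defaults above):
# the default arguments reproduce the binary problem used across the suite.
if __name__ == "__main__":
    X, y = load_dataset(random_state=1)
    print(X.shape)  # expected: (1500, 3)
    print(sorted(set(y)))  # expected: [0, 1]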