Mirror of https://github.com/Doctorado-ML/STree.git (synced 2025-08-17 16:36:01 +00:00)

Compare commits: complete-s...package_do (22 commits)
| Author | SHA1 | Date |
| --- | --- | --- |
| | 4370433d4d | |
| | 02de394c96 | |
| | 8fe5fdff2b | |
| | 881777c38c | |
| | 3af7864278 | |
| | a4aac9d310 | |
| | a2df31628d | |
| | fec094a75f | |
| | 045e2fd446 | |
| | 2d6921f9a5 | |
| | 9eb06a9169 | |
| | 951f1cfaa7 | |
| | 8a18c998df | |
| | b55f59a3ec | |
| | 783d105099 | |
| | c36f685263 | |
| | 0f89b044f1 | |
| | 6ba973dfe1 | |
| | 460c63a6d0 | |
| | f438124057 | |
| | 147dad684c | |
| | 3bdac9bd60 | |
56 .github/workflows/codeql-analysis.yml (vendored, new file)

@@ -0,0 +1,56 @@
name: "CodeQL"

on:
  push:
    branches: [ master ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ master ]
  schedule:
    - cron: '16 17 * * 3'

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        language: [ 'python' ]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
        # Learn more:
        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed

    steps:
      - name: Checkout repository
        uses: actions/checkout@v2

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v1
        with:
          languages: ${{ matrix.language }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.
          # queries: ./path/to/local/query, your-org/your-repo/queries@main

      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
      # If this step fails, then you should remove it and run the build manually (see below)
      - name: Autobuild
        uses: github/codeql-action/autobuild@v1

      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 https://git.io/JvXDl

      # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
      #    and modify them (or add more) to build your code if your project
      #    uses a compiled language

      #- run: |
      #    make bootstrap
      #    make release

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v1
1 .gitignore (vendored)

@@ -133,3 +133,4 @@ dmypy.json
 .pre-commit-config.yaml
 
 **.csv
+.virtual_documents
2 LICENSE

@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2020 Doctorado-ML
+Copyright (c) 2020-2021, Ricardo Montañana Gómez
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
39 Makefile (new file)

@@ -0,0 +1,39 @@
SHELL := /bin/bash
.DEFAULT_GOAL := help
.PHONY: coverage deps help lint push test

coverage:  ## Run tests with coverage
	coverage erase
	coverage run -m unittest -v stree.tests
	coverage report -m

deps:  ## Install dependencies
	pip install -r requirements.txt

lint:  ## Lint and static-check
	black stree
	flake8 stree
	mypy stree

push:  ## Push code with tags
	git push && git push --tags

test:  ## Run tests
	python -m unittest -v stree.tests

help:  ## Show help message
	@IFS=$$'\n' ; \
	help_lines=(`fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/##/:/'`); \
	printf "%s\n\n" "Usage: make [task]"; \
	printf "%-20s %s\n" "task" "help" ; \
	printf "%-20s %s\n" "------" "----" ; \
	for help_line in $${help_lines[@]}; do \
		IFS=$$':' ; \
		help_split=($$help_line) ; \
		help_command=`echo $${help_split[0]} | sed -e 's/^ *//' -e 's/ *$$//'` ; \
		help_info=`echo $${help_split[2]} | sed -e 's/^ *//' -e 's/ *$$//'` ; \
		printf '\033[36m'; \
		printf "%-20s %s" $$help_command ; \
		printf '\033[0m'; \
		printf "%s\n" $$help_info; \
	done
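The `help` target above builds its task table by grepping `MAKEFILE_LIST` for lines carrying a `##` description. For illustration only, a minimal Python sketch of the same self-documenting pattern; the `print_make_help` helper is hypothetical and not part of this repository:

```python
# Hypothetical illustration: the self-documenting pattern used by the
# Makefile's `help` target, re-expressed in Python. It scans a makefile
# for "target: ## description" lines and prints a task table.
import re
import sys

def print_make_help(makefile_path: str = "Makefile") -> None:
    """Print a task table from '##' comments, like `make help` does."""
    pattern = re.compile(r"^([a-zA-Z_-]+):.*?## (.*)$")
    print("Usage: make [task]\n")
    print(f"{'task':<20} help")
    print(f"{'------':<20} ----")
    with open(makefile_path) as fh:
        for line in fh:
            match = pattern.match(line)
            if match:
                target, description = match.groups()
                # \033[36m / \033[0m: cyan target name, as the Makefile does
                print(f"\033[36m{target:<20}\033[0m {description}")

if __name__ == "__main__":
    print_make_help(sys.argv[1] if len(sys.argv) > 1 else "Makefile")
```

The design choice in both versions is the same: the task list and its documentation live on one line, so the help output can never drift out of sync with the targets.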
47 README.md

@@ -14,30 +14,59 @@ Oblique Tree classifier based on SVM nodes. The nodes are built and splitted wit
 pip install git+https://github.com/doctorado-ml/stree
 ```
 
+## Documentation
+
+Can be found in
+
 ## Examples
 
 ### Jupyter notebooks
 
 - [](https://mybinder.org/v2/gh/Doctorado-ML/STree/master?urlpath=lab/tree/notebooks/benchmark.ipynb) Benchmark
 
-- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/benchmark.ipynb) Benchmark
+- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/benchmark.ipynb) Benchmark
 
-- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/features.ipynb) Test features
-
-- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/adaboost.ipynb) Adaboost
+- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/features.ipynb) Some features
 
 - [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/gridsearch.ipynb) Gridsearch
 
-- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/test_graphs.ipynb) Test Graphics
+- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/ensemble.ipynb) Ensembles
 
-### Command line
-
-```bash
-python main.py
-```
+## Hyperparameters
+
+| | **Hyperparameter** | **Type/Values** | **Default** | **Meaning** |
+| --- | --- | --- | --- | --- |
+| \* | C | \<float\> | 1.0 | Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. |
+| \* | kernel | {"linear", "poly", "rbf", "sigmoid"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of "linear", "poly", "rbf" or "sigmoid". |
+| \* | max_iter | \<int\> | 1e5 | Hard limit on iterations within solver, or -1 for no limit. |
+| \* | random_state | \<int\> | None | Controls the pseudo random number generation for shuffling the data for probability estimates. Ignored when probability is False.<br>Pass an int for reproducible output across multiple function calls |
+| | max_depth | \<int\> | None | Specifies the maximum depth of the tree |
+| \* | tol | \<float\> | 1e-4 | Tolerance for stopping criterion. |
+| \* | degree | \<int\> | 3 | Degree of the polynomial kernel function ("poly"). Ignored by all other kernels. |
+| \* | gamma | {"scale", "auto"} or \<float\> | scale | Kernel coefficient for "rbf" and "poly".<br>If gamma="scale" (default) is passed then it uses 1 / (n_features \* X.var()) as value of gamma;<br>if "auto", uses 1 / n_features. |
+| | split_criteria | {"impurity", "max_samples"} | impurity | Decides (only in multiclass classification) which column (class) to use to split the dataset in a node\*\* |
+| | criterion | {"gini", "entropy"} | entropy | The function to measure the quality of a split (only used if max_features != num_features).<br>Supported criteria are "gini" for the Gini impurity and "entropy" for the information gain. |
+| | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) allows any number |
+| | max_features | \<int\>, \<float\><br><br>or {"auto", "sqrt", "log2"} | None | The number of features to consider when looking for the split:<br>If int, then consider max_features features at each split.<br>If float, then max_features is a fraction and int(max_features \* n_features) features are considered at each split.<br>If "auto", then max_features=sqrt(n_features).<br>If "sqrt", then max_features=sqrt(n_features).<br>If "log2", then max_features=log2(n_features).<br>If None, then max_features=n_features. |
+| | splitter | {"best", "random"} | random | The strategy used to choose the feature set at each node (only used if max_features != num_features).<br>Supported strategies are "best" to choose the best feature set and "random" to choose a random combination.<br>The algorithm generates 5 candidates at most to choose from in both strategies. |
+| | normalize | \<bool\> | False | Whether standardization of features should be applied on each node with the samples that reach it |
+
+\* Hyperparameter used by the support vector classifier of every node
+
+\*\* **Splitting in a STree node**
+
+The decision function is applied to the dataset and the distances from the samples to the hyperplanes are computed in a matrix. This matrix has as many columns as there are classes (in multiclass classification) or a single column if it is a binary dataset. In binary classification only one hyperplane is computed, so only one column is needed to store the distances of the samples to it. If three or more classes are present, we need as many hyperplanes as there are classes, and therefore one column per hyperplane.
+
+In multiclass classification we have to decide which column to take into account to make the split. This depends on the hyperparameter _split_criteria_: if "impurity" is chosen, STree computes the information gain of every split candidate using each column and chooses the one that maximizes it; otherwise STree chooses the column with the most samples assigned a predicted class (the column with the most positive numbers in it).
+
+Once the column for the split is chosen, the algorithm separates the samples with positive distances to the hyperplane from the rest.
 
 ## Tests
 
 ```bash
 python -m unittest -v stree.tests
 ```
 
 ## License
 
 STree is [MIT](https://github.com/doctorado-ml/stree/blob/master/LICENSE) licensed
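For illustration, a minimal sketch of the column choice described under *Splitting in a STree node*. The helper names here are hypothetical simplifications, not STree's actual internals:

```python
# Illustrative sketch of the split-column selection described above.
# `select_split_column` and `split_node` are hypothetical names.
import numpy as np

def select_split_column(distances: np.ndarray, split_criteria: str,
                        gain_of) -> int:
    """Pick which column of the decision-function matrix drives the split.

    distances: (n_samples, n_classes) decision_function output, or
               (n_samples, 1) in the binary case.
    gain_of:   a scoring callable over the induced partition mask
               (e.g., the information gain of splitting on that mask).
    """
    if distances.shape[1] == 1:          # binary: only one hyperplane
        return 0
    if split_criteria == "max_samples":  # column with most positive entries
        return int(np.argmax((distances > 0).sum(axis=0)))
    # "impurity": column whose partition scores best under gain_of
    return int(np.argmax([gain_of(distances[:, col] > 0)
                          for col in range(distances.shape[1])]))

def split_node(X, distances, column):
    # Samples with positive distance to the hyperplane go to one child.
    mask = distances[:, column] > 0
    return X[mask], X[~mask]
```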
@@ -1,8 +1,8 @@
-overage:
+coverage:
   status:
     project:
       default:
-        target: 90%
+        target: 100%
 comment:
   layout: "reach, diff, flags, files"
   behavior: default
20 docs/Makefile (new file)

@@ -0,0 +1,20 @@
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS    ?=
SPHINXBUILD   ?= sphinx-build
SOURCEDIR     = source
BUILDDIR      = build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
3 docs/requirements.txt (new file)

@@ -0,0 +1,3 @@
sphinx
sphinx-rtd-theme
myst-parser
9 docs/source/api/Siterator.rst (new file)

@@ -0,0 +1,9 @@
Siterator
=========

.. automodule:: stree
.. autoclass:: Siterator
   :members:
   :undoc-members:
   :private-members:
   :show-inheritance:
9 docs/source/api/Snode.rst (new file)

@@ -0,0 +1,9 @@
Snode
=====

.. automodule:: stree
.. autoclass:: Snode
   :members:
   :undoc-members:
   :private-members:
   :show-inheritance:
9 docs/source/api/Splitter.rst (new file)

@@ -0,0 +1,9 @@
Splitter
========

.. automodule:: stree
.. autoclass:: Splitter
   :members:
   :undoc-members:
   :private-members:
   :show-inheritance:
9 docs/source/api/Stree.rst (new file)

@@ -0,0 +1,9 @@
Stree
=====

.. automodule:: stree
.. autoclass:: Stree
   :members:
   :undoc-members:
   :private-members:
   :show-inheritance:
11 docs/source/api/index.rst (new file)

@@ -0,0 +1,11 @@
API index
=========

.. toctree::
   :maxdepth: 2
   :caption: Contents:

   Stree
   Splitter
   Snode
   Siterator
55 docs/source/conf.py (new file)

@@ -0,0 +1,55 @@
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys

sys.path.insert(0, os.path.abspath("../../stree/"))


# -- Project information -----------------------------------------------------

project = "STree"
copyright = "2020 - 2021, Ricardo Montañana Gómez"
author = "Ricardo Montañana Gómez"

# The full version, including alpha/beta/rc tags
release = "1.0"


# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["myst_parser", "sphinx.ext.autodoc", "sphinx.ext.viewcode"]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
44 docs/source/example.md (new file)

@@ -0,0 +1,44 @@
# Examples

## Notebooks

- [](https://mybinder.org/v2/gh/Doctorado-ML/STree/master?urlpath=lab/tree/notebooks/benchmark.ipynb) Benchmark

- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/benchmark.ipynb) Benchmark

- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/features.ipynb) Some features

- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/gridsearch.ipynb) Gridsearch

- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/ensemble.ipynb) Ensembles

## Sample Code

```python
import time
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from stree import Stree

random_state = 1
X, y = load_iris(return_X_y=True)
Xtrain, Xtest, ytrain, ytest = train_test_split(
    X, y, test_size=0.2, random_state=random_state
)
now = time.time()
print("Predicting with max_features=sqrt(n_features)")
clf = Stree(random_state=random_state, max_features="auto")
clf.fit(Xtrain, ytrain)
print(f"Took {time.time() - now:.2f} seconds to train")
print(clf)
print(f"Classifier's accuracy (train): {clf.score(Xtrain, ytrain):.4f}")
print(f"Classifier's accuracy (test) : {clf.score(Xtest, ytest):.4f}")
print("=" * 40)
print("Predicting with max_features=n_features")
clf = Stree(random_state=random_state)
now = time.time()  # reset the timer so the second measurement is not cumulative
clf.fit(Xtrain, ytrain)
print(f"Took {time.time() - now:.2f} seconds to train")
print(clf)
print(f"Classifier's accuracy (train): {clf.score(Xtrain, ytrain):.4f}")
print(f"Classifier's accuracy (test) : {clf.score(Xtest, ytest):.4f}")
```
BIN docs/source/example.png (new binary file, 3.1 MiB, not shown)
28 docs/source/hyperparameters.md (new file)

@@ -0,0 +1,28 @@
# Hyperparameters

| | **Hyperparameter** | **Type/Values** | **Default** | **Meaning** |
| --- | --- | --- | --- | --- |
| \* | C | \<float\> | 1.0 | Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. |
| \* | kernel | {"linear", "poly", "rbf"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of "linear", "poly" or "rbf". |
| \* | max_iter | \<int\> | 1e5 | Hard limit on iterations within solver, or -1 for no limit. |
| \* | random_state | \<int\> | None | Controls the pseudo random number generation for shuffling the data for probability estimates. Ignored when probability is False.<br>Pass an int for reproducible output across multiple function calls |
| | max_depth | \<int\> | None | Specifies the maximum depth of the tree |
| \* | tol | \<float\> | 1e-4 | Tolerance for stopping criterion. |
| \* | degree | \<int\> | 3 | Degree of the polynomial kernel function ("poly"). Ignored by all other kernels. |
| \* | gamma | {"scale", "auto"} or \<float\> | scale | Kernel coefficient for "rbf" and "poly".<br>If gamma="scale" (default) is passed then it uses 1 / (n_features \* X.var()) as value of gamma;<br>if "auto", uses 1 / n_features. |
| | split_criteria | {"impurity", "max_samples"} | impurity | Decides (only in multiclass classification) which column (class) to use to split the dataset in a node\*\* |
| | criterion | {"gini", "entropy"} | entropy | The function to measure the quality of a split (only used if max_features != num_features).<br>Supported criteria are "gini" for the Gini impurity and "entropy" for the information gain. |
| | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) allows any number |
| | max_features | \<int\>, \<float\><br><br>or {"auto", "sqrt", "log2"} | None | The number of features to consider when looking for the split:<br>If int, then consider max_features features at each split.<br>If float, then max_features is a fraction and int(max_features \* n_features) features are considered at each split.<br>If "auto", then max_features=sqrt(n_features).<br>If "sqrt", then max_features=sqrt(n_features).<br>If "log2", then max_features=log2(n_features).<br>If None, then max_features=n_features. |
| | splitter | {"best", "random"} | random | The strategy used to choose the feature set at each node (only used if max_features != num_features).<br>Supported strategies are "best" to choose the best feature set and "random" to choose a random combination.<br>The algorithm generates 5 candidates at most to choose from in both strategies. |
| | normalize | \<bool\> | False | Whether standardization of features should be applied on each node with the samples that reach it |

\* Hyperparameter used by the support vector classifier of every node

\*\* **Splitting in a STree node**

The decision function is applied to the dataset and the distances from the samples to the hyperplanes are computed in a matrix. This matrix has as many columns as there are classes (in multiclass classification) or a single column if it is a binary dataset. In binary classification only one hyperplane is computed, so only one column is needed to store the distances of the samples to it. If three or more classes are present, we need as many hyperplanes as there are classes, and therefore one column per hyperplane.

In multiclass classification we have to decide which column to take into account to make the split. This depends on the hyperparameter _split_criteria_: if "impurity" is chosen, STree computes the information gain of every split candidate using each column and chooses the one that maximizes it; otherwise STree chooses the column with the most samples assigned a predicted class (the column with the most positive numbers in it).

Once the column for the split is chosen, the algorithm separates the samples with positive distances to the hyperplane from the rest.
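To show how the table maps onto the estimator's constructor, a short hedged example; the values are arbitrary illustrations, not recommendations, and only parameter names from the table above are used:

```python
# Illustrative use of the hyperparameters documented above.
from sklearn.datasets import load_wine
from stree import Stree

X, y = load_wine(return_X_y=True)   # a small multiclass dataset
clf = Stree(
    C=7.0,                      # SVC regularization at every node
    kernel="rbf",               # node classifiers use an RBF kernel
    max_depth=4,                # stop growing the tree below this depth
    split_criteria="impurity",  # multiclass split column by information gain
    max_features="sqrt",        # consider sqrt(n_features) per split
    splitter="random",          # random feature-set candidates at each node
    random_state=0,
)
print(clf.fit(X, y).score(X, y))
```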
15 docs/source/index.rst (new file)

@@ -0,0 +1,15 @@
Welcome to STree's documentation!
=================================

.. toctree::
   :caption: Contents:
   :titlesonly:


   stree
   install
   hyperparameters
   example
   api/index

* :ref:`genindex`
16 docs/source/install.rst (new file)

@@ -0,0 +1,16 @@
Install
=======

The main stable release

``pip install stree``

or the latest development version

``pip install git+https://github.com/doctorado-ml/stree``

Tests
*****


``python -m unittest -v stree.tests``
13 docs/source/stree.md (new file)

@@ -0,0 +1,13 @@
# Stree

[](https://app.codeship.com/projects/399170)
[](https://codecov.io/gh/doctorado-ml/stree)
[](https://www.codacy.com/gh/Doctorado-ML/STree?utm_source=github.com&utm_medium=referral&utm_content=Doctorado-ML/STree&utm_campaign=Badge_Grade)

Oblique Tree classifier based on SVM nodes. The nodes are built and split with sklearn SVC models. Stree is a sklearn estimator and can be integrated in pipelines, grid searches, etc.

![Stree](example.png)

## License

STree is [MIT](https://github.com/doctorado-ml/stree/blob/master/LICENSE) licensed
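A minimal sketch backing the estimator claim above, assuming a standard sklearn workflow; the grid values are illustrative, not tuned:

```python
# Hedged sketch: exercising the "sklearn estimator" claim with a pipeline
# and a small grid search. Grid values are arbitrary examples.
from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from stree import Stree

X, y = load_iris(return_X_y=True)
pipe = Pipeline([("scale", StandardScaler()),
                 ("tree", Stree(random_state=0))])
grid = GridSearchCV(
    pipe,
    param_grid={"tree__C": [0.1, 1, 10], "tree__kernel": ["linear", "rbf"]},
    cv=5,
)
grid.fit(X, y)
print(grid.best_params_, grid.best_score_)
```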
29 main.py (deleted file)

@@ -1,29 +0,0 @@
import time
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from stree import Stree

random_state = 1

X, y = load_iris(return_X_y=True)

Xtrain, Xtest, ytrain, ytest = train_test_split(
    X, y, test_size=0.2, random_state=random_state
)

now = time.time()
print("Predicting with max_features=sqrt(n_features)")
clf = Stree(C=0.01, random_state=random_state, max_features="auto")
clf.fit(Xtrain, ytrain)
print(f"Took {time.time() - now:.2f} seconds to train")
print(clf)
print(f"Classifier's accuracy (train): {clf.score(Xtrain, ytrain):.4f}")
print(f"Classifier's accuracy (test) : {clf.score(Xtest, ytest):.4f}")
print("=" * 40)
print("Predicting with max_features=n_features")
clf = Stree(C=0.01, random_state=random_state)
clf.fit(Xtrain, ytrain)
print(f"Took {time.time() - now:.2f} seconds to train")
print(clf)
print(f"Classifier's accuracy (train): {clf.score(Xtrain, ytrain):.4f}")
print(f"Classifier's accuracy (test) : {clf.score(Xtest, ytest):.4f}")
@@ -17,39 +17,42 @@
 },
 {
  "cell_type": "code",
- "execution_count": 1,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
   "#\n",
   "# Google Colab setup\n",
   "#\n",
-  "#!pip install git+https://github.com/doctorado-ml/stree"
+  "#!pip install git+https://github.com/doctorado-ml/stree\n",
+  "!pip install pandas"
  ]
 },
 {
  "cell_type": "code",
- "execution_count": 2,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
   "import datetime, time\n",
   "import os\n",
   "import numpy as np\n",
   "import pandas as pd\n",
   "from sklearn.model_selection import train_test_split\n",
-  "from sklearn import tree\n",
   "from sklearn.metrics import classification_report, confusion_matrix, f1_score\n",
-  "from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, BaggingClassifier\n",
+  "from sklearn.tree import DecisionTreeClassifier\n",
+  "from sklearn.naive_bayes import GaussianNB\n",
+  "from sklearn.neural_network import MLPClassifier\n",
+  "from sklearn.svm import LinearSVC\n",
   "from stree import Stree"
  ]
 },
 {
  "cell_type": "code",
- "execution_count": 3,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
   "import os\n",
   "if not os.path.isfile('data/creditcard.csv'):\n",
   "    !wget --no-check-certificate --content-disposition http://nube.jccm.es/index.php/s/Zs7SYtZQJ3RQ2H2/download\n",
   "    !tar xzf creditcard.tgz"
@@ -64,19 +67,11 @@
 },
 {
  "cell_type": "code",
- "execution_count": 4,
+ "execution_count": null,
  "metadata": {
   "tags": []
  },
- "outputs": [
-  {
-   "output_type": "stream",
-   "name": "stdout",
-   "text": [
-    "2020-11-01 11:14:06\n"
-   ]
-  }
- ],
+ "outputs": [],
  "source": [
   "print(datetime.date.today(), time.strftime(\"%H:%M:%S\"))"
  ]
@@ -90,7 +85,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": 5,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -102,19 +97,11 @@
 },
 {
  "cell_type": "code",
- "execution_count": 6,
+ "execution_count": null,
  "metadata": {
   "tags": []
  },
- "outputs": [
-  {
-   "output_type": "stream",
-   "name": "stdout",
-   "text": [
-    "Fraud: 0.173% 492\nValid: 99.827% 284,315\n"
-   ]
-  }
- ],
+ "outputs": [],
  "source": [
   "print(\"Fraud: {0:.3f}% {1}\".format(df.Class[df.Class == 1].count()*100/df.shape[0], df.Class[df.Class == 1].count()))\n",
   "print(\"Valid: {0:.3f}% {1:,}\".format(df.Class[df.Class == 0].count()*100/df.shape[0], df.Class[df.Class == 0].count()))"
@@ -122,7 +109,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": 7,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -134,19 +121,11 @@
 },
 {
  "cell_type": "code",
- "execution_count": 8,
+ "execution_count": null,
  "metadata": {
   "tags": []
  },
- "outputs": [
-  {
-   "output_type": "stream",
-   "name": "stdout",
-   "text": [
-    "X shape: (284807, 29)\ny shape: (284807,)\n"
-   ]
-  }
- ],
+ "outputs": [],
  "source": [
   "# Remove unneeded features\n",
   "y = df.Class.values\n",
@@ -163,7 +142,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": 9,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -174,27 +153,27 @@
 },
 {
  "cell_type": "code",
- "execution_count": 10,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
   "# Linear Tree\n",
-  "linear_tree = tree.DecisionTreeClassifier(random_state=random_state)"
+  "linear_tree = DecisionTreeClassifier(random_state=random_state)"
  ]
 },
 {
  "cell_type": "code",
- "execution_count": 11,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
-  "# Random Forest\n",
-  "random_forest = RandomForestClassifier(random_state=random_state)"
+  "# Naive Bayes\n",
+  "naive_bayes = GaussianNB()"
  ]
 },
 {
  "cell_type": "code",
- "execution_count": 12,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -204,22 +183,22 @@
 },
 {
  "cell_type": "code",
- "execution_count": 13,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
-  "# AdaBoost\n",
-  "adaboost = AdaBoostClassifier(random_state=random_state)"
+  "# Neural Network\n",
+  "mlp = MLPClassifier(random_state=random_state, alpha=1)"
  ]
 },
 {
  "cell_type": "code",
- "execution_count": 14,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
-  "# Bagging\n",
-  "bagging = BaggingClassifier(random_state=random_state)"
+  "# SVC (linear)\n",
+  "svc = LinearSVC(random_state=random_state, C=.01, max_iter=1e3)"
  ]
 },
 {
@@ -231,7 +210,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": 15,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -256,163 +235,16 @@
 },
 {
  "cell_type": "code",
- "execution_count": 16,
+ "execution_count": null,
  "metadata": {
   "tags": []
  },
- "outputs": [
-  {
-   "output_type": "stream",
-   "name": "stdout",
-   "text": [
-    "************************** Linear Tree **********************\n",
-    "Train Model Linear Tree took: 15.14 seconds\n",
-    "=========== Linear Tree - Train 199,364 samples =============\n",
-    " precision recall f1-score support\n",
-    "\n",
-    " 0 1.000000 1.000000 1.000000 199020\n",
-    " 1 1.000000 1.000000 1.000000 344\n",
-    "\n",
-    " accuracy 1.000000 199364\n",
-    " macro avg 1.000000 1.000000 1.000000 199364\n",
-    "weighted avg 1.000000 1.000000 1.000000 199364\n",
-    "\n",
-    "=========== Linear Tree - Test 85,443 samples =============\n",
-    " precision recall f1-score support\n",
-    "\n",
-    " 0 0.999578 0.999613 0.999596 85295\n",
-    " 1 0.772414 0.756757 0.764505 148\n",
-    "\n",
-    " accuracy 0.999192 85443\n",
-    " macro avg 0.885996 0.878185 0.882050 85443\n",
-    "weighted avg 0.999184 0.999192 0.999188 85443\n",
-    "\n",
-    "Confusion Matrix in Train\n",
-    "[[199020 0]\n",
-    " [ 0 344]]\n",
-    "Confusion Matrix in Test\n",
-    "[[85262 33]\n",
-    " [ 36 112]]\n",
-    "************************** Random Forest **********************\n",
-    "Train Model Random Forest took: 181.1 seconds\n",
-    "=========== Random Forest - Train 199,364 samples =============\n",
-    " precision recall f1-score support\n",
-    "\n",
-    " 0 1.000000 1.000000 1.000000 199020\n",
-    " 1 1.000000 1.000000 1.000000 344\n",
-    "\n",
-    " accuracy 1.000000 199364\n",
-    " macro avg 1.000000 1.000000 1.000000 199364\n",
-    "weighted avg 1.000000 1.000000 1.000000 199364\n",
-    "\n",
-    "=========== Random Forest - Test 85,443 samples =============\n",
-    " precision recall f1-score support\n",
-    "\n",
-    " 0 0.999660 0.999965 0.999812 85295\n",
-    " 1 0.975410 0.804054 0.881481 148\n",
-    "\n",
-    " accuracy 0.999625 85443\n",
-    " macro avg 0.987535 0.902009 0.940647 85443\n",
-    "weighted avg 0.999618 0.999625 0.999607 85443\n",
-    "\n",
-    "Confusion Matrix in Train\n",
-    "[[199020 0]\n",
-    " [ 0 344]]\n",
-    "Confusion Matrix in Test\n",
-    "[[85292 3]\n",
-    " [ 29 119]]\n",
-    "************************** Stree (SVM Tree) **********************\n",
-    "Train Model Stree (SVM Tree) took: 36.6 seconds\n",
-    "=========== Stree (SVM Tree) - Train 199,364 samples =============\n",
-    " precision recall f1-score support\n",
-    "\n",
-    " 0 0.999623 0.999864 0.999744 199020\n",
-    " 1 0.908784 0.781977 0.840625 344\n",
-    "\n",
-    " accuracy 0.999488 199364\n",
-    " macro avg 0.954204 0.890921 0.920184 199364\n",
-    "weighted avg 0.999467 0.999488 0.999469 199364\n",
-    "\n",
-    "=========== Stree (SVM Tree) - Test 85,443 samples =============\n",
-    " precision recall f1-score support\n",
-    "\n",
-    " 0 0.999637 0.999918 0.999777 85295\n",
-    " 1 0.943548 0.790541 0.860294 148\n",
-    "\n",
-    " accuracy 0.999555 85443\n",
-    " macro avg 0.971593 0.895229 0.930036 85443\n",
-    "weighted avg 0.999540 0.999555 0.999536 85443\n",
-    "\n",
-    "Confusion Matrix in Train\n",
-    "[[198993 27]\n",
-    " [ 75 269]]\n",
-    "Confusion Matrix in Test\n",
-    "[[85288 7]\n",
-    " [ 31 117]]\n",
-    "************************** AdaBoost model **********************\n",
-    "Train Model AdaBoost model took: 46.14 seconds\n",
-    "=========== AdaBoost model - Train 199,364 samples =============\n",
-    " precision recall f1-score support\n",
-    "\n",
-    " 0 0.999392 0.999678 0.999535 199020\n",
-    " 1 0.777003 0.648256 0.706815 344\n",
-    "\n",
-    " accuracy 0.999072 199364\n",
-    " macro avg 0.888198 0.823967 0.853175 199364\n",
-    "weighted avg 0.999008 0.999072 0.999030 199364\n",
-    "\n",
-    "=========== AdaBoost model - Test 85,443 samples =============\n",
-    " precision recall f1-score support\n",
-    "\n",
-    " 0 0.999484 0.999707 0.999596 85295\n",
-    " 1 0.806202 0.702703 0.750903 148\n",
-    "\n",
-    " accuracy 0.999192 85443\n",
-    " macro avg 0.902843 0.851205 0.875249 85443\n",
-    "weighted avg 0.999149 0.999192 0.999165 85443\n",
-    "\n",
-    "Confusion Matrix in Train\n",
-    "[[198956 64]\n",
-    " [ 121 223]]\n",
-    "Confusion Matrix in Test\n",
-    "[[85270 25]\n",
-    " [ 44 104]]\n",
-    "************************** Bagging model **********************\n",
-    "Train Model Bagging model took: 77.73 seconds\n",
-    "=========== Bagging model - Train 199,364 samples =============\n",
-    " precision recall f1-score support\n",
-    "\n",
-    " 0 0.999864 1.000000 0.999932 199020\n",
-    " 1 1.000000 0.921512 0.959153 344\n",
-    "\n",
-    " accuracy 0.999865 199364\n",
-    " macro avg 0.999932 0.960756 0.979542 199364\n",
-    "weighted avg 0.999865 0.999865 0.999862 199364\n",
-    "\n",
-    "=========== Bagging model - Test 85,443 samples =============\n",
-    " precision recall f1-score support\n",
-    "\n",
-    " 0 0.999637 0.999953 0.999795 85295\n",
-    " 1 0.966942 0.790541 0.869888 148\n",
-    "\n",
-    " accuracy 0.999590 85443\n",
-    " macro avg 0.983289 0.895247 0.934842 85443\n",
-    "weighted avg 0.999580 0.999590 0.999570 85443\n",
-    "\n",
-    "Confusion Matrix in Train\n",
-    "[[199020 0]\n",
-    " [ 27 317]]\n",
-    "Confusion Matrix in Test\n",
-    "[[85291 4]\n",
-    " [ 31 117]]\n"
-   ]
-  }
- ],
+ "outputs": [],
  "source": [
   "# Train & Test models\n",
   "models = {\n",
-  "    'Linear Tree':linear_tree, 'Random Forest': random_forest, 'Stree (SVM Tree)': stree, \n",
-  "    'AdaBoost model': adaboost, 'Bagging model': bagging\n",
+  "    'Linear Tree':linear_tree, 'Naive Bayes': naive_bayes, 'Stree ': stree, \n",
+  "    'Neural Network': mlp, 'SVC (linear)': svc\n",
  "}\n",
  "\n",
  "best_f1 = 0\n",
@@ -428,19 +260,11 @@
 },
 {
  "cell_type": "code",
- "execution_count": 17,
+ "execution_count": null,
  "metadata": {
   "tags": []
  },
- "outputs": [
-  {
-   "output_type": "stream",
-   "name": "stdout",
-   "text": [
-    "**************************************************************************************************************\n*The best f1 model is Random Forest, with a f1 score: 0.8815 in 181.07 seconds with 0.7 samples in train dataset\n**************************************************************************************************************\nModel: Linear Tree\t Time: 15.14 seconds\t f1: 0.7645\nModel: Random Forest\t Time: 181.07 seconds\t f1: 0.8815\nModel: Stree (SVM Tree)\t Time: 36.60 seconds\t f1: 0.8603\nModel: AdaBoost model\t Time: 46.14 seconds\t f1: 0.7509\nModel: Bagging model\t Time: 77.73 seconds\t f1: 0.8699\n"
-   ]
-  }
- ],
+ "outputs": [],
  "source": [
   "print(\"*\"*110)\n",
   "print(f\"*The best f1 model is {best_model}, with a f1 score: {best_f1:.4} in {best_time:.6} seconds with {train_size:,} samples in train dataset\")\n",
@@ -454,61 +278,20 @@
  "metadata": {},
  "source": [
   "**************************************************************************************************************\n",
-  "*The best f1 model is Random Forest, with a f1 score: 0.8815 in 152.54 seconds with 0.7 samples in train dataset\n",
+  "*The best f1 model is Stree (SVM Tree), with a f1 score: 0.8603 in 28.4743 seconds with 0.7 samples in train dataset\n",
   "**************************************************************************************************************\n",
-  "Model: Linear Tree\t Time: 13.52 seconds\t f1: 0.7645\n",
-  "Model: Random Forest\t Time: 152.54 seconds\t f1: 0.8815\n",
-  "Model: Stree (SVM Tree)\t Time: 32.55 seconds\t f1: 0.8603\n",
-  "Model: AdaBoost model\t Time: 47.34 seconds\t f1: 0.7509\n",
-  "Model: Gradient Boost.\t Time: 244.12 seconds\t f1: 0.5259"
- ]
-},
-{
- "cell_type": "markdown",
- "metadata": {},
- "source": [
-  "```\n",
-  "******************************************************************************************************************\n",
-  "*The best f1 model is Random Forest, with a f1 score: 0.8815 in 218.966 seconds with 0.7 samples in train dataset\n",
-  "******************************************************************************************************************\n",
-  "Model: Linear Tree Time: 23.05 seconds\t f1: 0.7645\n",
-  "Model: Random Forest\t Time: 218.97 seconds\t f1: 0.8815\n",
-  "Model: Stree (SVM Tree)\t Time: 49.45 seconds\t f1: 0.8603\n",
-  "Model: AdaBoost model\t Time: 73.83 seconds\t f1: 0.7509\n",
-  "Model: Neural Network\t Time: 25.47 seconds\t f1: 0.8328\n",
-  "Model: Bagging model\t Time: 77.93 seconds\t f1: 0.8699\n",
-  "\n",
-  "```"
+  "Model: Linear Tree\t Time: 10.25 seconds\t f1: 0.7645\n",
+  "Model: Naive Bayes\t Time: 0.10 seconds\t f1: 0.1154\n",
+  "Model: Stree (SVM Tree)\t Time: 28.47 seconds\t f1: 0.8603\n",
+  "Model: Neural Network\t Time: 9.76 seconds\t f1: 0.7381\n",
+  "Model: SVC (linear)\t Time: 8.21 seconds\t f1: 0.739"
  ]
 },
 {
  "cell_type": "code",
- "execution_count": 18,
+ "execution_count": null,
  "metadata": {},
- "outputs": [
-  {
-   "output_type": "execute_result",
-   "data": {
-    "text/plain": [
-     "{'C': 0.01,\n",
-     " 'criterion': 'entropy',\n",
-     " 'degree': 3,\n",
-     " 'gamma': 'scale',\n",
-     " 'kernel': 'linear',\n",
-     " 'max_depth': None,\n",
-     " 'max_features': None,\n",
-     " 'max_iter': 1000.0,\n",
-     " 'min_samples_split': 0,\n",
-     " 'random_state': 2020,\n",
-     " 'split_criteria': 'impurity',\n",
-     " 'splitter': 'random',\n",
-     " 'tol': 0.0001}"
-    ]
-   },
-   "metadata": {},
-   "execution_count": 18
-  }
- ],
+ "outputs": [],
  "source": [
   "stree.get_params()"
  ]
@@ -517,9 +300,9 @@
 "metadata": {
  "hide_input": false,
  "kernelspec": {
-  "display_name": "Python 3.8.4 64-bit ('general': venv)",
+  "display_name": "Python 3",
   "language": "python",
-  "name": "python38464bitgeneralvenv77203c0a6afd4428bd66253ef62753dc"
+  "name": "python3"
 },
 "language_info": {
  "codemirror_mode": {
@@ -531,7 +314,7 @@
  "name": "python",
  "nbconvert_exporter": "python",
  "pygments_lexer": "ipython3",
-  "version": "3.8.4-final"
+  "version": "3.8.2-final"
 },
 "toc": {
  "base_numbering": 1,
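The notebook's evaluation loop itself is not visible in this diff, only its removed outputs. A hedged reconstruction of what such a loop typically looks like, matching the `models` dict, the `best_f1 = 0` initializer, and the printed messages above; the `compare_models` helper name is illustrative, not the notebook's actual code:

```python
# Hedged reconstruction of the kind of train/evaluate loop whose outputs
# were stripped above. Assumes binary labels, as in the creditcard data.
import time
from sklearn.metrics import classification_report, confusion_matrix, f1_score

def compare_models(models, Xtrain, ytrain, Xtest, ytest):
    best_f1, best_model, best_time = 0.0, None, 0.0
    for name, model in models.items():
        start = time.time()
        model.fit(Xtrain, ytrain)
        elapsed = time.time() - start
        print(f"Train Model {name} took: {elapsed:.2f} seconds")
        predictions = model.predict(Xtest)
        print(classification_report(ytest, predictions, digits=6))
        print("Confusion Matrix in Test")
        print(confusion_matrix(ytest, predictions))
        score = f1_score(ytest, predictions)
        if score > best_f1:
            best_f1, best_model, best_time = score, name, elapsed
    print(f"The best f1 model is {best_model}, with a f1 score: {best_f1:.4}")
    return best_model, best_f1
```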
@@ -17,35 +17,43 @@
 },
 {
  "cell_type": "code",
- "execution_count": 1,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
   "#\n",
   "# Google Colab setup\n",
   "#\n",
-  "#!pip install git+https://github.com/doctorado-ml/stree"
+  "#!pip install git+https://github.com/doctorado-ml/stree\n",
+  "!pip install pandas"
  ]
 },
 {
  "cell_type": "code",
- "execution_count": 2,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
   "import time\n",
   "import os\n",
+  "import random\n",
+  "import warnings\n",
   "import pandas as pd\n",
   "import numpy as np\n",
   "from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier\n",
   "from sklearn.model_selection import train_test_split\n",
-  "from stree import Stree"
+  "from sklearn.exceptions import ConvergenceWarning\n",
+  "from stree import Stree\n",
+  "\n",
+  "warnings.filterwarnings(\"ignore\", category=ConvergenceWarning)"
  ]
 },
 {
  "cell_type": "code",
- "execution_count": 3,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
   "import os\n",
   "if not os.path.isfile('data/creditcard.csv'):\n",
   "    !wget --no-check-certificate --content-disposition http://nube.jccm.es/index.php/s/Zs7SYtZQJ3RQ2H2/download\n",
   "    !tar xzf creditcard.tgz"
@@ -53,30 +61,15 @@
 },
 {
  "cell_type": "code",
- "execution_count": 4,
+ "execution_count": null,
  "metadata": {
   "tags": []
  },
- "outputs": [
-  {
-   "output_type": "stream",
-   "name": "stdout",
-   "text": [
-    "Fraud: 0.173% 492\n",
-    "Valid: 99.827% 284315\n",
-    "X.shape (100492, 28) y.shape (100492,)\n",
-    "Fraud: 0.652% 655\n",
-    "Valid: 99.348% 99837\n"
-   ]
-  }
- ],
+ "outputs": [],
  "source": [
   "random_state=1\n",
   "\n",
   "def load_creditcard(n_examples=0):\n",
-  "    import pandas as pd\n",
-  "    import numpy as np\n",
-  "    import random\n",
   "    df = pd.read_csv('data/creditcard.csv')\n",
   "    print(\"Fraud: {0:.3f}% {1}\".format(df.Class[df.Class == 1].count()*100/df.shape[0], df.Class[df.Class == 1].count()))\n",
   "    print(\"Valid: {0:.3f}% {1}\".format(df.Class[df.Class == 0].count()*100/df.shape[0], df.Class[df.Class == 0].count()))\n",
@@ -127,19 +120,11 @@
 },
 {
  "cell_type": "code",
- "execution_count": 5,
+ "execution_count": null,
  "metadata": {
   "tags": []
  },
- "outputs": [
-  {
-   "output_type": "stream",
-   "name": "stdout",
-   "text": [
-    "Score Train: 0.9985073353804162\nScore Test: 0.9983746848878864\nTook 35.80 seconds\n"
-   ]
-  }
- ],
+ "outputs": [],
  "source": [
   "now = time.time()\n",
   "clf = Stree(max_depth=3, random_state=random_state, max_iter=1e3)\n",
@@ -158,7 +143,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": 6,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -169,21 +154,11 @@
 },
 {
  "cell_type": "code",
- "execution_count": 7,
+ "execution_count": null,
  "metadata": {
   "tags": []
  },
- "outputs": [
-  {
-   "output_type": "stream",
-   "name": "stdout",
-   "text": [
-    "Kernel: linear\tTime: 49.66 seconds\tScore Train: 0.9983225\tScore Test: 0.9983083\n",
-    "Kernel: rbf\tTime: 12.73 seconds\tScore Train: 0.9934891\tScore Test: 0.9934656\n",
-    "Kernel: poly\tTime: 76.24 seconds\tScore Train: 0.9972706\tScore Test: 0.9969152\n"
-   ]
-  }
- ],
+ "outputs": [],
  "source": [
   "for kernel in ['linear', 'rbf', 'poly']:\n",
   "    now = time.time()\n",
@@ -203,7 +178,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": 8,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -214,21 +189,11 @@
 },
 {
  "cell_type": "code",
- "execution_count": 9,
+ "execution_count": null,
  "metadata": {
   "tags": []
  },
- "outputs": [
-  {
-   "output_type": "stream",
-   "name": "stdout",
-   "text": [
-    "Kernel: linear\tTime: 231.51 seconds\tScore Train: 0.9984931\tScore Test: 0.9983083\n",
-    "Kernel: rbf\tTime: 114.77 seconds\tScore Train: 0.9992323\tScore Test: 0.9983083\n",
-    "Kernel: poly\tTime: 67.87 seconds\tScore Train: 0.9993319\tScore Test: 0.9985074\n"
-   ]
-  }
- ],
+ "outputs": [],
  "source": [
   "for kernel in ['linear', 'rbf', 'poly']:\n",
   "    now = time.time()\n",
@@ -241,6 +206,11 @@
  }
 ],
 "metadata": {
+ "kernelspec": {
+  "display_name": "Python 3",
+  "language": "python",
+  "name": "python3"
+ },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
@@ -251,14 +221,9 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-  "version": "3.8.4-final"
- },
- "orig_nbformat": 2,
- "kernelspec": {
-  "name": "python38464bitgeneralf6de308d3831407c8bd68d4a5e328a38",
-  "display_name": "Python 3.8.4 64-bit ('general')"
+  "version": "3.8.2-final"
  }
 },
 "nbformat": 4,
-"nbformat_minor": 2
+"nbformat_minor": 4
}
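This notebook imports AdaBoostClassifier and BaggingClassifier alongside Stree. A hedged sketch of using Stree as the base estimator of such ensembles; the hyperparameter values are arbitrary examples, and the constructor keyword depends on the sklearn version:

```python
# Hedged sketch: wrapping Stree in the sklearn ensembles this notebook
# imports. Values are illustrative only.
from sklearn.datasets import load_wine
from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier
from stree import Stree

X, y = load_wine(return_X_y=True)

# In older sklearn versions the keyword is `base_estimator`;
# in newer ones it is `estimator`.
base = Stree(max_depth=2, C=1.0, random_state=0)

bagging = BaggingClassifier(base_estimator=base, n_estimators=10,
                            random_state=0)
print("bagging :", bagging.fit(X, y).score(X, y))

# algorithm="SAMME" only needs predict(), not predict_proba(), from the
# base estimator, which keeps the sketch independent of Stree internals.
ada = AdaBoostClassifier(base_estimator=base, n_estimators=10,
                         algorithm="SAMME", random_state=0)
print("adaboost:", ada.fit(X, y).score(X, y))
```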
@@ -17,22 +17,27 @@
 },
 {
  "cell_type": "code",
- "execution_count": 1,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
   "#\n",
   "# Google Colab setup\n",
   "#\n",
-  "#!pip install git+https://github.com/doctorado-ml/stree"
+  "#!pip install git+https://github.com/doctorado-ml/stree\n",
+  "!pip install pandas"
  ]
 },
 {
  "cell_type": "code",
- "execution_count": 2,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
   "import time\n",
+  "import random\n",
+  "import warnings\n",
   "import os\n",
   "import numpy as np\n",
   "import pandas as pd\n",
   "from sklearn.svm import SVC\n",
@@ -40,19 +45,20 @@
   "from sklearn.utils.estimator_checks import check_estimator\n",
   "from sklearn.datasets import make_classification, load_iris, load_wine\n",
   "from sklearn.model_selection import train_test_split\n",
+  "from sklearn.utils.class_weight import compute_sample_weight\n",
+  "from sklearn.exceptions import ConvergenceWarning\n",
   "from stree import Stree\n",
-  "import time"
+  "warnings.filterwarnings(\"ignore\", category=ConvergenceWarning)"
  ]
 },
 {
  "cell_type": "code",
- "execution_count": 3,
+ "execution_count": null,
  "metadata": {
   "tags": []
  },
  "outputs": [],
  "source": [
   "import os\n",
   "if not os.path.isfile('data/creditcard.csv'):\n",
   "    !wget --no-check-certificate --content-disposition http://nube.jccm.es/index.php/s/Zs7SYtZQJ3RQ2H2/download\n",
   "    !tar xzf creditcard.tgz"
@@ -60,26 +66,15 @@
 },
 {
  "cell_type": "code",
- "execution_count": 4,
+ "execution_count": null,
  "metadata": {
   "tags": []
  },
- "outputs": [
-  {
-   "output_type": "stream",
-   "name": "stdout",
-   "text": [
-    "Fraud: 0.173% 492\nValid: 99.827% 284315\nX.shape (5492, 28) y.shape (5492,)\nFraud: 9.141% 502\nValid: 90.859% 4990\n[0.09183143 0.09183143 0.09183143 0.09183143] [0.09041262 0.09041262 0.09041262 0.09041262]\n"
-   ]
-  }
- ],
+ "outputs": [],
  "source": [
   "random_state=1\n",
   "\n",
   "def load_creditcard(n_examples=0):\n",
-  "    import pandas as pd\n",
-  "    import numpy as np\n",
-  "    import random\n",
   "    df = pd.read_csv('data/creditcard.csv')\n",
   "    print(\"Fraud: {0:.3f}% {1}\".format(df.Class[df.Class == 1].count()*100/df.shape[0], df.Class[df.Class == 1].count()))\n",
   "    print(\"Valid: {0:.3f}% {1}\".format(df.Class[df.Class == 0].count()*100/df.shape[0], df.Class[df.Class == 0].count()))\n",
@@ -111,17 +106,8 @@
   "Xtest = data[1]\n",
   "ytrain = data[2]\n",
   "ytest = data[3]\n",
-  "_, data = np.unique(ytrain, return_counts=True)\n",
-  "wtrain = (data[1] / np.sum(data), data[0] / np.sum(data))\n",
-  "_, data = np.unique(ytest, return_counts=True)\n",
-  "wtest = (data[1] / np.sum(data), data[0] / np.sum(data))\n",
-  "# Set weights inverse to its count class in dataset\n",
-  "weights = np.ones(Xtrain.shape[0],)\n",
-  "weights[ytrain==0] = wtrain[0]\n",
-  "weights[ytrain==1] = wtrain[1]\n",
-  "weights_test = np.ones(Xtest.shape[0],)\n",
-  "weights_test[ytest==0] = wtest[0]\n",
-  "weights_test[ytest==1] = wtest[1]\n",
+  "weights = compute_sample_weight(\"balanced\", ytrain)\n",
+  "weights_test = compute_sample_weight(\"balanced\", ytest)\n",
   "print(weights[:4], weights_test[:4])"
  ]
 },
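The hunk above swaps eleven lines of manual per-class weighting for a single `compute_sample_weight` call. A small hedged demonstration, on toy labels, of how the two schemes relate: they weight the classes in the same ratio and differ only in overall scale:

```python
# Toy demonstration of the change above; the labels are illustrative.
import numpy as np
from sklearn.utils.class_weight import compute_sample_weight

y = np.array([0, 0, 0, 0, 0, 0, 1, 1])          # imbalanced toy labels

# Old manual scheme from the removed lines: each sample weighted by the
# *other* class's proportion.
_, counts = np.unique(y, return_counts=True)
w = (counts[1] / counts.sum(), counts[0] / counts.sum())
manual = np.where(y == 0, w[0], w[1])

# New one-liner: n_samples / (n_classes * class_count) per sample.
balanced = compute_sample_weight("balanced", y)

# Both schemes weight the classes in the ratio counts[1]/counts[0];
# only the overall scale differs.
print(manual[y == 0][0] / manual[y == 1][0])      # 0.333...
print(balanced[y == 0][0] / balanced[y == 1][0])  # 0.333...
```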
@@ -142,22 +128,11 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": [
|
||||
"Accuracy of Train without weights 0.9851716961498439\n",
|
||||
"Accuracy of Train with weights 0.986732570239334\n",
|
||||
"Accuracy of Tests without weights 0.9866504854368932\n",
|
||||
"Accuracy of Tests with weights 0.9781553398058253\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"C = 23\n",
|
||||
"print(\"Accuracy of Train without weights\", Stree(C=C, random_state=1).fit(Xtrain, ytrain).score(Xtrain, ytrain))\n",
|
||||
@@ -176,21 +151,11 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": [
|
||||
"Time: 26.03s\tKernel: linear\tAccuracy_train: 0.9851716961498439\tAccuracy_test: 0.9866504854368932\n",
|
||||
"Time: 0.54s\tKernel: rbf\tAccuracy_train: 0.9947970863683663\tAccuracy_test: 0.9878640776699029\n",
|
||||
"Time: 0.43s\tKernel: poly\tAccuracy_train: 0.9960978147762747\tAccuracy_test: 0.9854368932038835\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"random_state=1\n",
|
||||
"for kernel in ['linear', 'rbf', 'poly']:\n",
|
||||
@@ -211,69 +176,11 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": [
|
||||
"************** C=0.001 ****************************\n",
|
||||
"Classifier's accuracy (train): 0.9828\n",
|
||||
"Classifier's accuracy (test) : 0.9848\n",
|
||||
"root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4426 counts=(array([0, 1]), array([3491, 353]))\n",
|
||||
"root - Down, <cgaf> - Leaf class=0 belief= 0.981716 impurity=0.1317 counts=(array([0, 1]), array([3490, 65]))\n",
|
||||
"root - Up, <cgaf> - Leaf class=1 belief= 0.996540 impurity=0.0333 counts=(array([0, 1]), array([ 1, 288]))\n",
|
||||
"\n",
|
||||
"**************************************************\n",
|
||||
"************** C=0.01 ****************************\n",
|
||||
"Classifier's accuracy (train): 0.9834\n",
|
||||
"Classifier's accuracy (test) : 0.9854\n",
|
||||
"root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4426 counts=(array([0, 1]), array([3491, 353]))\n",
|
||||
"root - Down, <cgaf> - Leaf class=0 belief= 0.982269 impurity=0.1285 counts=(array([0, 1]), array([3490, 63]))\n",
|
||||
"root - Up, <cgaf> - Leaf class=1 belief= 0.996564 impurity=0.0331 counts=(array([0, 1]), array([ 1, 290]))\n",
|
||||
"\n",
|
||||
"**************************************************\n",
|
||||
"************** C=1 ****************************\n",
|
||||
"Classifier's accuracy (train): 0.9847\n",
|
||||
"Classifier's accuracy (test) : 0.9867\n",
|
||||
"root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4426 counts=(array([0, 1]), array([3491, 353]))\n",
|
||||
"root - Down, <cgaf> - Leaf class=0 belief= 0.983371 impurity=0.1221 counts=(array([0, 1]), array([3489, 59]))\n",
|
||||
"root - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0584 counts=(array([0, 1]), array([ 2, 294]))\n",
|
||||
"root - Up - Down, <pure> - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([2]))\n",
|
||||
"root - Up - Up, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([294]))\n",
|
||||
"\n",
|
||||
"**************************************************\n",
|
||||
"************** C=5 ****************************\n",
|
||||
"Classifier's accuracy (train): 0.9852\n",
|
||||
"Classifier's accuracy (test) : 0.9867\n",
|
||||
"root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4426 counts=(array([0, 1]), array([3491, 353]))\n",
|
||||
"root - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.1205 counts=(array([0, 1]), array([3488, 58]))\n",
|
||||
"root - Down - Down, <cgaf> - Leaf class=0 belief= 0.983921 impurity=0.1188 counts=(array([0, 1]), array([3488, 57]))\n",
|
||||
"root - Down - Up, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([1]))\n",
|
||||
"root - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0812 counts=(array([0, 1]), array([ 3, 295]))\n",
|
||||
"root - Up - Down, <pure> - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([3]))\n",
|
||||
"root - Up - Up, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([295]))\n",
|
||||
"\n",
|
||||
"**************************************************\n",
|
||||
"************** C=17 ****************************\n",
|
||||
"Classifier's accuracy (train): 0.9852\n",
|
||||
"Classifier's accuracy (test) : 0.9867\n",
|
||||
"root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4426 counts=(array([0, 1]), array([3491, 353]))\n",
|
||||
"root - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.1205 counts=(array([0, 1]), array([3488, 58]))\n",
|
||||
"root - Down - Down, <cgaf> - Leaf class=0 belief= 0.983921 impurity=0.1188 counts=(array([0, 1]), array([3488, 57]))\n",
|
||||
"root - Down - Up, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([1]))\n",
|
||||
"root - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0812 counts=(array([0, 1]), array([ 3, 295]))\n",
|
||||
"root - Up - Down, <pure> - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([3]))\n",
|
||||
"root - Up - Up, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([295]))\n",
|
||||
"\n",
|
||||
"**************************************************\n",
|
||||
"64.5792 secs\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"t = time.time()\n",
|
||||
"for C in (.001, .01, 1, 5, 17):\n",
|
||||
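# The rest of this cell is elided by the hunk header; a sketch consistent
# with the recorded output (print(clf) dumps the tree, as the later
# "print(clf)" cell shows; the linear kernel is an assumption):
t = time.time()
for C in (.001, .01, 1, 5, 17):
    clf = Stree(C=C, kernel='linear', random_state=random_state)
    clf.fit(Xtrain, ytrain)
    print(f"************** C={C} ****************************")
    print(f"Classifier's accuracy (train): {clf.score(Xtrain, ytrain):.4f}")
    print(f"Classifier's accuracy (test) : {clf.score(Xtest, ytest):.4f}")
    print(clf)
    print("*" * 50)
print(f"{time.time() - t:.4f} secs")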
@@ -292,24 +199,16 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Test iterator\n",
|
||||
"Check different weays of using the iterator"
|
||||
"Check different ways of using the iterator"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": [
|
||||
"root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4426 counts=(array([0, 1]), array([3491, 353]))\nroot - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.1205 counts=(array([0, 1]), array([3488, 58]))\nroot - Down - Down, <cgaf> - Leaf class=0 belief= 0.983921 impurity=0.1188 counts=(array([0, 1]), array([3488, 57]))\nroot - Down - Up, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([1]))\nroot - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0812 counts=(array([0, 1]), array([ 3, 295]))\nroot - Up - Down, <pure> - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([3]))\nroot - Up - Up, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([295]))\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#check iterator\n",
|
||||
"for i in list(clf):\n",
|
||||
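# Both iterator cells rely on Stree implementing __iter__ (the Siterator
# class that appears later in the Strees.py diff); a sketch assuming a
# fitted clf:
for node in list(clf):   # eager: materialize every Snode first, then print
    print(node)
for node in clf:         # lazy: same traversal, one node at a time
    print(node)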
@@ -318,19 +217,11 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": [
|
||||
"root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4426 counts=(array([0, 1]), array([3491, 353]))\nroot - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.1205 counts=(array([0, 1]), array([3488, 58]))\nroot - Down - Down, <cgaf> - Leaf class=0 belief= 0.983921 impurity=0.1188 counts=(array([0, 1]), array([3488, 57]))\nroot - Down - Up, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([1]))\nroot - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0812 counts=(array([0, 1]), array([ 3, 295]))\nroot - Up - Down, <pure> - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([3]))\nroot - Up - Up, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([295]))\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#check iterator again\n",
|
||||
"for i in clf:\n",
|
||||
@@ -346,61 +237,11 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": [
|
||||
"1 functools.partial(<function check_no_attributes_set_in_init at 0x125acaee0>, 'Stree')\n",
|
||||
"2 functools.partial(<function check_estimators_dtypes at 0x125ac7040>, 'Stree')\n",
|
||||
"3 functools.partial(<function check_fit_score_takes_y at 0x125ac2ee0>, 'Stree')\n",
|
||||
"4 functools.partial(<function check_sample_weights_pandas_series at 0x125ac0820>, 'Stree')\n",
|
||||
"5 functools.partial(<function check_sample_weights_not_an_array at 0x125ac0940>, 'Stree')\n",
|
||||
"6 functools.partial(<function check_sample_weights_list at 0x125ac0a60>, 'Stree')\n",
|
||||
"7 functools.partial(<function check_sample_weights_shape at 0x125ac0b80>, 'Stree')\n",
|
||||
"8 functools.partial(<function check_sample_weights_invariance at 0x125ac0ca0>, 'Stree')\n",
|
||||
"9 functools.partial(<function check_estimators_fit_returns_self at 0x125aca040>, 'Stree')\n",
|
||||
"10 functools.partial(<function check_estimators_fit_returns_self at 0x125aca040>, 'Stree', readonly_memmap=True)\n",
|
||||
"11 functools.partial(<function check_complex_data at 0x125ac0e50>, 'Stree')\n",
|
||||
"12 functools.partial(<function check_dtype_object at 0x125ac0dc0>, 'Stree')\n",
|
||||
"13 functools.partial(<function check_estimators_empty_data_messages at 0x125ac7160>, 'Stree')\n",
|
||||
"14 functools.partial(<function check_pipeline_consistency at 0x125ac2dc0>, 'Stree')\n",
|
||||
"15 functools.partial(<function check_estimators_nan_inf at 0x125ac7280>, 'Stree')\n",
|
||||
"16 functools.partial(<function check_estimators_overwrite_params at 0x125acadc0>, 'Stree')\n",
|
||||
"17 functools.partial(<function check_estimator_sparse_data at 0x125ac0700>, 'Stree')\n",
|
||||
"18 functools.partial(<function check_estimators_pickle at 0x125ac74c0>, 'Stree')\n",
|
||||
"19 functools.partial(<function check_classifier_data_not_an_array at 0x125acd160>, 'Stree')\n",
|
||||
"20 functools.partial(<function check_classifiers_one_label at 0x125ac7b80>, 'Stree')\n",
|
||||
"21 functools.partial(<function check_classifiers_classes at 0x125aca5e0>, 'Stree')\n",
|
||||
"22 functools.partial(<function check_estimators_partial_fit_n_features at 0x125ac75e0>, 'Stree')\n",
|
||||
"23 functools.partial(<function check_classifiers_train at 0x125ac7ca0>, 'Stree')\n",
|
||||
"24 functools.partial(<function check_classifiers_train at 0x125ac7ca0>, 'Stree', readonly_memmap=True)\n",
|
||||
"25 functools.partial(<function check_classifiers_train at 0x125ac7ca0>, 'Stree', readonly_memmap=True, X_dtype='float32')\n",
|
||||
"26 functools.partial(<function check_classifiers_regression_target at 0x125acdc10>, 'Stree')\n",
|
||||
"27 functools.partial(<function check_supervised_y_no_nan at 0x125aab790>, 'Stree')\n",
|
||||
"28 functools.partial(<function check_supervised_y_2d at 0x125aca280>, 'Stree')\n",
|
||||
"29 functools.partial(<function check_estimators_unfitted at 0x125aca160>, 'Stree')\n",
|
||||
"30 functools.partial(<function check_non_transformer_estimators_n_iter at 0x125acd790>, 'Stree')\n",
|
||||
"31 functools.partial(<function check_decision_proba_consistency at 0x125acdd30>, 'Stree')\n",
|
||||
"32 functools.partial(<function check_fit2d_predict1d at 0x125ac23a0>, 'Stree')\n",
|
||||
"33 functools.partial(<function check_methods_subset_invariance at 0x125ac2550>, 'Stree')\n",
|
||||
"34 functools.partial(<function check_fit2d_1sample at 0x125ac2670>, 'Stree')\n",
|
||||
"35 functools.partial(<function check_fit2d_1feature at 0x125ac2790>, 'Stree')\n",
|
||||
"36 functools.partial(<function check_fit1d at 0x125ac28b0>, 'Stree')\n",
|
||||
"37 functools.partial(<function check_get_params_invariance at 0x125acd9d0>, 'Stree')\n",
|
||||
"38 functools.partial(<function check_set_params at 0x125acdaf0>, 'Stree')\n",
|
||||
"39 functools.partial(<function check_dict_unchanged at 0x125ac0f70>, 'Stree')\n",
|
||||
"40 functools.partial(<function check_dont_overwrite_parameters at 0x125ac2280>, 'Stree')\n",
|
||||
"41 functools.partial(<function check_fit_idempotent at 0x125acdee0>, 'Stree')\n",
|
||||
"42 functools.partial(<function check_n_features_in at 0x125acdf70>, 'Stree')\n",
|
||||
"43 functools.partial(<function check_requires_y_none at 0x125ad1040>, 'Stree')\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Make checks one by one\n",
|
||||
"c = 0\n",
|
||||
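# The check names in the recorded output are scikit-learn's estimator
# checks; a sketch of running them one by one with check_estimator's
# generate_only mode (available in the 0.22+ releases this project targets):
from sklearn.utils.estimator_checks import check_estimator

c = 0
for estimator, check in check_estimator(Stree(), generate_only=True):
    c += 1
    print(c, check)
    check(estimator)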
@@ -413,7 +254,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -430,30 +271,11 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": [
|
||||
"== Not Weighted ===\n",
|
||||
"SVC train score ..: 0.9825702393340271\n",
|
||||
"STree train score : 0.9841311134235172\n",
|
||||
"SVC test score ...: 0.9830097087378641\n",
|
||||
"STree test score .: 0.9848300970873787\n",
|
||||
"==== Weighted =====\n",
|
||||
"SVC train score ..: 0.9786680541103018\n",
|
||||
"STree train score : 0.9802289281997919\n",
|
||||
"SVC test score ...: 0.9805825242718447\n",
|
||||
"STree test score .: 0.9817961165048543\n",
|
||||
"*SVC test score ..: 0.9439939825655582\n",
|
||||
"*STree test score : 0.9476832429673473\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"svc = SVC(C=7, kernel='rbf', gamma=.001, random_state=random_state)\n",
|
||||
"clf = Stree(C=17, kernel='rbf', gamma=.001, random_state=random_state)\n",
|
||||
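# As the fit() docstring later in this changeset notes, sample_weight
# rescales C per sample, so heavily weighted points get more emphasis.
# A sketch of the weighted half of the comparison; the weight vector is a
# hypothetical stand-in for whatever the elided cell builds:
import numpy as np

weights = np.ones(Xtrain.shape[0])
weights[ytrain == 1] = 5  # hypothetical emphasis on the minority class

svc.fit(Xtrain, ytrain, sample_weight=weights)
clf.fit(Xtrain, ytrain, sample_weight=weights)
print("SVC test score ...:", svc.score(Xtest, ytest))
print("STree test score .:", clf.score(Xtest, ytest))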
@@ -477,19 +299,11 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": [
|
||||
"root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4426 counts=(array([0, 1]), array([3491, 353]))\nroot - Down, <cgaf> - Leaf class=0 belief= 0.990520 impurity=0.0773 counts=(array([0, 1]), array([3448, 33]))\nroot - Up, <cgaf> - Leaf class=1 belief= 0.881543 impurity=0.5249 counts=(array([0, 1]), array([ 43, 320]))\n\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(clf)"
|
||||
]
|
||||
@@ -503,53 +317,11 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": [
|
||||
"****************************************\n",
|
||||
"max_features None = 28\n",
|
||||
"Train score : 0.9846514047866806\n",
|
||||
"Test score .: 0.9866504854368932\n",
|
||||
"Took 10.18 seconds\n",
|
||||
"****************************************\n",
|
||||
"max_features auto = 5\n",
|
||||
"Train score : 0.9836108220603538\n",
|
||||
"Test score .: 0.9842233009708737\n",
|
||||
"Took 5.22 seconds\n",
|
||||
"****************************************\n",
|
||||
"max_features log2 = 4\n",
|
||||
"Train score : 0.9791883454734651\n",
|
||||
"Test score .: 0.9793689320388349\n",
|
||||
"Took 2.05 seconds\n",
|
||||
"****************************************\n",
|
||||
"max_features 7 = 7\n",
|
||||
"Train score : 0.9737252861602498\n",
|
||||
"Test score .: 0.9739077669902912\n",
|
||||
"Took 2.86 seconds\n",
|
||||
"****************************************\n",
|
||||
"max_features 0.5 = 14\n",
|
||||
"Train score : 0.981789802289282\n",
|
||||
"Test score .: 0.9824029126213593\n",
|
||||
"Took 48.35 seconds\n",
|
||||
"****************************************\n",
|
||||
"max_features 0.1 = 2\n",
|
||||
"Train score : 0.9638397502601457\n",
|
||||
"Test score .: 0.9648058252427184\n",
|
||||
"Took 0.35 seconds\n",
|
||||
"****************************************\n",
|
||||
"max_features 0.7 = 19\n",
|
||||
"Train score : 0.9841311134235172\n",
|
||||
"Test score .: 0.9860436893203883\n",
|
||||
"Took 20.89 seconds\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for max_features in [None, \"auto\", \"log2\", 7, .5, .1, .7]:\n",
|
||||
" now = time.time()\n",
|
||||
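# The feature counts printed above (None = 28, auto = 5, log2 = 4,
# 0.5 = 14, ...) follow the usual scikit-learn convention for resolving
# max_features; a sketch of that mapping for this 28-feature dataset:
import math

n_features = 28

def resolve_max_features(value):
    if value is None:
        return n_features                  # None -> 28
    if value == "auto":
        return int(math.sqrt(n_features))  # "auto" -> 5
    if value == "log2":
        return int(math.log2(n_features))  # "log2" -> 4
    if isinstance(value, int):
        return value                       # 7 -> 7
    return int(value * n_features)         # 0.5 -> 14, 0.1 -> 2, 0.7 -> 19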
@@ -565,9 +337,9 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.7.6 64-bit ('general': venv)",
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python37664bitgeneralvenvfbd0a23e74cf4e778460f5ffc6761f39"
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
@@ -579,9 +351,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.4-final"
|
||||
"version": "3.8.2-final"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"nbformat_minor": 4
|
||||
}
|
@@ -1,446 +1,253 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Test Gridsearch\n",
|
||||
"with different kernels and different configurations"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Setup\n",
|
||||
"Uncomment the next cell if STree is not already installed"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#\n",
|
||||
"# Google Colab setup\n",
|
||||
"#\n",
|
||||
"#!pip install git+https://github.com/doctorado-ml/stree"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"id": "zIHKVxthDZEa",
|
||||
"colab_type": "code",
|
||||
"colab": {}
|
||||
},
|
||||
"source": [
|
||||
"from sklearn.ensemble import AdaBoostClassifier\n",
|
||||
"from sklearn.svm import LinearSVC\n",
|
||||
"from sklearn.model_selection import GridSearchCV, train_test_split\n",
|
||||
"from stree import Stree"
|
||||
],
|
||||
"execution_count": 2,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"id": "IEmq50QgDZEi",
|
||||
"colab_type": "code",
|
||||
"colab": {}
|
||||
},
|
||||
"source": [
|
||||
"import os\n",
|
||||
"if not os.path.isfile('data/creditcard.csv'):\n",
|
||||
" !wget --no-check-certificate --content-disposition http://nube.jccm.es/index.php/s/Zs7SYtZQJ3RQ2H2/download\n",
|
||||
" !tar xzf creditcard.tgz"
|
||||
],
|
||||
"execution_count": 3,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"id": "z9Q-YUfBDZEq",
|
||||
"colab_type": "code",
|
||||
"colab": {},
|
||||
"outputId": "afc822fb-f16a-4302-8a67-2b9e2880159b",
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"random_state=1\n",
|
||||
"\n",
|
||||
"def load_creditcard(n_examples=0):\n",
|
||||
" import pandas as pd\n",
|
||||
" import numpy as np\n",
|
||||
" import random\n",
|
||||
" df = pd.read_csv('data/creditcard.csv')\n",
|
||||
" print(\"Fraud: {0:.3f}% {1}\".format(df.Class[df.Class == 1].count()*100/df.shape[0], df.Class[df.Class == 1].count()))\n",
|
||||
" print(\"Valid: {0:.3f}% {1}\".format(df.Class[df.Class == 0].count()*100/df.shape[0], df.Class[df.Class == 0].count()))\n",
|
||||
" y = df.Class\n",
|
||||
" X = df.drop(['Class', 'Time', 'Amount'], axis=1).values\n",
|
||||
" if n_examples > 0:\n",
|
||||
" # Take first n_examples samples\n",
|
||||
" X = X[:n_examples, :]\n",
|
||||
" y = y[:n_examples, :]\n",
|
||||
" else:\n",
|
||||
" # Take all the positive samples with a number of random negatives\n",
|
||||
" if n_examples < 0:\n",
|
||||
" Xt = X[(y == 1).ravel()]\n",
|
||||
" yt = y[(y == 1).ravel()]\n",
|
||||
" indices = random.sample(range(X.shape[0]), -1 * n_examples)\n",
|
||||
" X = np.append(Xt, X[indices], axis=0)\n",
|
||||
" y = np.append(yt, y[indices], axis=0)\n",
|
||||
" print(\"X.shape\", X.shape, \" y.shape\", y.shape)\n",
|
||||
" print(\"Fraud: {0:.3f}% {1}\".format(len(y[y == 1])*100/X.shape[0], len(y[y == 1])))\n",
|
||||
" print(\"Valid: {0:.3f}% {1}\".format(len(y[y == 0]) * 100 / X.shape[0], len(y[y == 0])))\n",
|
||||
" Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, train_size=0.7, shuffle=True, random_state=random_state, stratify=y)\n",
|
||||
" return Xtrain, Xtest, ytrain, ytest\n",
|
||||
"\n",
|
||||
"data = load_creditcard(-1000) # Take all true samples + 1000 of the others\n",
|
||||
"# data = load_creditcard(5000) # Take the first 5000 samples\n",
|
||||
"# data = load_creditcard(0) # Take all the samples\n",
|
||||
"\n",
|
||||
"Xtrain = data[0]\n",
|
||||
"Xtest = data[1]\n",
|
||||
"ytrain = data[2]\n",
|
||||
"ytest = data[3]"
|
||||
],
|
||||
"execution_count": 4,
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": [
|
||||
"Fraud: 0.173% 492\nValid: 99.827% 284315\nX.shape (1492, 28) y.shape (1492,)\nFraud: 33.177% 495\nValid: 66.823% 997\n"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Tests"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"id": "HmX3kR4PDZEw",
|
||||
"colab_type": "code",
|
||||
"colab": {}
|
||||
},
|
||||
"source": [
|
||||
"parameters = [{\n",
|
||||
" 'base_estimator': [Stree()],\n",
|
||||
" 'n_estimators': [10, 25],\n",
|
||||
" 'learning_rate': [.5, 1],\n",
|
||||
" 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
|
||||
" 'base_estimator__tol': [.1, 1e-02],\n",
|
||||
" 'base_estimator__max_depth': [3, 5, 7],\n",
|
||||
" 'base_estimator__C': [1, 7, 55],\n",
|
||||
" 'base_estimator__kernel': ['linear']\n",
|
||||
"},\n",
|
||||
"{\n",
|
||||
" 'base_estimator': [Stree()],\n",
|
||||
" 'n_estimators': [10, 25],\n",
|
||||
" 'learning_rate': [.5, 1],\n",
|
||||
" 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
|
||||
" 'base_estimator__tol': [.1, 1e-02],\n",
|
||||
" 'base_estimator__max_depth': [3, 5, 7],\n",
|
||||
" 'base_estimator__C': [1, 7, 55],\n",
|
||||
" 'base_estimator__degree': [3, 5, 7],\n",
|
||||
" 'base_estimator__kernel': ['poly']\n",
|
||||
"},\n",
|
||||
"{\n",
|
||||
" 'base_estimator': [Stree()],\n",
|
||||
" 'n_estimators': [10, 25],\n",
|
||||
" 'learning_rate': [.5, 1],\n",
|
||||
" 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
|
||||
" 'base_estimator__tol': [.1, 1e-02],\n",
|
||||
" 'base_estimator__max_depth': [3, 5, 7],\n",
|
||||
" 'base_estimator__C': [1, 7, 55],\n",
|
||||
" 'base_estimator__gamma': [.1, 1, 10],\n",
|
||||
" 'base_estimator__kernel': ['rbf']\n",
|
||||
"}]"
|
||||
],
|
||||
"execution_count": 5,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "execute_result",
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'C': 1.0,\n",
|
||||
" 'criterion': 'entropy',\n",
|
||||
" 'degree': 3,\n",
|
||||
" 'gamma': 'scale',\n",
|
||||
" 'kernel': 'linear',\n",
|
||||
" 'max_depth': None,\n",
|
||||
" 'max_features': None,\n",
|
||||
" 'max_iter': 100000.0,\n",
|
||||
" 'min_samples_split': 0,\n",
|
||||
" 'random_state': None,\n",
|
||||
" 'split_criteria': 'impurity',\n",
|
||||
" 'splitter': 'random',\n",
|
||||
" 'tol': 0.0001}"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"execution_count": 6
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"Stree().get_params()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"id": "CrcB8o6EDZE5",
|
||||
"colab_type": "code",
|
||||
"colab": {},
|
||||
"outputId": "7703413a-d563-4289-a13b-532f38f82762",
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"random_state=2020\n",
|
||||
"clf = AdaBoostClassifier(random_state=random_state, algorithm=\"SAMME\")\n",
|
||||
"grid = GridSearchCV(clf, parameters, verbose=10, n_jobs=-1, return_train_score=True)\n",
|
||||
"grid.fit(Xtrain, ytrain)"
|
||||
],
|
||||
"execution_count": 7,
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": [
|
||||
"Fitting 5 folds for each of 1008 candidates, totalling 5040 fits\n",
|
||||
"[Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers.\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 2 tasks | elapsed: 2.6s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 9 tasks | elapsed: 3.2s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 16 tasks | elapsed: 3.5s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 25 tasks | elapsed: 4.0s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 34 tasks | elapsed: 4.5s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 45 tasks | elapsed: 5.0s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 56 tasks | elapsed: 5.5s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 69 tasks | elapsed: 6.2s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 82 tasks | elapsed: 7.1s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 97 tasks | elapsed: 8.2s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 112 tasks | elapsed: 9.6s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 129 tasks | elapsed: 11.0s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 146 tasks | elapsed: 12.5s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 165 tasks | elapsed: 14.3s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 184 tasks | elapsed: 16.0s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 205 tasks | elapsed: 18.1s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 226 tasks | elapsed: 20.1s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 249 tasks | elapsed: 21.9s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 272 tasks | elapsed: 23.4s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 297 tasks | elapsed: 24.9s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 322 tasks | elapsed: 26.6s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 349 tasks | elapsed: 29.3s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 376 tasks | elapsed: 31.9s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 405 tasks | elapsed: 35.5s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 434 tasks | elapsed: 38.7s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 465 tasks | elapsed: 42.1s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 496 tasks | elapsed: 46.1s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 529 tasks | elapsed: 52.7s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 562 tasks | elapsed: 58.1s\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 597 tasks | elapsed: 1.1min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 632 tasks | elapsed: 1.3min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 669 tasks | elapsed: 1.5min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 706 tasks | elapsed: 1.6min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 745 tasks | elapsed: 1.7min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 784 tasks | elapsed: 1.8min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 825 tasks | elapsed: 1.8min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 866 tasks | elapsed: 1.8min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 909 tasks | elapsed: 1.9min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 952 tasks | elapsed: 1.9min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 997 tasks | elapsed: 2.0min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 1042 tasks | elapsed: 2.0min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 1089 tasks | elapsed: 2.1min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 1136 tasks | elapsed: 2.2min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 1185 tasks | elapsed: 2.2min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 1234 tasks | elapsed: 2.3min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 1285 tasks | elapsed: 2.4min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 1336 tasks | elapsed: 2.4min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 1389 tasks | elapsed: 2.5min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 1442 tasks | elapsed: 2.6min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 1497 tasks | elapsed: 2.6min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 1552 tasks | elapsed: 2.7min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 1609 tasks | elapsed: 2.8min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 1666 tasks | elapsed: 2.8min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 1725 tasks | elapsed: 2.9min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 1784 tasks | elapsed: 3.0min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 1845 tasks | elapsed: 3.0min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 1906 tasks | elapsed: 3.1min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 1969 tasks | elapsed: 3.2min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 2032 tasks | elapsed: 3.3min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 2097 tasks | elapsed: 3.3min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 2162 tasks | elapsed: 3.4min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 2229 tasks | elapsed: 3.5min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 2296 tasks | elapsed: 3.6min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 2365 tasks | elapsed: 3.6min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 2434 tasks | elapsed: 3.7min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 2505 tasks | elapsed: 3.8min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 2576 tasks | elapsed: 3.8min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 2649 tasks | elapsed: 3.9min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 2722 tasks | elapsed: 4.0min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 2797 tasks | elapsed: 4.1min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 2872 tasks | elapsed: 4.2min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 2949 tasks | elapsed: 4.3min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 3026 tasks | elapsed: 4.5min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 3105 tasks | elapsed: 4.7min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 3184 tasks | elapsed: 4.9min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 3265 tasks | elapsed: 5.0min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 3346 tasks | elapsed: 5.2min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 3429 tasks | elapsed: 5.4min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 3512 tasks | elapsed: 5.6min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 3597 tasks | elapsed: 5.9min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 3682 tasks | elapsed: 6.1min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 3769 tasks | elapsed: 6.3min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 3856 tasks | elapsed: 6.6min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 3945 tasks | elapsed: 6.9min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 4034 tasks | elapsed: 7.1min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 4125 tasks | elapsed: 7.4min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 4216 tasks | elapsed: 7.6min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 4309 tasks | elapsed: 7.8min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 4402 tasks | elapsed: 8.1min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 4497 tasks | elapsed: 8.5min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 4592 tasks | elapsed: 8.8min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 4689 tasks | elapsed: 9.0min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 4786 tasks | elapsed: 9.3min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 4885 tasks | elapsed: 9.6min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 4984 tasks | elapsed: 9.8min\n",
|
||||
"[Parallel(n_jobs=-1)]: Done 5040 out of 5040 | elapsed: 10.0min finished\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"output_type": "execute_result",
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"GridSearchCV(estimator=AdaBoostClassifier(algorithm='SAMME', random_state=2020),\n",
|
||||
" n_jobs=-1,\n",
|
||||
" param_grid=[{'base_estimator': [Stree(C=7, max_depth=5,\n",
|
||||
" split_criteria='max_samples',\n",
|
||||
" tol=0.01)],\n",
|
||||
" 'base_estimator__C': [1, 7, 55],\n",
|
||||
" 'base_estimator__kernel': ['linear'],\n",
|
||||
" 'base_estimator__max_depth': [3, 5, 7],\n",
|
||||
" 'base_estimator__split_criteria': ['max_samples',\n",
|
||||
" 'impurity'],\n",
|
||||
" 'base_e...\n",
|
||||
" 'learning_rate': [0.5, 1], 'n_estimators': [10, 25]},\n",
|
||||
" {'base_estimator': [Stree()],\n",
|
||||
" 'base_estimator__C': [1, 7, 55],\n",
|
||||
" 'base_estimator__gamma': [0.1, 1, 10],\n",
|
||||
" 'base_estimator__kernel': ['rbf'],\n",
|
||||
" 'base_estimator__max_depth': [3, 5, 7],\n",
|
||||
" 'base_estimator__split_criteria': ['max_samples',\n",
|
||||
" 'impurity'],\n",
|
||||
" 'base_estimator__tol': [0.1, 0.01],\n",
|
||||
" 'learning_rate': [0.5, 1],\n",
|
||||
" 'n_estimators': [10, 25]}],\n",
|
||||
" return_train_score=True, verbose=10)"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"execution_count": 7
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"source": [
|
||||
"GridSearchCV(estimator=AdaBoostClassifier(algorithm='SAMME', random_state=2020),\n",
|
||||
" n_jobs=-1,\n",
|
||||
" param_grid={'base_estimator': [Stree(C=55, max_depth=3, tol=0.01)],\n",
|
||||
" 'base_estimator__C': [7, 55],\n",
|
||||
" 'base_estimator__kernel': ['linear', 'poly', 'rbf'],\n",
|
||||
" 'base_estimator__max_depth': [3, 5],\n",
|
||||
" 'base_estimator__tol': [0.1, 0.01],\n",
|
||||
" 'learning_rate': [0.5, 1], 'n_estimators': [10, 25]},\n",
|
||||
" return_train_score=True, verbose=10)"
|
||||
],
|
||||
"cell_type": "markdown",
|
||||
"metadata": {}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"id": "ZjX88NoYDZE8",
|
||||
"colab_type": "code",
|
||||
"colab": {},
|
||||
"outputId": "285163c8-fa33-4915-8ae7-61c4f7844344",
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"print(\"Best estimator: \", grid.best_estimator_)\n",
|
||||
"print(\"Best hyperparameters: \", grid.best_params_)\n",
|
||||
"print(\"Best accuracy: \", grid.best_score_)"
|
||||
],
|
||||
"execution_count": 8,
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": [
|
||||
"Best estimator: AdaBoostClassifier(algorithm='SAMME',\n base_estimator=Stree(C=7, max_depth=5,\n split_criteria='max_samples',\n tol=0.01),\n learning_rate=0.5, n_estimators=25, random_state=2020)\nBest hyperparameters: {'base_estimator': Stree(C=7, max_depth=5, split_criteria='max_samples', tol=0.01), 'base_estimator__C': 7, 'base_estimator__kernel': 'linear', 'base_estimator__max_depth': 5, 'base_estimator__split_criteria': 'max_samples', 'base_estimator__tol': 0.01, 'learning_rate': 0.5, 'n_estimators': 25}\nBest accuracy: 0.9549825174825175\n"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"source": [
|
||||
"Best estimator: AdaBoostClassifier(algorithm='SAMME',\n",
|
||||
" base_estimator=Stree(C=55, max_depth=3, tol=0.01),\n",
|
||||
" learning_rate=0.5, n_estimators=25, random_state=2020)\n",
|
||||
"\n",
|
||||
"Best hyperparameters: {'base_estimator': Stree(C=55, max_depth=3, tol=0.01), 'base_estimator__C': 55, 'base_estimator__kernel': 'linear', 'base_estimator__max_depth': 3, 'base_estimator__tol': 0.01, 'learning_rate': 0.5, 'n_estimators': 25}\n",
|
||||
"\n",
|
||||
"Best accuracy: 0.9559440559440558"
|
||||
],
|
||||
"cell_type": "markdown",
|
||||
"metadata": {}
|
||||
},
|
||||
{
|
||||
"source": [
|
||||
"0.9511547662863451"
|
||||
],
|
||||
"cell_type": "markdown",
|
||||
"metadata": {}
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.4-final"
|
||||
},
|
||||
"orig_nbformat": 2,
|
||||
"kernelspec": {
|
||||
"name": "python38464bitgeneralvenv77203c0a6afd4428bd66253ef62753dc",
|
||||
"display_name": "Python 3.8.4 64-bit ('general': venv)"
|
||||
},
|
||||
"colab": {
|
||||
"name": "gridsearch.ipynb",
|
||||
"provenance": []
|
||||
}
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Test Gridsearch\n",
|
||||
"with different kernels and different configurations"
|
||||
]
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Setup\n",
|
||||
"Uncomment the next cell if STree is not already installed"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#\n",
|
||||
"# Google Colab setup\n",
|
||||
"#\n",
|
||||
"#!pip install git+https://github.com/doctorado-ml/stree\n",
|
||||
"!pip install pandas"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {},
|
||||
"colab_type": "code",
|
||||
"id": "zIHKVxthDZEa"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import random\n",
|
||||
"import os\n",
|
||||
"import pandas as pd\n",
|
||||
"import numpy as np\n",
|
||||
"from sklearn.ensemble import AdaBoostClassifier\n",
|
||||
"from sklearn.svm import LinearSVC\n",
|
||||
"from sklearn.model_selection import GridSearchCV, train_test_split\n",
|
||||
"from stree import Stree"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {},
|
||||
"colab_type": "code",
|
||||
"id": "IEmq50QgDZEi"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"if not os.path.isfile('data/creditcard.csv'):\n",
|
||||
" !wget --no-check-certificate --content-disposition http://nube.jccm.es/index.php/s/Zs7SYtZQJ3RQ2H2/download\n",
|
||||
" !tar xzf creditcard.tgz"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {},
|
||||
"colab_type": "code",
|
||||
"id": "z9Q-YUfBDZEq",
|
||||
"outputId": "afc822fb-f16a-4302-8a67-2b9e2880159b",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"random_state=1\n",
|
||||
"\n",
|
||||
"def load_creditcard(n_examples=0):\n",
|
||||
" df = pd.read_csv('data/creditcard.csv')\n",
|
||||
" print(\"Fraud: {0:.3f}% {1}\".format(df.Class[df.Class == 1].count()*100/df.shape[0], df.Class[df.Class == 1].count()))\n",
|
||||
" print(\"Valid: {0:.3f}% {1}\".format(df.Class[df.Class == 0].count()*100/df.shape[0], df.Class[df.Class == 0].count()))\n",
|
||||
" y = df.Class\n",
|
||||
" X = df.drop(['Class', 'Time', 'Amount'], axis=1).values\n",
|
||||
" if n_examples > 0:\n",
|
||||
" # Take first n_examples samples\n",
|
||||
" X = X[:n_examples, :]\n",
|
||||
" y = y[:n_examples, :]\n",
|
||||
" else:\n",
|
||||
" # Take all the positive samples with a number of random negatives\n",
|
||||
" if n_examples < 0:\n",
|
||||
" Xt = X[(y == 1).ravel()]\n",
|
||||
" yt = y[(y == 1).ravel()]\n",
|
||||
" indices = random.sample(range(X.shape[0]), -1 * n_examples)\n",
|
||||
" X = np.append(Xt, X[indices], axis=0)\n",
|
||||
" y = np.append(yt, y[indices], axis=0)\n",
|
||||
" print(\"X.shape\", X.shape, \" y.shape\", y.shape)\n",
|
||||
" print(\"Fraud: {0:.3f}% {1}\".format(len(y[y == 1])*100/X.shape[0], len(y[y == 1])))\n",
|
||||
" print(\"Valid: {0:.3f}% {1}\".format(len(y[y == 0]) * 100 / X.shape[0], len(y[y == 0])))\n",
|
||||
" Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, train_size=0.7, shuffle=True, random_state=random_state, stratify=y)\n",
|
||||
" return Xtrain, Xtest, ytrain, ytest\n",
|
||||
"\n",
|
||||
"data = load_creditcard(-1000) # Take all true samples + 1000 of the others\n",
|
||||
"# data = load_creditcard(5000) # Take the first 5000 samples\n",
|
||||
"# data = load_creditcard(0) # Take all the samples\n",
|
||||
"\n",
|
||||
"Xtrain = data[0]\n",
|
||||
"Xtest = data[1]\n",
|
||||
"ytrain = data[2]\n",
|
||||
"ytest = data[3]"
|
||||
]
|
||||
},
|
||||
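# The interesting branch of load_creditcard is the negative-n path used
# below: keep every fraud row and pad with |n| randomly sampled rows.
# In isolation, with X, y, random and np as the cell defines them (note
# that the positive-n path slices a pandas Series, which would need
# y[:n_examples] without the second axis):
frauds_X = X[(y == 1).ravel()]
frauds_y = y[(y == 1).ravel()]
indices = random.sample(range(X.shape[0]), 1000)  # n_examples = -1000
X_bal = np.append(frauds_X, X[indices], axis=0)
y_bal = np.append(frauds_y, y[indices], axis=0)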
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Tests"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {},
|
||||
"colab_type": "code",
|
||||
"id": "HmX3kR4PDZEw"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"parameters = [{\n",
|
||||
" 'base_estimator': [Stree(random_state=random_state)],\n",
|
||||
" 'n_estimators': [10, 25],\n",
|
||||
" 'learning_rate': [.5, 1],\n",
|
||||
" 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
|
||||
" 'base_estimator__tol': [.1, 1e-02],\n",
|
||||
" 'base_estimator__max_depth': [3, 5, 7],\n",
|
||||
" 'base_estimator__C': [1, 7, 55],\n",
|
||||
" 'base_estimator__kernel': ['linear']\n",
|
||||
"},\n",
|
||||
"{\n",
|
||||
" 'base_estimator': [Stree(random_state=random_state)],\n",
|
||||
" 'n_estimators': [10, 25],\n",
|
||||
" 'learning_rate': [.5, 1],\n",
|
||||
" 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
|
||||
" 'base_estimator__tol': [.1, 1e-02],\n",
|
||||
" 'base_estimator__max_depth': [3, 5, 7],\n",
|
||||
" 'base_estimator__C': [1, 7, 55],\n",
|
||||
" 'base_estimator__degree': [3, 5, 7],\n",
|
||||
" 'base_estimator__kernel': ['poly']\n",
|
||||
"},\n",
|
||||
"{\n",
|
||||
" 'base_estimator': [Stree(random_state=random_state)],\n",
|
||||
" 'n_estimators': [10, 25],\n",
|
||||
" 'learning_rate': [.5, 1],\n",
|
||||
" 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
|
||||
" 'base_estimator__tol': [.1, 1e-02],\n",
|
||||
" 'base_estimator__max_depth': [3, 5, 7],\n",
|
||||
" 'base_estimator__C': [1, 7, 55],\n",
|
||||
" 'base_estimator__gamma': [.1, 1, 10],\n",
|
||||
" 'base_estimator__kernel': ['rbf']\n",
|
||||
"}]"
|
||||
]
|
||||
},
|
||||
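# The double-underscore keys route parameters through AdaBoost into the
# wrapped Stree. The earlier "Fitting 5 folds for each of 1008 candidates"
# log line checks out against these three sub-grids:
linear = 2 * 2 * 2 * 2 * 3 * 3  # n_estimators, learning_rate, split_criteria, tol, max_depth, C -> 144
poly = linear * 3               # extra degree axis -> 432
rbf = linear * 3                # extra gamma axis  -> 432
print(linear + poly + rbf)      # 1008 candidates; times 5 folds = 5040 fits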
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"Stree().get_params()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {},
|
||||
"colab_type": "code",
|
||||
"id": "CrcB8o6EDZE5",
|
||||
"outputId": "7703413a-d563-4289-a13b-532f38f82762",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"clf = AdaBoostClassifier(random_state=random_state, algorithm=\"SAMME\")\n",
|
||||
"grid = GridSearchCV(clf, parameters, verbose=5, n_jobs=-1, return_train_score=True)\n",
|
||||
"grid.fit(Xtrain, ytrain)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {},
|
||||
"colab_type": "code",
|
||||
"id": "ZjX88NoYDZE8",
|
||||
"outputId": "285163c8-fa33-4915-8ae7-61c4f7844344",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(\"Best estimator: \", grid.best_estimator_)\n",
|
||||
"print(\"Best hyperparameters: \", grid.best_params_)\n",
|
||||
"print(\"Best accuracy: \", grid.best_score_)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Best estimator: AdaBoostClassifier(algorithm='SAMME',\n",
|
||||
" base_estimator=Stree(C=55, max_depth=7, random_state=1,\n",
|
||||
" split_criteria='max_samples', tol=0.1),\n",
|
||||
" learning_rate=0.5, n_estimators=25, random_state=1)\n",
|
||||
"Best hyperparameters: {'base_estimator': Stree(C=55, max_depth=7, random_state=1, split_criteria='max_samples', tol=0.1), 'base_estimator__C': 55, 'base_estimator__kernel': 'linear', 'base_estimator__max_depth': 7, 'base_estimator__split_criteria': 'max_samples', 'base_estimator__tol': 0.1, 'learning_rate': 0.5, 'n_estimators': 25}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Best accuracy: 0.9511777695988222"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"name": "gridsearch.ipynb",
|
||||
"provenance": []
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.2-final"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
@@ -1,4 +1 @@
|
||||
numpy
|
||||
scikit-learn==0.23.2
|
||||
pandas
|
||||
ipympl
|
||||
scikit-learn>0.24
|
1
runtime.txt
Normal file
@@ -0,0 +1 @@
|
||||
python-3.8
|
21
setup.py
@@ -1,7 +1,5 @@
|
||||
import setuptools
|
||||
|
||||
__version__ = "0.9rc6"
|
||||
__author__ = "Ricardo Montañana Gómez"
|
||||
import stree
|
||||
|
||||
|
||||
def readme():
|
||||
@@ -9,28 +7,29 @@ def readme():
|
||||
return f.read()
|
||||
|
||||
|
||||
VERSION = stree.__version__
|
||||
setuptools.setup(
|
||||
name="STree",
|
||||
version=__version__,
|
||||
license="MIT License",
|
||||
version=stree.__version__,
|
||||
license=stree.__license__,
|
||||
description="Oblique decision tree with svm nodes",
|
||||
long_description=readme(),
|
||||
long_description_content_type="text/markdown",
|
||||
packages=setuptools.find_packages(),
|
||||
url="https://github.com/doctorado-ml/stree",
|
||||
author=__author__,
|
||||
author_email="ricardo.montanana@alu.uclm.es",
|
||||
url=stree.__url__,
|
||||
author=stree.__author__,
|
||||
author_email=stree.__author_email__,
|
||||
keywords="scikit-learn oblique-classifier oblique-decision-tree decision-\
|
||||
tree svm svc",
|
||||
classifiers=[
|
||||
"Development Status :: 4 - Beta",
|
||||
"License :: OSI Approved :: MIT License",
|
||||
"Development Status :: 5 - Production/Stable",
|
||||
"License :: OSI Approved :: " + stree.__license__,
|
||||
"Programming Language :: Python :: 3.8",
|
||||
"Natural Language :: English",
|
||||
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
||||
"Intended Audience :: Science/Research",
|
||||
],
|
||||
install_requires=["scikit-learn==0.23.2", "numpy", "ipympl"],
|
||||
install_requires=["scikit-learn", "numpy", "ipympl"],
|
||||
test_suite="stree.tests",
|
||||
zip_safe=False,
|
||||
)
|
||||
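# This refactor makes the package the single source of truth: setup.py now
# imports stree and reads its metadata, so stree/__init__.py has to export
# these dunders (a sketch; the values shown are placeholders, not taken
# from the repository):
__version__ = "1.0"  # placeholder
__author__ = "Ricardo Montañana Gómez"
__author_email__ = "ricardo.montanana@alu.uclm.es"
__license__ = "MIT License"
__url__ = "https://github.com/doctorado-ml/stree"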
|
516
stree/Strees.py
@@ -1,9 +1,5 @@
|
||||
"""
|
||||
__author__ = "Ricardo Montañana Gómez"
|
||||
__copyright__ = "Copyright 2020, Ricardo Montañana Gómez"
|
||||
__license__ = "MIT"
|
||||
__version__ = "0.9"
|
||||
Build an oblique tree classifier based on SVM Trees
|
||||
Oblique decision tree classifier based on SVM nodes
|
||||
"""
|
||||
|
||||
import os
|
||||
@@ -15,7 +11,8 @@ from typing import Optional
|
||||
import numpy as np
|
||||
from sklearn.base import BaseEstimator, ClassifierMixin
|
||||
from sklearn.svm import SVC, LinearSVC
|
||||
from sklearn.utils import check_consistent_length
|
||||
from sklearn.feature_selection import SelectKBest
|
||||
from sklearn.preprocessing import StandardScaler
|
||||
from sklearn.utils.multiclass import check_classification_targets
|
||||
from sklearn.exceptions import ConvergenceWarning
|
||||
from sklearn.utils.validation import (
|
||||
@@ -24,7 +21,6 @@ from sklearn.utils.validation import (
|
||||
check_is_fitted,
|
||||
_check_sample_weight,
|
||||
)
|
||||
from sklearn.metrics._classification import _weighted_sum, _check_targets
|
||||
|
||||
|
||||
class Snode:
|
||||
@@ -41,6 +37,7 @@ class Snode:
|
||||
impurity: float,
|
||||
title: str,
|
||||
weight: np.ndarray = None,
|
||||
scaler: StandardScaler = None,
|
||||
):
|
||||
self._clf = clf
|
||||
self._title = title
|
||||
@@ -58,6 +55,7 @@ class Snode:
|
||||
self._features = features
|
||||
self._impurity = impurity
|
||||
self._partition_column: int = -1
|
||||
self._scaler = scaler
|
||||
|
||||
@classmethod
|
||||
def copy(cls, node: "Snode") -> "Snode":
|
||||
@@ -68,6 +66,8 @@ class Snode:
|
||||
node._features,
|
||||
node._impurity,
|
||||
node._title,
|
||||
node._sample_weight,
|
||||
node._scaler,
|
||||
)
|
||||
|
||||
def set_partition_column(self, col: int):
|
||||
@@ -79,6 +79,30 @@ class Snode:
|
||||
def set_down(self, son):
|
||||
self._down = son
|
||||
|
||||
def set_title(self, title):
|
||||
self._title = title
|
||||
|
||||
def set_classifier(self, clf):
|
||||
self._clf = clf
|
||||
|
||||
def set_features(self, features):
|
||||
self._features = features
|
||||
|
||||
def set_impurity(self, impurity):
|
||||
self._impurity = impurity
|
||||
|
||||
def get_title(self) -> str:
|
||||
return self._title
|
||||
|
||||
def get_classifier(self) -> SVC:
|
||||
return self._clf
|
||||
|
||||
def get_impurity(self) -> float:
|
||||
return self._impurity
|
||||
|
||||
def get_features(self) -> np.array:
|
||||
return self._features
|
||||
|
||||
def set_up(self, son):
|
||||
self._up = son
|
||||
|
||||
@@ -117,12 +141,11 @@ class Snode:
|
||||
f"{self._belief: .6f} impurity={self._impurity:.4f} "
|
||||
f"counts={count_values}"
|
||||
)
|
||||
else:
|
||||
return (
|
||||
f"{self._title} feaures={self._features} impurity="
|
||||
f"{self._impurity:.4f} "
|
||||
f"counts={count_values}"
|
||||
)
|
||||
return (
|
||||
f"{self._title} feaures={self._features} impurity="
|
||||
f"{self._impurity:.4f} "
|
||||
f"counts={count_values}"
|
||||
)
|
||||
|
||||
|
||||
class Siterator:
|
||||
@@ -150,10 +173,11 @@ class Splitter:
|
||||
self,
|
||||
clf: SVC = None,
|
||||
criterion: str = None,
|
||||
splitter_type: str = None,
|
||||
feature_select: str = None,
|
||||
criteria: str = None,
|
||||
min_samples_split: int = None,
|
||||
random_state=None,
|
||||
normalize=False,
|
||||
):
|
||||
self._clf = clf
|
||||
self._random_state = random_state
|
||||
@@ -162,7 +186,8 @@ class Splitter:
|
||||
self._criterion = criterion
|
||||
self._min_samples_split = min_samples_split
|
||||
self._criteria = criteria
|
||||
self._splitter_type = splitter_type
|
||||
self._feature_select = feature_select
|
||||
self._normalize = normalize
|
||||
|
||||
if clf is None:
|
||||
raise ValueError(f"clf has to be a sklearn estimator, got({clf})")
|
||||
@@ -180,9 +205,10 @@ class Splitter:
|
||||
f"criteria has to be max_samples or impurity; got ({criteria})"
|
||||
)
|
||||
|
||||
if splitter_type not in ["random", "best"]:
|
||||
if feature_select not in ["random", "best"]:
|
||||
raise ValueError(
|
||||
f"splitter must be either random or best, got({splitter_type})"
|
||||
"splitter must be either random or best, got "
|
||||
f"({feature_select})"
|
||||
)
|
||||
self.criterion_function = getattr(self, f"_{self._criterion}")
|
||||
self.decision_criteria = getattr(self, f"_{self._criteria}")
|
||||
@@ -197,6 +223,18 @@ class Splitter:
|
||||
|
||||
@staticmethod
|
||||
def _entropy(y: np.array) -> float:
|
||||
"""Compute entropy of a labels set
|
||||
|
||||
Parameters
|
||||
----------
|
||||
y : np.array
|
||||
set of labels
|
||||
|
||||
Returns
|
||||
-------
|
||||
float
|
||||
entropy
|
||||
"""
|
||||
n_labels = len(y)
|
||||
if n_labels <= 1:
|
||||
return 0
|
||||
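# A standalone sketch of the entropy the new docstring describes (base-2
# here; the truncated implementation may use a different log base):
import numpy as np

def entropy(y):
    _, counts = np.unique(y, return_counts=True)
    proportions = counts / counts.sum()
    return float(-np.sum(proportions * np.log2(proportions)))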
@@ -215,6 +253,22 @@ class Splitter:
|
||||
def information_gain(
|
||||
self, labels: np.array, labels_up: np.array, labels_dn: np.array
|
||||
) -> float:
|
||||
"""Compute information gain of a split candidate
|
||||
|
||||
Parameters
|
||||
----------
|
||||
labels : np.array
|
||||
labels of the dataset
|
||||
labels_up : np.array
|
||||
labels of one side
|
||||
labels_dn : np.array
|
||||
labels on the other side
|
||||
|
||||
Returns
|
||||
-------
|
||||
float
|
||||
information gain
|
||||
"""
|
||||
imp_prev = self.criterion_function(labels)
|
||||
card_up = card_dn = imp_up = imp_dn = 0
|
||||
if labels_up is not None:
|
||||
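# The gain the docstring defines, written out: previous impurity minus the
# cardinality-weighted impurities of the two sides (a sketch, not the exact
# elided body):
def information_gain(imp_prev, imp_up, imp_dn, card_up, card_dn):
    total = card_up + card_dn
    return imp_prev - (card_up / total) * imp_up - (card_dn / total) * imp_dn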
@@ -237,6 +291,23 @@ class Splitter:
|
||||
def _select_best_set(
|
||||
self, dataset: np.array, labels: np.array, features_sets: list
|
||||
) -> list:
|
||||
"""Return the best set of features among feature_sets, the criterion is
|
||||
the information gain
|
||||
|
||||
Parameters
|
||||
----------
|
||||
dataset : np.array
|
||||
array of samples (# samples, # features)
|
||||
labels : np.array
|
||||
array of labels
|
||||
features_sets : list
|
||||
list of features sets to check
|
||||
|
||||
Returns
|
||||
-------
|
||||
list
|
||||
best feature set
|
||||
"""
|
||||
max_gain = 0
|
||||
selected = None
|
||||
warnings.filterwarnings("ignore", category=ConvergenceWarning)
|
||||
@@ -255,15 +326,26 @@ class Splitter:
|
||||
|
||||
@staticmethod
|
||||
def _generate_spaces(features: int, max_features: int) -> list:
|
||||
"""Generate at most 5 feature random combinations
|
||||
|
||||
Parameters
|
||||
----------
|
||||
features : int
|
||||
number of features in each combination
|
||||
max_features : int
|
||||
number of features in dataset
|
||||
|
||||
Returns
|
||||
-------
|
||||
list
|
||||
list with up to 5 combination of features randomly selected
|
||||
"""
|
||||
comb = set()
|
||||
# Generate at most 5 combinations
|
||||
if max_features == features:
|
||||
set_length = 1
|
||||
else:
|
||||
number = factorial(features) / (
|
||||
factorial(max_features) * factorial(features - max_features)
|
||||
)
|
||||
set_length = min(5, number)
|
||||
number = factorial(features) / (
|
||||
factorial(max_features) * factorial(features - max_features)
|
||||
)
|
||||
set_length = min(5, number)
|
||||
while len(comb) < set_length:
|
||||
comb.add(
|
||||
tuple(sorted(random.sample(range(features), max_features)))
|
||||
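# Worked example of the combination cap above: with 28 features and
# max_features=5 there are C(28, 5) = 98280 candidate tuples, so the loop
# stops after min(5, 98280) = 5 distinct random combinations:
import random
from math import factorial

features, max_features = 28, 5
number = factorial(features) / (factorial(max_features) * factorial(features - max_features))
comb = set()
while len(comb) < min(5, number):
    comb.add(tuple(sorted(random.sample(range(features), max_features))))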
@@ -272,33 +354,78 @@ class Splitter:
|
||||
|
||||
def _get_subspaces_set(
|
||||
self, dataset: np.array, labels: np.array, max_features: int
|
||||
) -> np.array:
|
||||
features_sets = self._generate_spaces(dataset.shape[1], max_features)
|
||||
if len(features_sets) > 1:
|
||||
if self._splitter_type == "random":
|
||||
index = random.randint(0, len(features_sets) - 1)
|
||||
return features_sets[index]
|
||||
else:
|
||||
return self._select_best_set(dataset, labels, features_sets)
|
||||
else:
|
||||
return features_sets[0]
|
||||
) -> tuple:
|
||||
"""Compute the indices of the features selected by splitter depending
|
||||
on the self._feature_select hyperparameter
|
||||
|
||||
Parameters
|
||||
----------
|
||||
dataset : np.array
|
||||
array of samples
|
||||
labels : np.array
|
||||
labels of the dataset
|
||||
max_features : int
|
||||
number of features of the subspace
|
||||
(<= number of features in dataset)
|
||||
|
||||
Returns
|
||||
-------
|
||||
tuple
|
||||
indices of the features selected
|
||||
"""
|
||||
if dataset.shape[1] == max_features:
|
||||
# No feature reduction applies
|
||||
return tuple(range(dataset.shape[1]))
|
||||
if self._feature_select == "random":
|
||||
features_sets = self._generate_spaces(
|
||||
dataset.shape[1], max_features
|
||||
)
|
||||
return self._select_best_set(dataset, labels, features_sets)
|
||||
# Take KBest features
|
||||
return (
|
||||
SelectKBest(k=max_features)
|
||||
.fit(dataset, labels)
|
||||
.get_support(indices=True)
|
||||
)
|
||||
|
||||
def get_subspace(
|
||||
self, dataset: np.array, labels: np.array, max_features: int
|
||||
) -> tuple:
|
||||
"""Return the best/random subspace to make a split"""
|
||||
"""Re3turn a subspace of the selected dataset of max_features length.
|
||||
Depending on the feature_select hyperparameter
|
||||
|
||||
Parameters
|
||||
----------
|
||||
dataset : np.array
|
||||
array of samples (# samples, # features)
|
||||
labels : np.array
|
||||
labels of the dataset
|
||||
max_features : int
|
||||
number of features to form the subspace
|
||||
|
||||
Returns
|
||||
-------
|
||||
tuple
|
||||
tuple with the dataset with only the features selected and the
|
||||
indices of the features selected
|
||||
"""
|
||||
indices = self._get_subspaces_set(dataset, labels, max_features)
|
||||
return dataset[:, indices], indices
|
||||
|
||||
def _impurity(self, data: np.array, y: np.array) -> np.array:
|
||||
"""return column of dataset to be taken into account to split dataset
|
||||
|
||||
:param data: distances to hyper plane of every class
|
||||
:type data: np.array (m, n_classes)
|
||||
:param y: vector of labels (classes)
|
||||
:type y: np.array (m,)
|
||||
:return: column of dataset to be taken into account to split dataset
|
||||
:rtype: int
|
||||
Parameters
|
||||
----------
|
||||
data : np.array
|
||||
distances to hyper plane of every class
|
||||
y : np.array
|
||||
vector of labels (classes)
|
||||
|
||||
Returns
|
||||
-------
|
||||
np.array
|
||||
column of dataset to be taken into account to split dataset
|
||||
"""
|
||||
max_gain = 0
|
||||
selected = -1
|
||||
@@ -315,12 +442,17 @@ class Splitter:
|
||||
def _max_samples(data: np.array, y: np.array) -> np.array:
|
||||
"""return column of dataset to be taken into account to split dataset
|
||||
|
||||
:param data: distances to hyper plane of every class
|
||||
:type data: np.array (m, n_classes)
|
||||
:param y: vector of labels (classes)
|
||||
:type y: np.array (m,)
|
||||
:return: column of dataset to be taken into account to split dataset
|
||||
:rtype: int
|
||||
Parameters
|
||||
----------
|
||||
data : np.array
|
||||
distances to hyper plane of every class
|
||||
y : np.array
|
||||
vector of labels (classes)
|
||||
|
||||
Returns
|
||||
-------
|
||||
np.array
|
||||
column of dataset to be taken into account to split dataset
|
||||
"""
|
||||
# select the class with max number of samples
|
||||
_, samples = np.unique(y, return_counts=True)
|
||||
@@ -328,8 +460,16 @@ class Splitter:
|
||||
|
||||
def partition(self, samples: np.array, node: Snode, train: bool):
|
||||
"""Set the criteria to split arrays. Compute the indices of the samples
|
||||
that should go to one side of the tree (down)
|
||||
that should go to one side of the tree (up)
|
||||
|
||||
Parameters
|
||||
----------
|
||||
samples : np.array
|
||||
array of samples (# samples, # features)
|
||||
node : Snode
|
||||
Node of the tree where partition is going to be made
|
||||
train : bool
|
||||
Train time - True / Test time - False
|
||||
"""
|
||||
# data contains the distances of every sample to every class hyperplane
|
||||
# array of (m, nc) nc = # classes
|
||||
@@ -350,22 +490,25 @@ class Splitter:
|
||||
# in predict time just use the column computed in train time
|
||||
# is taking the classifier of class <col>
|
||||
col = node.get_partition_column()
|
||||
if col == -1:
|
||||
# No partition is producing information gain
|
||||
data = np.ones(data.shape)
|
||||
if col == -1:
|
||||
# No partition is producing information gain
|
||||
data = np.ones(data.shape)
|
||||
data = data[:, col]
|
||||
self._up = data > 0
|
||||
|
||||
def part(self, origin: np.array) -> list:
|
||||
"""Split an array in two based on indices (down) and its complement
|
||||
partition has to be called first to establish down indices
|
||||
"""Split an array in two based on indices (self._up) and its complement
|
||||
partition has to be called first to establish up indices
|
||||
|
||||
:param origin: dataset to split
|
||||
:type origin: np.array
|
||||
:param down: indices to use to split array
|
||||
:type down: np.array
|
||||
:return: list with two splits of the array
|
||||
:rtype: list
|
||||
Parameters
|
||||
----------
|
||||
origin : np.array
|
||||
dataset to split
|
||||
|
||||
Returns
|
||||
-------
|
||||
list
|
||||
list with two splits of the array
|
||||
"""
|
||||
down = ~self._up
|
||||
return [
|
||||
@@ -373,19 +516,26 @@ class Splitter:
|
||||
origin[down] if any(down) else None,
|
||||
]
|
||||
|
||||
    @staticmethod
    def _distances(node: Snode, data: np.ndarray) -> np.array:
    def _distances(self, node: Snode, data: np.ndarray) -> np.array:
        """Compute distances of the samples to the hyperplane of the node

        :param node: node containing the svm classifier
        :type node: Snode
        :param data: samples to find out distance to hyperplane
        :type data: np.ndarray
        :return: array of shape (m, nc) with the distances of every sample to
        the hyperplane of every class. nc = # of classes
        :rtype: np.array
        Parameters
        ----------
        node : Snode
            node containing the svm classifier
        data : np.ndarray
            samples to compute distance to hyperplane

        Returns
        -------
        np.array
            array of shape (m, nc) with the distances of every sample to
            the hyperplane of every class. nc = # of classes
        """
        return node._clf.decision_function(data[:, node._features])
        X_transformed = data[:, node._features]
        if self._normalize:
            X_transformed = node._scaler.transform(X_transformed)
        return node._clf.decision_function(X_transformed)
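For context, sklearn's decision_function yields the (m, nc) shape the docstring describes in the one-vs-rest multiclass case:

from sklearn.datasets import load_iris
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
clf = SVC(decision_function_shape="ovr").fit(X, y)
print(clf.decision_function(X).shape)  # (150, 3): one column per class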
class Stree(BaseEstimator, ClassifierMixin):
@@ -411,6 +561,7 @@ class Stree(BaseEstimator, ClassifierMixin):
        min_samples_split: int = 0,
        max_features=None,
        splitter: str = "random",
        normalize: bool = False,
    ):
        self.max_iter = max_iter
        self.C = C
@@ -425,9 +576,11 @@ class Stree(BaseEstimator, ClassifierMixin):
        self.max_features = max_features
        self.criterion = criterion
        self.splitter = splitter
        self.normalize = normalize

    def _more_tags(self) -> dict:
        """Required by sklearn to supply features of the classifier
        make mandatory the labels array

        :return: the tag required
        :rtype: dict
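The hunk cuts the body off here; in sklearn's tag convention, making the labels array mandatory is usually expressed as below (a sketch, not necessarily the exact body):

    def _more_tags(self) -> dict:
        # requires_y=True tells sklearn's estimator checks that fit() needs y
        return {"requires_y": True}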
@@ -439,16 +592,19 @@ class Stree(BaseEstimator, ClassifierMixin):
    ) -> "Stree":
        """Build the tree based on the dataset of samples and its labels

        :param X: dataset of samples to make predictions
        :type X: np.array
        :param y: samples labels
        :type y: np.array
        :param sample_weight: weights of the samples. Rescale C per sample.
        High weights force the classifier to put more emphasis on these points
        :type sample_weight: np.array optional
        :raises ValueError: if parameters C or max_depth are out of bounds
        :return: itself to be able to chain actions: fit().predict() ...
        :rtype: Stree
        Returns
        -------
        Stree
            itself to be able to chain actions: fit().predict() ...

        Raises
        ------
        ValueError
            if C < 0
        ValueError
            if max_depth < 1
        ValueError
            if all samples have 0 or negative weights
        """
        # Check parameters are Ok.
        if self.C < 0:
@@ -465,21 +621,28 @@ class Stree(BaseEstimator, ClassifierMixin):
                f"Maximum depth has to be greater than 1... got (max_depth=\
                {self.max_depth})"
            )

        kernels = ["linear", "rbf", "poly", "sigmoid"]
        if self.kernel not in kernels:
            raise ValueError(f"Kernel {self.kernel} not in {kernels}")
        check_classification_targets(y)
        X, y = check_X_y(X, y)
        sample_weight = _check_sample_weight(
            sample_weight, X, dtype=np.float64
        )
        if not any(sample_weight):
            raise ValueError(
                "Invalid input - all samples have zero or negative weights."
            )
        check_classification_targets(y)
        # Initialize computed parameters
        self.splitter_ = Splitter(
            clf=self._build_clf(),
            criterion=self.criterion,
            splitter_type=self.splitter,
            feature_select=self.splitter,
            criteria=self.split_criteria,
            random_state=self.random_state,
            min_samples_split=self.min_samples_split,
            normalize=self.normalize,
        )
        if self.random_state is not None:
            random.seed(self.random_state)
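A short usage sketch of the validated fit() path (dataset and parameter values are illustrative):

from sklearn.datasets import load_iris
from stree import Stree

X, y = load_iris(return_X_y=True)
clf = Stree(kernel="linear", C=1.0, random_state=0).fit(X, y)
print(clf.score(X, y))  # fit() raises ValueError on C < 0 or a bogus kernel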
@@ -491,7 +654,8 @@ class Stree(BaseEstimator, ClassifierMixin):
        self.n_features_in_ = X.shape[1]
        self.max_features_ = self._initialize_max_features()
        self.tree_ = self.train(X, y, sample_weight, 1, "root")
        self._build_predictor()
        self.X_ = X
        self.y_ = y
        return self

    def train(
@@ -505,81 +669,71 @@ class Stree(BaseEstimator, ClassifierMixin):
        """Recursive function to split the original dataset into predictor
        nodes (leaves)

        :param X: samples dataset
        :type X: np.ndarray
        :param y: samples labels
        :type y: np.ndarray
        :param sample_weight: weight of samples. Rescale C per sample.
        High weights force the classifier to put more emphasis on these points.
        :type sample_weight: np.ndarray
        :param depth: actual depth in the tree
        :type depth: int
        :param title: description of the node
        :type title: str
        :return: binary tree
        :rtype: Snode
        Parameters
        ----------
        X : np.ndarray
            samples dataset
        y : np.ndarray
            samples labels
        sample_weight : np.ndarray
            weight of samples. Rescale C per sample.
        depth : int
            actual depth in the tree
        title : str
            description of the node

        Returns
        -------
        Optional[Snode]
            binary tree
        """
        if depth > self.__max_depth:
            return None
        # Mask samples with 0 weight
        if any(sample_weight == 0):
            indices_zero = sample_weight == 0
            X = X[~indices_zero, :]
            y = y[~indices_zero]
            sample_weight = sample_weight[~indices_zero]
        self.depth_ = max(depth, self.depth_)
        scaler = StandardScaler()
        node = Snode(None, X, y, X.shape[1], 0.0, title, sample_weight, scaler)
        if np.unique(y).shape[0] == 1:
            # only 1 class => pure dataset
            return Snode(
                clf=None,
                X=X,
                y=y,
                features=X.shape[1],
                impurity=0.0,
                title=title + ", <pure>",
                weight=sample_weight,
            )
            node.set_title(title + ", <pure>")
            node.make_predictor()
            return node
        # Train the model
        clf = self._build_clf()
        Xs, features = self.splitter_.get_subspace(X, y, self.max_features_)
        # solve WARNING: class label 0 specified in weight is not found
        # in bagging
        if any(sample_weight == 0):
            indices = sample_weight == 0
            y_next = y[~indices]
            # touch weights if removing any class
            if np.unique(y_next).shape[0] != self.n_classes_:
                sample_weight += 1e-5
        if self.normalize:
            scaler.fit(Xs)
            Xs = scaler.transform(Xs)
        clf.fit(Xs, y, sample_weight=sample_weight)
        impurity = self.splitter_.partition_impurity(y)
        node = Snode(clf, X, y, features, impurity, title, sample_weight)
        self.depth_ = max(depth, self.depth_)
        node.set_impurity(self.splitter_.partition_impurity(y))
        node.set_classifier(clf)
        node.set_features(features)
        self.splitter_.partition(X, node, True)
        X_U, X_D = self.splitter_.part(X)
        y_u, y_d = self.splitter_.part(y)
        sw_u, sw_d = self.splitter_.part(sample_weight)
        if X_U is None or X_D is None:
            # didn't part anything
            return Snode(
                clf,
                X,
                y,
                features=X.shape[1],
                impurity=impurity,
                title=title + ", <cgaf>",
                weight=sample_weight,
            )
            node.set_title(title + ", <cgaf>")
            node.make_predictor()
            return node
        node.set_up(
            self.train(X_U, y_u, sw_u, depth + 1, title + f" - Up({depth+1})")
        )
        node.set_down(
            self.train(
                X_D, y_d, sw_d, depth + 1, title + f" - Down({depth+1})"
            )
        node.set_up(self.train(X_U, y_u, sw_u, depth + 1, title + " - Up"))
        node.set_down(self.train(X_D, y_d, sw_d, depth + 1, title + " - Down"))
        )
        return node
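The zero-weight masking at the top of train() in miniature (toy numbers):

import numpy as np

X = np.array([[1.0], [2.0], [3.0], [4.0]])
sample_weight = np.array([1.0, 0.0, 2.0, 0.0])
mask = sample_weight == 0
X, sample_weight = X[~mask], sample_weight[~mask]  # drop zero-weight samples
print(X.ravel())  # [1. 3.]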
    def _build_predictor(self):
        """Process the leaves to make them predictors"""

        def run_tree(node: Snode):
            if node.is_leaf():
                node.make_predictor()
                return
            run_tree(node.get_down())
            run_tree(node.get_up())

        run_tree(self.tree_)
    def _build_clf(self):
        """Build the correct classifier for the node"""
        """Build the right classifier for the node"""
        return (
            LinearSVC(
                max_iter=self.max_iter,
@@ -595,6 +749,7 @@ class Stree(BaseEstimator, ClassifierMixin):
                C=self.C,
                gamma=self.gamma,
                degree=self.degree,
                random_state=self.random_state,
            )
        )
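The hunk hides the kernel dispatch; its shape is roughly the following (a sketch assuming the linear kernel maps to LinearSVC and the other kernels to SVC, as the visible arguments suggest):

from sklearn.svm import SVC, LinearSVC

def build_clf(kernel="linear", C=1.0, max_iter=1000, gamma="scale",
              degree=3, random_state=None):
    # hypothetical standalone version of the node-classifier factory
    if kernel == "linear":
        return LinearSVC(C=C, max_iter=max_iter, random_state=random_state)
    return SVC(kernel=kernel, C=C, gamma=gamma, degree=degree,
               max_iter=max_iter, random_state=random_state)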
@@ -602,12 +757,17 @@ class Stree(BaseEstimator, ClassifierMixin):
    def _reorder_results(y: np.array, indices: np.array) -> np.array:
        """Reorder an array based on the array of indices passed

        :param y: data untidy
        :type y: np.array
        :param indices: indices used to set order
        :type indices: np.array
        :return: array y ordered
        :rtype: np.array
        Parameters
        ----------
        y : np.array
            data untidy
        indices : np.array
            indices used to set order

        Returns
        -------
        np.array
            array y ordered
        """
        # return array of same type given in y
        y_ordered = y.copy()
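A toy illustration of this scatter-style reorder (assuming the body assigns y back at the given indices, as in y_ordered[indices] = y):

import numpy as np

y = np.array([10, 20, 30])     # results in prediction order
indices = np.array([2, 0, 1])  # original position of each result
y_ordered = y.copy()
y_ordered[indices] = y
print(y_ordered)               # [20 30 10]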
@@ -619,10 +779,22 @@ class Stree(BaseEstimator, ClassifierMixin):
    def predict(self, X: np.array) -> np.array:
        """Predict labels for each sample in dataset passed

        :param X: dataset of samples
        :type X: np.array
        :return: array of labels
        :rtype: np.array
        Parameters
        ----------
        X : np.array
            dataset of samples

        Returns
        -------
        np.array
            array of labels

        Raises
        ------
        ValueError
            if dataset with inconsistent number of features
        NotFittedError
            if model is not fitted
        """

        def predict_class(
@@ -659,38 +831,30 @@ class Stree(BaseEstimator, ClassifierMixin):
        )
        return self.classes_[result]
    def score(
        self, X: np.array, y: np.array, sample_weight: np.array = None
    ) -> float:
        """Compute accuracy of the prediction
    def nodes_leaves(self) -> tuple:
        """Compute the number of nodes and leaves in the built tree

        :param X: dataset of samples to make predictions
        :type X: np.array
        :param y_true: samples labels
        :type y_true: np.array
        :param sample_weight: weights of the samples. Rescale C per sample.
        High weights force the classifier to put more emphasis on these points
        :type sample_weight: np.array optional
        :return: accuracy of the prediction
        :rtype: float
        Returns
        -------
        [tuple]
            tuple with the number of nodes and the number of leaves
        """
        # sklearn check
        check_is_fitted(self)
        check_classification_targets(y)
        X, y = check_X_y(X, y)
        y_pred = self.predict(X).reshape(y.shape)
        # Compute accuracy for each possible representation
        _, y_true, y_pred = _check_targets(y, y_pred)
        check_consistent_length(y_true, y_pred, sample_weight)
        score = y_true == y_pred
        return _weighted_sum(score, sample_weight, normalize=True)
        nodes = 0
        leaves = 0
        for node in self:
            nodes += 1
            if node.is_leaf():
                leaves += 1
        return nodes, leaves
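Usage once a tree is fitted (illustrative dataset):

from sklearn.datasets import load_wine
from stree import Stree

X, y = load_wine(return_X_y=True)
clf = Stree(random_state=0).fit(X, y)
nodes, leaves = clf.nodes_leaves()  # counted via the preorder iterator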
    def __iter__(self) -> Siterator:
        """Create an iterator to be able to visit the nodes of the tree in
        preorder, can make a list with all the nodes in preorder

        :return: an iterator, can for i in... and list(...)
        :rtype: Siterator
        Returns
        -------
        Siterator
            an iterator, can for i in... and list(...)
        """
        try:
            tree = self.tree_
@@ -701,8 +865,10 @@ class Stree(BaseEstimator, ClassifierMixin):
    def __str__(self) -> str:
        """String representation of the tree

        :return: description of nodes in the tree in preorder
        :rtype: str
        Returns
        -------
        str
            description of nodes in the tree in preorder
        """
        output = ""
        for i in self:
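Both protocols make a fitted tree easy to inspect (sketch, illustrative dataset):

from sklearn.datasets import load_wine
from stree import Stree

X, y = load_wine(return_X_y=True)
clf = Stree(random_state=0).fit(X, y)
for node in clf:   # preorder traversal through Siterator
    print(node)
print(clf)         # the same node descriptions as one string
nodes_in_preorder = list(clf)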
@@ -1,3 +1,11 @@
from .Strees import Stree, Snode, Siterator, Splitter

__version__ = "1.0"

__author__ = "Ricardo Montañana Gómez"
__copyright__ = "Copyright 2020-2021, Ricardo Montañana Gómez"
__license__ = "MIT License"
__author_email__ = "ricardo.montanana@alu.uclm.es"
__url__ = "https://github.com/doctorado-ml/stree"

__all__ = ["Stree", "Snode", "Siterator", "Splitter"]
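With this package __init__, the metadata and classes become importable directly:

import stree
from stree import Stree, Snode, Siterator, Splitter

print(stree.__version__)  # "1.0"
print(stree.__license__)  # "MIT License"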
@@ -1,8 +1,6 @@
import os
import unittest

import numpy as np

from stree import Stree, Snode
from .utils import load_dataset

@@ -69,6 +67,31 @@ class Snode_test(unittest.TestCase):
        self.assertEqual(0.75, test._belief)
        self.assertEqual(-1, test._partition_column)

    def test_set_title(self):
        test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
        self.assertEqual("test", test.get_title())
        test.set_title("another")
        self.assertEqual("another", test.get_title())

    def test_set_classifier(self):
        test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
        clf = Stree()
        self.assertIsNone(test.get_classifier())
        test.set_classifier(clf)
        self.assertEqual(clf, test.get_classifier())

    def test_set_impurity(self):
        test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
        self.assertEqual(0.0, test.get_impurity())
        test.set_impurity(54.7)
        self.assertEqual(54.7, test.get_impurity())

    def test_set_features(self):
        test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [0, 1], 0.0, "test")
        self.assertListEqual([0, 1], test.get_features())
        test.set_features([1, 2])
        self.assertListEqual([1, 2], test.get_features())

    def test_make_predictor_on_not_leaf(self):
        test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
        test.set_up(Snode(None, [1], [1], [], 0.0, "another_test"))
@@ -94,3 +117,5 @@ class Snode_test(unittest.TestCase):
        self.assertEqual("test", computed._title)
        self.assertIsInstance(computed._clf, Stree)
        self.assertEqual(test._partition_column, computed._partition_column)
        self.assertEqual(test._sample_weight, computed._sample_weight)
        self.assertEqual(test._scaler, computed._scaler)
@@ -6,6 +6,7 @@ import numpy as np
from sklearn.svm import SVC
from sklearn.datasets import load_wine, load_iris
from stree import Splitter
from .utils import load_dataset


class Splitter_test(unittest.TestCase):
@@ -17,7 +18,7 @@ class Splitter_test(unittest.TestCase):
    def build(
        clf=SVC,
        min_samples_split=0,
        splitter_type="random",
        feature_select="random",
        criterion="gini",
        criteria="max_samples",
        random_state=None,
@@ -25,7 +26,7 @@ class Splitter_test(unittest.TestCase):
        return Splitter(
            clf=clf(random_state=random_state, kernel="rbf"),
            min_samples_split=min_samples_split,
            splitter_type=splitter_type,
            feature_select=feature_select,
            criterion=criterion,
            criteria=criteria,
            random_state=random_state,
@@ -39,20 +40,20 @@ class Splitter_test(unittest.TestCase):
        with self.assertRaises(ValueError):
            self.build(criterion="duck")
        with self.assertRaises(ValueError):
            self.build(splitter_type="duck")
            self.build(feature_select="duck")
        with self.assertRaises(ValueError):
            self.build(criteria="duck")
        with self.assertRaises(ValueError):
            _ = Splitter(clf=None)
        for splitter_type in ["best", "random"]:
        for feature_select in ["best", "random"]:
            for criterion in ["gini", "entropy"]:
                for criteria in ["max_samples", "impurity"]:
                    tcl = self.build(
                        splitter_type=splitter_type,
                        feature_select=feature_select,
                        criterion=criterion,
                        criteria=criteria,
                    )
                    self.assertEqual(splitter_type, tcl._splitter_type)
                    self.assertEqual(feature_select, tcl._feature_select)
                    self.assertEqual(criterion, tcl._criterion)
                    self.assertEqual(criteria, tcl._criteria)
@@ -177,32 +178,34 @@ class Splitter_test(unittest.TestCase):
    def test_best_splitter_few_sets(self):
        X, y = load_iris(return_X_y=True)
        X = np.delete(X, 3, 1)
        tcl = self.build(splitter_type="best", random_state=self._random_state)
        tcl = self.build(
            feature_select="best", random_state=self._random_state
        )
        dataset, computed = tcl.get_subspace(X, y, max_features=2)
        self.assertListEqual([0, 2], list(computed))
        self.assertListEqual(X[:, computed].tolist(), dataset.tolist())

    def test_splitter_parameter(self):
        expected_values = [
            [1, 4, 9, 12],  # best entropy max_samples
            [1, 3, 6, 10],  # best entropy impurity
            [6, 8, 10, 12],  # best gini max_samples
            [7, 8, 10, 11],  # best gini impurity
            [0, 6, 11, 12],  # best entropy max_samples
            [0, 6, 11, 12],  # best entropy impurity
            [0, 6, 11, 12],  # best gini max_samples
            [0, 6, 11, 12],  # best gini impurity
            [0, 3, 8, 12],  # random entropy max_samples
            [0, 3, 9, 11],  # random entropy impurity
            [0, 4, 7, 12],  # random gini max_samples
            [0, 2, 5, 6],  # random gini impurity
            [0, 3, 7, 12],  # random entropy impurity
            [1, 7, 9, 12],  # random gini max_samples
            [1, 5, 8, 12],  # random gini impurity
        ]
        X, y = load_wine(return_X_y=True)
        rn = 0
        for splitter_type in ["best", "random"]:
        for feature_select in ["best", "random"]:
            for criterion in ["entropy", "gini"]:
                for criteria in [
                    "max_samples",
                    "impurity",
                ]:
                    tcl = self.build(
                        splitter_type=splitter_type,
                        feature_select=feature_select,
                        criterion=criterion,
                        criteria=criteria,
                    )
@@ -213,7 +216,7 @@ class Splitter_test(unittest.TestCase):
                    # print(
                    #     "{}, # {:7s}{:8s}{:15s}".format(
                    #         list(computed),
                    #         splitter_type,
                    #         feature_select,
                    #         criterion,
                    #         criteria,
                    #     )
@@ -222,3 +225,18 @@ class Splitter_test(unittest.TestCase):
                    self.assertListEqual(
                        X[:, computed].tolist(), dataset.tolist()
                    )

    def test_get_best_subspaces(self):
        results = [
            (4, [3, 4, 11, 13]),
            (7, [1, 3, 4, 5, 11, 13, 16]),
            (9, [1, 3, 4, 5, 7, 10, 11, 13, 16]),
        ]
        X, y = load_dataset(n_features=20)
        for k, expected in results:
            tcl = self.build(
                feature_select="best",
            )
            Xs, computed = tcl.get_subspace(X, y, k)
            self.assertListEqual(expected, list(computed))
            self.assertListEqual(X[:, expected].tolist(), Xs.tolist())
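A hedged sketch of the get_subspace call these tests exercise (constructor defaults assumed from the build fixture above):

from sklearn.datasets import load_wine
from sklearn.svm import SVC
from stree import Splitter

X, y = load_wine(return_X_y=True)
sp = Splitter(clf=SVC(kernel="rbf"), feature_select="best")
Xs, cols = sp.get_subspace(X, y, 4)  # Xs equals X[:, cols], 4 chosen features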
@@ -21,13 +21,30 @@ class Stree_test(unittest.TestCase):
    def setUp(cls):
        os.environ["TESTING"] = "1"

    def test_valid_kernels(self):
        valid_kernels = ["linear", "rbf", "poly", "sigmoid"]
        X, y = load_dataset()
        for kernel in valid_kernels:
            clf = Stree(kernel=kernel)
            clf.fit(X, y)
            self.assertIsNotNone(clf.tree_)

    def test_bogus_kernel(self):
        kernel = "other"
        X, y = load_dataset()
        clf = Stree(kernel=kernel)
        with self.assertRaises(ValueError):
            clf.fit(X, y)

    def _check_tree(self, node: Snode):
        """Check recursively that the nodes that are not leaves have the
        correct number of labels and its sons have the right number of elements
        in their dataset

        Arguments:
        node {Snode} -- node to check
        Parameters
        ----------
        node : Snode
            node to check
        """
        if node.is_leaf():
            return
@@ -101,20 +118,20 @@ class Stree_test(unittest.TestCase):
    def test_iterator_and_str(self):
        """Check preorder iterator"""
        expected = [
            "root feaures=(0, 1, 2) impurity=1.0000 counts=(array([0, 1]), arr"
            "ay([750, 750]))",
            "root - Down, <cgaf> - Leaf class=0 belief= 0.928297 impurity=0.37"
            "22 counts=(array([0, 1]), array([725, 56]))",
            "root - Up feaures=(0, 1, 2) impurity=0.2178 counts=(array([0, 1])"
            ", array([ 25, 694]))",
            "root - Up - Down feaures=(0, 1, 2) impurity=0.8454 counts=(array("
            "[0, 1]), array([8, 3]))",
            "root - Up - Down - Down, <pure> - Leaf class=0 belief= 1.000000 i"
            "mpurity=0.0000 counts=(array([0]), array([7]))",
            "root - Up - Down - Up, <cgaf> - Leaf class=1 belief= 0.750000 imp"
            "urity=0.8113 counts=(array([0, 1]), array([1, 3]))",
            "root - Up - Up, <cgaf> - Leaf class=1 belief= 0.975989 impurity=0"
            ".1634 counts=(array([0, 1]), array([ 17, 691]))",
            "root feaures=(0, 1, 2) impurity=1.0000 counts=(array([0, 1]), "
            "array([750, 750]))",
            "root - Down(2), <cgaf> - Leaf class=0 belief= 0.928297 impurity="
            "0.3722 counts=(array([0, 1]), array([725, 56]))",
            "root - Up(2) feaures=(0, 1, 2) impurity=0.2178 counts=(array([0, "
            "1]), array([ 25, 694]))",
            "root - Up(2) - Down(3) feaures=(0, 1, 2) impurity=0.8454 counts="
            "(array([0, 1]), array([8, 3]))",
            "root - Up(2) - Down(3) - Down(4), <pure> - Leaf class=0 belief= "
            "1.000000 impurity=0.0000 counts=(array([0]), array([7]))",
            "root - Up(2) - Down(3) - Up(4), <cgaf> - Leaf class=1 belief= "
            "0.750000 impurity=0.8113 counts=(array([0, 1]), array([1, 3]))",
            "root - Up(2) - Up(3), <cgaf> - Leaf class=1 belief= 0.975989 "
            "impurity=0.1634 counts=(array([0, 1]), array([ 17, 691]))",
        ]
        computed = []
        expected_string = ""
@@ -196,10 +213,10 @@ class Stree_test(unittest.TestCase):
            "Synt": {
                "max_samples linear": 0.9606666666666667,
                "max_samples rbf": 0.7133333333333334,
                "max_samples poly": 0.49066666666666664,
                "max_samples poly": 0.618,
                "impurity linear": 0.9606666666666667,
                "impurity rbf": 0.7133333333333334,
                "impurity poly": 0.49066666666666664,
                "impurity poly": 0.618,
            },
            "Iris": {
                "max_samples linear": 1.0,
@@ -313,50 +330,13 @@ class Stree_test(unittest.TestCase):
        X, y = load_dataset(self._random_state)
        clf = Stree(random_state=self._random_state, max_features=2)
        clf.fit(X, y)
        self.assertAlmostEqual(0.9246666666666666, clf.score(X, y))
        self.assertAlmostEqual(0.9453333333333334, clf.score(X, y))
    def test_bogus_splitter_parameter(self):
        clf = Stree(splitter="duck")
        with self.assertRaises(ValueError):
            clf.fit(*load_dataset())

    def test_weights_removing_class(self):
        # This patch solves an stderr message from sklearn svm lib
        # "WARNING: class label x specified in weight is not found"
        X = np.array(
            [
                [0.1, 0.1],
                [0.1, 0.2],
                [0.2, 0.1],
                [5, 6],
                [8, 9],
                [6, 7],
                [0.2, 0.2],
            ]
        )
        y = np.array([0, 0, 0, 1, 1, 1, 0])
        epsilon = 1e-5
        weights = [1, 1, 1, 0, 0, 0, 1]
        weights = np.array(weights, dtype="float64")
        weights_epsilon = [x + epsilon for x in weights]
        weights_no_zero = np.array([1, 1, 1, 0, 0, 2, 1])
        original = weights_no_zero.copy()
        clf = Stree()
        clf.fit(X, y)
        node = clf.train(
            X,
            y,
            weights,
            1,
            "test",
        )
        # if a class is lost with zero weights the patch adds epsilon
        self.assertListEqual(weights.tolist(), weights_epsilon)
        self.assertListEqual(node._sample_weight.tolist(), weights_epsilon)
        # zero weights are ok when they don't erase a class
        _ = clf.train(X, y, weights_no_zero, 1, "test")
        self.assertListEqual(weights_no_zero.tolist(), original.tolist())
    def test_multiclass_classifier_integrity(self):
        """Checks if the multiclass operation is done right"""
        X, y = load_iris(return_X_y=True)
@@ -413,9 +393,14 @@ class Stree_test(unittest.TestCase):
            n_samples=500,
        )
        clf = Stree(kernel="rbf", random_state=self._random_state)
        self.assertEqual(0.824, clf.fit(X, y).score(X, y))
        clf2 = Stree(
            kernel="rbf", random_state=self._random_state, normalize=True
        )
        self.assertEqual(0.768, clf.fit(X, y).score(X, y))
        self.assertEqual(0.814, clf2.fit(X, y).score(X, y))
        X, y = load_wine(return_X_y=True)
        self.assertEqual(0.6741573033707865, clf.fit(X, y).score(X, y))
        self.assertEqual(1.0, clf2.fit(X, y).score(X, y))

    def test_score_multiclass_poly(self):
        X, y = load_dataset(
@@ -427,9 +412,16 @@ class Stree_test(unittest.TestCase):
        clf = Stree(
            kernel="poly", random_state=self._random_state, C=10, degree=5
        )
        clf2 = Stree(
            kernel="poly",
            random_state=self._random_state,
            normalize=True,
        )
        self.assertEqual(0.786, clf.fit(X, y).score(X, y))
        self.assertEqual(0.818, clf2.fit(X, y).score(X, y))
        X, y = load_wine(return_X_y=True)
        self.assertEqual(0.702247191011236, clf.fit(X, y).score(X, y))
        self.assertEqual(0.6067415730337079, clf2.fit(X, y).score(X, y))
    def test_score_multiclass_linear(self):
        X, y = load_dataset(
@@ -440,5 +432,95 @@ class Stree_test(unittest.TestCase):
        )
        clf = Stree(kernel="linear", random_state=self._random_state)
        self.assertEqual(0.9533333333333334, clf.fit(X, y).score(X, y))
        # Check with context based standardization
        clf2 = Stree(
            kernel="linear", random_state=self._random_state, normalize=True
        )
        self.assertEqual(0.9526666666666667, clf2.fit(X, y).score(X, y))
        X, y = load_wine(return_X_y=True)
        self.assertEqual(0.9550561797752809, clf.fit(X, y).score(X, y))
        self.assertEqual(0.9831460674157303, clf.fit(X, y).score(X, y))
        self.assertEqual(1.0, clf2.fit(X, y).score(X, y))

    def test_zero_all_sample_weights(self):
        X, y = load_dataset(self._random_state)
        with self.assertRaises(ValueError):
            Stree().fit(X, y, np.zeros(len(y)))
    def test_mask_samples_weighted_zero(self):
        X = np.array(
            [
                [1, 1],
                [1, 1],
                [1, 1],
                [2, 2],
                [2, 2],
                [2, 2],
                [3, 3],
                [3, 3],
                [3, 3],
            ]
        )
        y = np.array([1, 1, 1, 2, 2, 2, 5, 5, 5])
        yw = np.array([1, 1, 1, 5, 5, 5, 5, 5, 5])
        w = [1, 1, 1, 0, 0, 0, 1, 1, 1]
        model1 = Stree().fit(X, y)
        model2 = Stree().fit(X, y, w)
        predict1 = model1.predict(X)
        predict2 = model2.predict(X)
        self.assertListEqual(y.tolist(), predict1.tolist())
        self.assertListEqual(yw.tolist(), predict2.tolist())
        self.assertEqual(model1.score(X, y), 1)
        self.assertAlmostEqual(model2.score(X, y), 0.66666667)
        self.assertEqual(model2.score(X, y, w), 1)
    def test_depth(self):
        X, y = load_dataset(
            random_state=self._random_state,
            n_classes=3,
            n_features=5,
            n_samples=1500,
        )
        clf = Stree(random_state=self._random_state)
        clf.fit(X, y)
        self.assertEqual(6, clf.depth_)
        X, y = load_wine(return_X_y=True)
        clf = Stree(random_state=self._random_state)
        clf.fit(X, y)
        self.assertEqual(4, clf.depth_)
    def test_nodes_leaves(self):
        X, y = load_dataset(
            random_state=self._random_state,
            n_classes=3,
            n_features=5,
            n_samples=1500,
        )
        clf = Stree(random_state=self._random_state)
        clf.fit(X, y)
        nodes, leaves = clf.nodes_leaves()
        self.assertEqual(25, nodes)
        self.assertEqual(13, leaves)
        X, y = load_wine(return_X_y=True)
        clf = Stree(random_state=self._random_state)
        clf.fit(X, y)
        nodes, leaves = clf.nodes_leaves()
        self.assertEqual(9, nodes)
        self.assertEqual(5, leaves)
    def test_nodes_leaves_artificial(self):
        n1 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test1")
        n2 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test2")
        n3 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test3")
        n4 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test4")
        n5 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test5")
        n6 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test6")
        n1.set_up(n2)
        n2.set_up(n3)
        n2.set_down(n4)
        n3.set_up(n5)
        n4.set_down(n6)
        clf = Stree(random_state=self._random_state)
        clf.tree_ = n1
        nodes, leaves = clf.nodes_leaves()
        self.assertEqual(6, nodes)
        self.assertEqual(2, leaves)