mirror of https://github.com/Doctorado-ML/STree.git
synced 2025-08-17 16:36:01 +00:00

Compare commits: 0.9rc5 ... selectKBes (16 Commits)

| Author | SHA1 | Date |
| --- | --- | --- |
|  | 1b08cb9bdf |  |
|  | a4aac9d310 |  |
|  | 8a18c998df |  |
|  | b55f59a3ec |  |
|  | 783d105099 |  |
|  | c36f685263 |  |
|  | 0f89b044f1 |  |
|  | 6ba973dfe1 |  |
|  | 460c63a6d0 |  |
|  | f438124057 |  |
|  | 147dad684c |  |
|  | 3bdac9bd60 |  |
|  | e4ac5075e5 |  |
|  | 36816074ff |  |
|  | 475ad7e752 |  |
|  | 1c869e154e |  |
.github/workflows/codeql-analysis.yml | 56 (vendored, new file)

@@ -0,0 +1,56 @@
+name: "CodeQL"
+
+on:
+  push:
+    branches: [ master ]
+  pull_request:
+    # The branches below must be a subset of the branches above
+    branches: [ master ]
+  schedule:
+    - cron: '16 17 * * 3'
+
+jobs:
+  analyze:
+    name: Analyze
+    runs-on: ubuntu-latest
+
+    strategy:
+      fail-fast: false
+      matrix:
+        language: [ 'python' ]
+        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
+        # Learn more:
+        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
+
+    steps:
+    - name: Checkout repository
+      uses: actions/checkout@v2
+
+    # Initializes the CodeQL tools for scanning.
+    - name: Initialize CodeQL
+      uses: github/codeql-action/init@v1
+      with:
+        languages: ${{ matrix.language }}
+        # If you wish to specify custom queries, you can do so here or in a config file.
+        # By default, queries listed here will override any specified in a config file.
+        # Prefix the list here with "+" to use these queries and those in the config file.
+        # queries: ./path/to/local/query, your-org/your-repo/queries@main
+
+    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+    # If this step fails, then you should remove it and run the build manually (see below)
+    - name: Autobuild
+      uses: github/codeql-action/autobuild@v1
+
+    # ℹ️ Command-line programs to run using the OS shell.
+    # 📚 https://git.io/JvXDl
+
+    # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
+    #    and modify them (or add more) to build your code if your project
+    #    uses a compiled language
+
+    #- run: |
+    #   make bootstrap
+    #   make release
+
+    - name: Perform CodeQL Analysis
+      uses: github/codeql-action/analyze@v1
.github/workflows/main.yml | 47 (vendored, new file)

@@ -0,0 +1,47 @@
+name: CI
+
+on:
+  push:
+    branches: [master]
+  pull_request:
+    branches: [master]
+  workflow_dispatch:
+
+jobs:
+  build:
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [macos-latest, ubuntu-latest]
+        python: [3.8]
+
+    steps:
+      - uses: actions/checkout@v2
+      - name: Set up Python ${{ matrix.python }}
+        uses: actions/setup-python@v2
+        with:
+          python-version: ${{ matrix.python }}
+      - name: Install dependencies
+        run: |
+          pip install -q --upgrade pip
+          pip install -q -r requirements.txt
+          pip install -q --upgrade codecov coverage black flake8 codacy-coverage
+      - name: Lint
+        run: |
+          black --check --diff stree
+          flake8 --count stree
+      - name: Tests
+        run: |
+          coverage run -m unittest -v stree.tests
+          coverage xml
+      - name: Upload coverage to Codecov
+        uses: codecov/codecov-action@v1
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          files: ./coverage.xml
+      - name: Run codacy-coverage-reporter
+        if: runner.os == 'Linux'
+        uses: codacy/codacy-coverage-reporter-action@master
+        with:
+          project-token: ${{ secrets.CODACY_PROJECT_TOKEN }}
+          coverage-reports: coverage.xml
.gitignore | 1 (vendored)

@@ -133,3 +133,4 @@ dmypy.json
 .pre-commit-config.yaml
 
 **.csv
+.virtual_documents
README.md | 44

@@ -1,6 +1,6 @@
-[](https://app.codeship.com/projects/399170)
+
 [](https://codecov.io/gh/doctorado-ml/stree)
 [](https://www.codacy.com/gh/Doctorado-ML/STree?utm_source=github.com&utm_medium=referral&utm_content=Doctorado-ML/STree&utm_campaign=Badge_Grade)
 
 # Stree
 
@@ -18,23 +18,43 @@ pip install git+https://github.com/doctorado-ml/stree
 
 ### Jupyter notebooks
 
-* [](https://mybinder.org/v2/gh/Doctorado-ML/STree/master?urlpath=lab/tree/notebooks/benchmark.ipynb) Benchmark
+- [](https://mybinder.org/v2/gh/Doctorado-ML/STree/master?urlpath=lab/tree/notebooks/benchmark.ipynb) Benchmark
 
-* [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/benchmark.ipynb) Benchmark
+- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/benchmark.ipynb) Benchmark
 
-* [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/features.ipynb) Test features
+- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/features.ipynb) Some features
 
-* [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/adaboost.ipynb) Adaboost
+- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/gridsearch.ipynb) Gridsearch
 
-* [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/gridsearch.ipynb) Gridsearch
+- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/ensemble.ipynb) Ensembles
 
-* [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/test_graphs.ipynb) Test Graphics
-
-### Command line
-
-```bash
-python main.py
-```
+## Hyperparameters
+
+| | **Hyperparameter** | **Type/Values** | **Default** | **Meaning** |
+| --- | ------------------ | --------------- | ----------- | ----------- |
+| \* | C | \<float\> | 1.0 | Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. |
+| \* | kernel | {"linear", "poly", "rbf"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of ‘linear’, ‘poly’ or ‘rbf’. |
+| \* | max_iter | \<int\> | 1e5 | Hard limit on iterations within solver, or -1 for no limit. |
+| \* | random_state | \<int\> | None | Controls the pseudo random number generation for shuffling the data for probability estimates. Ignored when probability is False.<br>Pass an int for reproducible output across multiple function calls |
+| | max_depth | \<int\> | None | Specifies the maximum depth of the tree |
+| \* | tol | \<float\> | 1e-4 | Tolerance for stopping criterion. |
+| \* | degree | \<int\> | 3 | Degree of the polynomial kernel function (‘poly’). Ignored by all other kernels. |
+| \* | gamma | {"scale", "auto"} or \<float\> | scale | Kernel coefficient for ‘rbf’ and ‘poly’.<br>if gamma='scale' (default) is passed then it uses 1 / (n_features \* X.var()) as value of gamma,<br>if ‘auto’, uses 1 / n_features. |
+| | split_criteria | {"impurity", "max_samples"} | impurity | Decides (just in case of a multiclass classification) which column (class) to use to split the dataset in a node\*\* |
+| | criterion | {“gini”, “entropy”} | entropy | The function to measure the quality of a split (only used if max_features != num_features).<br>Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain. |
+| | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) for any |
+| | max_features | \<int\>, \<float\><br><br>or {“auto”, “sqrt”, “log2”} | None | The number of features to consider when looking for the split:<br>If int, then consider max_features features at each split.<br>If float, then max_features is a fraction and int(max_features \* n_features) features are considered at each split.<br>If “auto”, then max_features=sqrt(n_features).<br>If “sqrt”, then max_features=sqrt(n_features).<br>If “log2”, then max_features=log2(n_features).<br>If None, then max_features=n_features. |
+| | splitter | {"best", "random"} | random | The strategy used to choose the feature set at each node (only used if max_features != num_features).<br>Supported strategies are “best” to choose the best feature set and “random” to choose a random combination.<br>The algorithm generates 5 candidates at most to choose from in both strategies. |
+
+\* Hyperparameter used by the support vector classifier of every node
+
+\*\* **Splitting in a STree node**
+
+The decision function is applied to the dataset and the distances from the samples to the hyperplanes are stored in a matrix. This matrix has as many columns as there are classes (if more than two, i.e. multiclass classification) or 1 column if it's a binary class dataset. In binary classification only one hyperplane is computed, so only one column is needed to store the distances of the samples to it. If three or more classes are present in the dataset, we need as many hyperplanes as there are classes, and therefore one column per hyperplane.
+
+In case of multiclass classification we have to decide which column to take into account to make the split. That depends on the hyperparameter _split_criteria_: if "impurity" is chosen, STree computes the information gain of every split candidate using each column and chooses the one that maximizes it; otherwise STree chooses the column with the most samples of a predicted class (the column with the most positive numbers in it).
+
+Once we have the column to take into account for the split, the algorithm separates the samples with positive distances to the hyperplane from the rest.
+
 ## Tests
 
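The "impurity" split criterion described in the README section above can be sketched in a few lines of Python. This is an illustrative sketch, not the library code: `distances` stands for the (m, n_classes) matrix of sample-to-hyperplane distances returned by the decision function, and the helper names are hypothetical.

```python
import numpy as np

def entropy(y: np.ndarray) -> float:
    # Shannon entropy (base 2) of a label vector
    _, counts = np.unique(y, return_counts=True)
    p = counts / counts.sum()
    return float(-(p * np.log2(p)).sum())

def information_gain(y, y_up, y_dn) -> float:
    # entropy removed by splitting y into the y_up / y_dn sides
    n = len(y)
    gain = entropy(y)
    if len(y_up):
        gain -= len(y_up) / n * entropy(y_up)
    if len(y_dn):
        gain -= len(y_dn) / n * entropy(y_dn)
    return gain

def impurity_column(distances: np.ndarray, y: np.ndarray) -> int:
    # pick the hyperplane column whose sign-based split maximizes
    # information gain; -1 means no column produces any gain
    best_col, best_gain = -1, 0.0
    for col in range(distances.shape[1]):
        up = distances[:, col] > 0
        gain = information_gain(y, y[up], y[~up])
        if gain > best_gain:
            best_col, best_gain = col, gain
    return best_col
```

The samples whose distance in the chosen column is positive then go to one side of the node and the rest to the other, which matches the `_impurity` method added to `stree/Strees.py` later in this changeset.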
main.py | 2

@@ -8,7 +8,7 @@ random_state = 1
 X, y = load_iris(return_X_y=True)
 
 Xtrain, Xtest, ytrain, ytest = train_test_split(
-    X, y, test_size=0.2, random_state=random_state
+    X, y, test_size=0.3, random_state=random_state
 )
 
 now = time.time()
File diff suppressed because one or more lines are too long
@@ -17,35 +17,43 @@
 },
 {
 "cell_type": "code",
-"execution_count": 1,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
 "#\n",
 "# Google Colab setup\n",
 "#\n",
-"#!pip install git+https://github.com/doctorado-ml/stree"
+"#!pip install git+https://github.com/doctorado-ml/stree\n",
+"!pip install pandas"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 2,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
 "import time\n",
+"import os\n",
+"import random\n",
+"import warnings\n",
+"import pandas as pd\n",
+"import numpy as np\n",
 "from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier\n",
 "from sklearn.model_selection import train_test_split\n",
-"from stree import Stree"
+"from sklearn.exceptions import ConvergenceWarning\n",
+"from stree import Stree\n",
+"\n",
+"warnings.filterwarnings(\"ignore\", category=ConvergenceWarning)"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 3,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
-"import os\n",
 "if not os.path.isfile('data/creditcard.csv'):\n",
 " !wget --no-check-certificate --content-disposition http://nube.jccm.es/index.php/s/Zs7SYtZQJ3RQ2H2/download\n",
 " !tar xzf creditcard.tgz"

@@ -53,24 +61,15 @@
 },
 {
 "cell_type": "code",
-"execution_count": 4,
+"execution_count": null,
 "metadata": {
 "tags": []
 },
-"outputs": [
-{
-"output_type": "stream",
-"name": "stdout",
-"text": "Fraud: 0.173% 492\nValid: 99.827% 284315\nX.shape (100492, 28) y.shape (100492,)\nFraud: 0.644% 647\nValid: 99.356% 99845\n"
-}
-],
+"outputs": [],
 "source": [
 "random_state=1\n",
 "\n",
 "def load_creditcard(n_examples=0):\n",
-" import pandas as pd\n",
-" import numpy as np\n",
-" import random\n",
 " df = pd.read_csv('data/creditcard.csv')\n",
 " print(\"Fraud: {0:.3f}% {1}\".format(df.Class[df.Class == 1].count()*100/df.shape[0], df.Class[df.Class == 1].count()))\n",
 " print(\"Valid: {0:.3f}% {1}\".format(df.Class[df.Class == 0].count()*100/df.shape[0], df.Class[df.Class == 0].count()))\n",

@@ -121,20 +120,14 @@
 },
 {
 "cell_type": "code",
-"execution_count": 5,
+"execution_count": null,
 "metadata": {
 "tags": []
 },
-"outputs": [
-{
-"output_type": "stream",
-"name": "stdout",
-"text": "Score Train: 0.9985784146480154\nScore Test: 0.9981093273185617\nTook 73.27 seconds\n"
-}
-],
+"outputs": [],
 "source": [
 "now = time.time()\n",
-"clf = Stree(max_depth=3, random_state=random_state)\n",
+"clf = Stree(max_depth=3, random_state=random_state, max_iter=1e3)\n",
 "clf.fit(Xtrain, ytrain)\n",
 "print(\"Score Train: \", clf.score(Xtrain, ytrain))\n",
 "print(\"Score Test: \", clf.score(Xtest, ytest))\n",

@@ -150,7 +143,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 6,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [

@@ -161,21 +154,15 @@
 },
 {
 "cell_type": "code",
-"execution_count": 7,
+"execution_count": null,
 "metadata": {
 "tags": []
 },
-"outputs": [
-{
-"output_type": "stream",
-"name": "stdout",
-"text": "Kernel: linear\tTime: 93.78 seconds\tScore Train: 0.9983083\tScore Test: 0.9983083\nKernel: rbf\tTime: 18.32 seconds\tScore Train: 0.9935602\tScore Test: 0.9935651\nKernel: poly\tTime: 69.68 seconds\tScore Train: 0.9973132\tScore Test: 0.9972801\n"
-}
-],
+"outputs": [],
 "source": [
 "for kernel in ['linear', 'rbf', 'poly']:\n",
 " now = time.time()\n",
-" clf = AdaBoostClassifier(base_estimator=Stree(C=C, kernel=kernel, max_depth=max_depth, random_state=random_state), algorithm=\"SAMME\", n_estimators=n_estimators, random_state=random_state)\n",
+" clf = AdaBoostClassifier(base_estimator=Stree(C=C, kernel=kernel, max_depth=max_depth, random_state=random_state, max_iter=1e3), algorithm=\"SAMME\", n_estimators=n_estimators, random_state=random_state)\n",
 " clf.fit(Xtrain, ytrain)\n",
 " score_train = clf.score(Xtrain, ytrain)\n",
 " score_test = clf.score(Xtest, ytest)\n",

@@ -191,7 +178,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 8,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [

@@ -202,21 +189,15 @@
 },
 {
 "cell_type": "code",
-"execution_count": 9,
+"execution_count": null,
 "metadata": {
 "tags": []
 },
-"outputs": [
-{
-"output_type": "stream",
-"name": "stdout",
-"text": "Kernel: linear\tTime: 387.06 seconds\tScore Train: 0.9985784\tScore Test: 0.9981093\nKernel: rbf\tTime: 144.00 seconds\tScore Train: 0.9992750\tScore Test: 0.9983415\nKernel: poly\tTime: 101.78 seconds\tScore Train: 0.9992466\tScore Test: 0.9981757\n"
-}
-],
+"outputs": [],
 "source": [
 "for kernel in ['linear', 'rbf', 'poly']:\n",
 " now = time.time()\n",
-" clf = BaggingClassifier(base_estimator=Stree(C=C, kernel=kernel, max_depth=max_depth, random_state=random_state), n_estimators=n_estimators, random_state=random_state)\n",
+" clf = BaggingClassifier(base_estimator=Stree(C=C, kernel=kernel, max_depth=max_depth, random_state=random_state, max_iter=1e3), n_estimators=n_estimators, random_state=random_state)\n",
 " clf.fit(Xtrain, ytrain)\n",
 " score_train = clf.score(Xtrain, ytrain)\n",
 " score_test = clf.score(Xtest, ytest)\n",

@@ -225,6 +206,11 @@
 }
 ],
 "metadata": {
+"kernelspec": {
+"display_name": "Python 3",
+"language": "python",
+"name": "python3"
+},
 "language_info": {
 "codemirror_mode": {
 "name": "ipython",

@@ -235,14 +221,9 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.7.6-final"
-},
-"orig_nbformat": 2,
-"kernelspec": {
-"name": "python37664bitgeneralvenve3128601eb614c5da59c5055670b6040",
-"display_name": "Python 3.7.6 64-bit ('general': venv)"
+"version": "3.8.2-final"
 }
 },
 "nbformat": 4,
-"nbformat_minor": 2
+"nbformat_minor": 4
 }
File diff suppressed because one or more lines are too long
@@ -18,64 +18,67 @@
 },
 {
 "cell_type": "code",
-"execution_count": 1,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
 "#\n",
 "# Google Colab setup\n",
 "#\n",
-"#!pip install git+https://github.com/doctorado-ml/stree"
+"#!pip install git+https://github.com/doctorado-ml/stree\n",
+"!pip install pandas"
 ]
 },
 {
 "cell_type": "code",
+"execution_count": null,
 "metadata": {
-"id": "zIHKVxthDZEa",
+"colab": {},
 "colab_type": "code",
-"colab": {}
+"id": "zIHKVxthDZEa"
 },
+"outputs": [],
 "source": [
+"import random\n",
+"import os\n",
+"import pandas as pd\n",
+"import numpy as np\n",
 "from sklearn.ensemble import AdaBoostClassifier\n",
 "from sklearn.svm import LinearSVC\n",
 "from sklearn.model_selection import GridSearchCV, train_test_split\n",
 "from stree import Stree"
-],
-"execution_count": 2,
-"outputs": []
+]
 },
 {
 "cell_type": "code",
+"execution_count": null,
 "metadata": {
-"id": "IEmq50QgDZEi",
+"colab": {},
 "colab_type": "code",
-"colab": {}
+"id": "IEmq50QgDZEi"
 },
+"outputs": [],
 "source": [
-"import os\n",
 "if not os.path.isfile('data/creditcard.csv'):\n",
 " !wget --no-check-certificate --content-disposition http://nube.jccm.es/index.php/s/Zs7SYtZQJ3RQ2H2/download\n",
 " !tar xzf creditcard.tgz"
-],
-"execution_count": 3,
-"outputs": []
+]
 },
 {
 "cell_type": "code",
+"execution_count": null,
 "metadata": {
-"id": "z9Q-YUfBDZEq",
-"colab_type": "code",
 "colab": {},
+"colab_type": "code",
+"id": "z9Q-YUfBDZEq",
 "outputId": "afc822fb-f16a-4302-8a67-2b9e2880159b",
 "tags": []
 },
+"outputs": [],
 "source": [
 "random_state=1\n",
 "\n",
 "def load_creditcard(n_examples=0):\n",
-" import pandas as pd\n",
-" import numpy as np\n",
-" import random\n",
 " df = pd.read_csv('data/creditcard.csv')\n",
 " print(\"Fraud: {0:.3f}% {1}\".format(df.Class[df.Class == 1].count()*100/df.shape[0], df.Class[df.Class == 1].count()))\n",
 " print(\"Valid: {0:.3f}% {1}\".format(df.Class[df.Class == 0].count()*100/df.shape[0], df.Class[df.Class == 0].count()))\n",

@@ -107,14 +110,6 @@
 "Xtest = data[1]\n",
 "ytrain = data[2]\n",
 "ytest = data[3]"
-],
-"execution_count": 4,
-"outputs": [
-{
-"output_type": "stream",
-"name": "stdout",
-"text": "Fraud: 0.173% 492\nValid: 99.827% 284315\nX.shape (1492, 28) y.shape (1492,)\nFraud: 32.976% 492\nValid: 67.024% 1000\n"
-}
 ]
 },
 {

@@ -126,100 +121,120 @@
 },
 {
 "cell_type": "code",
+"execution_count": null,
 "metadata": {
-"id": "HmX3kR4PDZEw",
+"colab": {},
 "colab_type": "code",
-"colab": {}
+"id": "HmX3kR4PDZEw"
 },
+"outputs": [],
 "source": [
-"parameters = {\n",
-" 'base_estimator': [Stree()],\n",
+"parameters = [{\n",
+" 'base_estimator': [Stree(random_state=random_state)],\n",
 " 'n_estimators': [10, 25],\n",
 " 'learning_rate': [.5, 1],\n",
+" 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
 " 'base_estimator__tol': [.1, 1e-02],\n",
-" 'base_estimator__max_depth': [3, 5],\n",
-" 'base_estimator__C': [7, 55],\n",
-" 'base_estimator__kernel': ['linear', 'poly', 'rbf']\n",
-"}"
-],
-"execution_count": 5,
-"outputs": []
+" 'base_estimator__max_depth': [3, 5, 7],\n",
+" 'base_estimator__C': [1, 7, 55],\n",
+" 'base_estimator__kernel': ['linear']\n",
+"},\n",
+"{\n",
+" 'base_estimator': [Stree(random_state=random_state)],\n",
+" 'n_estimators': [10, 25],\n",
+" 'learning_rate': [.5, 1],\n",
+" 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
+" 'base_estimator__tol': [.1, 1e-02],\n",
+" 'base_estimator__max_depth': [3, 5, 7],\n",
+" 'base_estimator__C': [1, 7, 55],\n",
+" 'base_estimator__degree': [3, 5, 7],\n",
+" 'base_estimator__kernel': ['poly']\n",
+"},\n",
+"{\n",
+" 'base_estimator': [Stree(random_state=random_state)],\n",
+" 'n_estimators': [10, 25],\n",
+" 'learning_rate': [.5, 1],\n",
+" 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
+" 'base_estimator__tol': [.1, 1e-02],\n",
+" 'base_estimator__max_depth': [3, 5, 7],\n",
+" 'base_estimator__C': [1, 7, 55],\n",
+" 'base_estimator__gamma': [.1, 1, 10],\n",
+" 'base_estimator__kernel': ['rbf']\n",
+"}]"
+]
 },
 {
 "cell_type": "code",
-"execution_count": 6,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"output_type": "execute_result",
-"data": {
-"text/plain": "{'C': 1.0,\n 'criterion': 'gini',\n 'degree': 3,\n 'gamma': 'scale',\n 'kernel': 'linear',\n 'max_depth': None,\n 'max_features': None,\n 'max_iter': 1000,\n 'min_samples_split': 0,\n 'random_state': None,\n 'split_criteria': 'max_samples',\n 'splitter': 'random',\n 'tol': 0.0001}"
-},
-"metadata": {},
-"execution_count": 6
-}
-],
+"outputs": [],
 "source": [
 "Stree().get_params()"
 ]
 },
 {
 "cell_type": "code",
+"execution_count": null,
 "metadata": {
-"id": "CrcB8o6EDZE5",
-"colab_type": "code",
 "colab": {},
+"colab_type": "code",
+"id": "CrcB8o6EDZE5",
 "outputId": "7703413a-d563-4289-a13b-532f38f82762",
 "tags": []
 },
+"outputs": [],
 "source": [
-"random_state=2020\n",
 "clf = AdaBoostClassifier(random_state=random_state, algorithm=\"SAMME\")\n",
-"grid = GridSearchCV(clf, parameters, verbose=10, n_jobs=-1, return_train_score=True)\n",
+"grid = GridSearchCV(clf, parameters, verbose=5, n_jobs=-1, return_train_score=True)\n",
 "grid.fit(Xtrain, ytrain)"
-],
-"execution_count": 7,
-"outputs": [
-{
-"output_type": "stream",
-"name": "stdout",
-"text": "Fitting 5 folds for each of 96 candidates, totalling 480 fits\n[Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers.\n[Parallel(n_jobs=-1)]: Done 2 tasks | elapsed: 2.0s\n[Parallel(n_jobs=-1)]: Done 9 tasks | elapsed: 2.4s\n[Parallel(n_jobs=-1)]: Done 16 tasks | elapsed: 2.7s\n[Parallel(n_jobs=-1)]: Done 25 tasks | elapsed: 3.3s\n[Parallel(n_jobs=-1)]: Done 34 tasks | elapsed: 4.3s\n[Parallel(n_jobs=-1)]: Done 45 tasks | elapsed: 5.3s\n[Parallel(n_jobs=-1)]: Done 56 tasks | elapsed: 6.6s\n[Parallel(n_jobs=-1)]: Done 69 tasks | elapsed: 8.1s\n[Parallel(n_jobs=-1)]: Done 82 tasks | elapsed: 9.4s\n[Parallel(n_jobs=-1)]: Done 97 tasks | elapsed: 10.1s\n[Parallel(n_jobs=-1)]: Done 112 tasks | elapsed: 11.1s\n[Parallel(n_jobs=-1)]: Done 129 tasks | elapsed: 12.3s\n[Parallel(n_jobs=-1)]: Done 146 tasks | elapsed: 13.6s\n[Parallel(n_jobs=-1)]: Done 165 tasks | elapsed: 14.9s\n[Parallel(n_jobs=-1)]: Done 184 tasks | elapsed: 16.2s\n[Parallel(n_jobs=-1)]: Done 205 tasks | elapsed: 17.6s\n[Parallel(n_jobs=-1)]: Done 226 tasks | elapsed: 19.1s\n[Parallel(n_jobs=-1)]: Done 249 tasks | elapsed: 21.6s\n[Parallel(n_jobs=-1)]: Done 272 tasks | elapsed: 25.9s\n[Parallel(n_jobs=-1)]: Done 297 tasks | elapsed: 30.4s\n[Parallel(n_jobs=-1)]: Done 322 tasks | elapsed: 36.7s\n[Parallel(n_jobs=-1)]: Done 349 tasks | elapsed: 38.1s\n[Parallel(n_jobs=-1)]: Done 376 tasks | elapsed: 39.6s\n[Parallel(n_jobs=-1)]: Done 405 tasks | elapsed: 41.9s\n[Parallel(n_jobs=-1)]: Done 434 tasks | elapsed: 44.9s\n[Parallel(n_jobs=-1)]: Done 465 tasks | elapsed: 48.2s\n[Parallel(n_jobs=-1)]: Done 480 out of 480 | elapsed: 49.2s finished\n"
-},
-{
-"output_type": "execute_result",
-"data": {
-"text/plain": "GridSearchCV(estimator=AdaBoostClassifier(algorithm='SAMME', random_state=2020),\n n_jobs=-1,\n param_grid={'base_estimator': [Stree(C=55, max_depth=3, tol=0.01)],\n 'base_estimator__C': [7, 55],\n 'base_estimator__kernel': ['linear', 'poly', 'rbf'],\n 'base_estimator__max_depth': [3, 5],\n 'base_estimator__tol': [0.1, 0.01],\n 'learning_rate': [0.5, 1], 'n_estimators': [10, 25]},\n return_train_score=True, verbose=10)"
-},
-"metadata": {},
-"execution_count": 7
-}
-]
+]
 },
 {
 "cell_type": "code",
+"execution_count": null,
 "metadata": {
-"id": "ZjX88NoYDZE8",
-"colab_type": "code",
 "colab": {},
+"colab_type": "code",
+"id": "ZjX88NoYDZE8",
 "outputId": "285163c8-fa33-4915-8ae7-61c4f7844344",
 "tags": []
 },
+"outputs": [],
 "source": [
 "print(\"Best estimator: \", grid.best_estimator_)\n",
 "print(\"Best hyperparameters: \", grid.best_params_)\n",
 "print(\"Best accuracy: \", grid.best_score_)"
-],
-"execution_count": 8,
-"outputs": [
-{
-"output_type": "stream",
-"name": "stdout",
-"text": "Best estimator: AdaBoostClassifier(algorithm='SAMME',\n base_estimator=Stree(C=55, max_depth=3, tol=0.01),\n learning_rate=0.5, n_estimators=25, random_state=2020)\nBest hyperparameters: {'base_estimator': Stree(C=55, max_depth=3, tol=0.01), 'base_estimator__C': 55, 'base_estimator__kernel': 'linear', 'base_estimator__max_depth': 3, 'base_estimator__tol': 0.01, 'learning_rate': 0.5, 'n_estimators': 25}\nBest accuracy: 0.9559440559440558\n"
-}
-]
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"Best estimator: AdaBoostClassifier(algorithm='SAMME',\n",
+" base_estimator=Stree(C=55, max_depth=7, random_state=1,\n",
+" split_criteria='max_samples', tol=0.1),\n",
+" learning_rate=0.5, n_estimators=25, random_state=1)\n",
+"Best hyperparameters: {'base_estimator': Stree(C=55, max_depth=7, random_state=1, split_criteria='max_samples', tol=0.1), 'base_estimator__C': 55, 'base_estimator__kernel': 'linear', 'base_estimator__max_depth': 7, 'base_estimator__split_criteria': 'max_samples', 'base_estimator__tol': 0.1, 'learning_rate': 0.5, 'n_estimators': 25}"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"Best accuracy: 0.9511777695988222"
 ]
 }
 ],
 "metadata": {
+"colab": {
+"name": "gridsearch.ipynb",
+"provenance": []
+},
+"kernelspec": {
+"display_name": "Python 3",
+"language": "python",
+"name": "python3"
+},
 "language_info": {
 "codemirror_mode": {
 "name": "ipython",

@@ -230,18 +245,9 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.7.6-final"
-},
-"orig_nbformat": 2,
-"kernelspec": {
-"name": "python37664bitgeneralvenvfbd0a23e74cf4e778460f5ffc6761f39",
-"display_name": "Python 3.7.6 64-bit ('general': venv)"
-},
-"colab": {
-"name": "gridsearch.ipynb",
-"provenance": []
+"version": "3.8.2-final"
 }
 },
 "nbformat": 4,
-"nbformat_minor": 0
+"nbformat_minor": 4
 }
@@ -1,4 +1 @@
-numpy
-scikit-learn
-pandas
-ipympl
+scikit-learn>0.24
runtime.txt | 1 (new file)

@@ -0,0 +1 @@
+python-3.8
setup.py | 6

@@ -1,6 +1,6 @@
 import setuptools
 
-__version__ = "0.9rc5"
+__version__ = "1.0rc1"
 __author__ = "Ricardo Montañana Gómez"
 
 
@@ -25,12 +25,12 @@ setuptools.setup(
     classifiers=[
         "Development Status :: 4 - Beta",
         "License :: OSI Approved :: MIT License",
-        "Programming Language :: Python :: 3.7",
+        "Programming Language :: Python :: 3.8",
         "Natural Language :: English",
         "Topic :: Scientific/Engineering :: Artificial Intelligence",
         "Intended Audience :: Science/Research",
     ],
-    install_requires=["scikit-learn>=0.23.0", "numpy", "ipympl"],
+    install_requires=["scikit-learn", "numpy", "ipympl"],
     test_suite="stree.tests",
     zip_safe=False,
 )
stree/Strees.py | 588

@@ -3,18 +3,20 @@ __author__ = "Ricardo Montañana Gómez"
 __copyright__ = "Copyright 2020, Ricardo Montañana Gómez"
 __license__ = "MIT"
 __version__ = "0.9"
-Build an oblique tree classifier based on SVM Trees
+Build an oblique tree classifier based on SVM nodes
 """
 
 import os
 import numbers
 import random
 import warnings
-from math import log
-from itertools import combinations
+from math import log, factorial
+from typing import Optional
 import numpy as np
 from sklearn.base import BaseEstimator, ClassifierMixin
 from sklearn.svm import SVC, LinearSVC
+from sklearn.feature_selection import SelectKBest
+from sklearn.preprocessing import StandardScaler
 from sklearn.utils import check_consistent_length
 from sklearn.utils.multiclass import check_classification_targets
 from sklearn.exceptions import ConvergenceWarning

@@ -41,6 +43,7 @@ class Snode:
         impurity: float,
         title: str,
         weight: np.ndarray = None,
+        scaler: StandardScaler = None,
     ):
         self._clf = clf
         self._title = title

@@ -57,6 +60,8 @@ class Snode:
         )
         self._features = features
         self._impurity = impurity
+        self._partition_column: int = -1
+        self._scaler = scaler
 
     @classmethod
     def copy(cls, node: "Snode") -> "Snode":

@@ -67,11 +72,43 @@ class Snode:
             node._features,
             node._impurity,
             node._title,
+            node._sample_weight,
+            node._scaler,
         )
 
+    def set_partition_column(self, col: int):
+        self._partition_column = col
+
+    def get_partition_column(self) -> int:
+        return self._partition_column
+
     def set_down(self, son):
         self._down = son
 
+    def set_title(self, title):
+        self._title = title
+
+    def set_classifier(self, clf):
+        self._clf = clf
+
+    def set_features(self, features):
+        self._features = features
+
+    def set_impurity(self, impurity):
+        self._impurity = impurity
+
+    def get_title(self) -> str:
+        return self._title
+
+    def get_classifier(self) -> SVC:
+        return self._clf
+
+    def get_impurity(self) -> float:
+        return self._impurity
+
+    def get_features(self) -> np.array:
+        return self._features
+
     def set_up(self, son):
         self._up = son
 
@@ -93,9 +130,8 @@ class Snode:
         classes, card = np.unique(self._y, return_counts=True)
         if len(classes) > 1:
             max_card = max(card)
-            min_card = min(card)
             self._class = classes[card == max_card][0]
-            self._belief = max_card / (max_card + min_card)
+            self._belief = max_card / np.sum(card)
         else:
             self._belief = 1
         try:

@@ -104,24 +140,23 @@ class Snode:
             self._class = None
 
     def __str__(self) -> str:
-        if self.is_leaf():
-            count_values = np.unique(self._y, return_counts=True)
-            result = (
+        count_values = np.unique(self._y, return_counts=True)
+        if self.is_leaf():
+            return (
                 f"{self._title} - Leaf class={self._class} belief="
                 f"{self._belief: .6f} impurity={self._impurity:.4f} "
                 f"counts={count_values}"
             )
-            return result
         else:
             return (
                 f"{self._title} features={self._features} impurity="
-                f"{self._impurity:.4f}"
+                f"{self._impurity:.4f} "
+                f"counts={count_values}"
             )
 
 
 class Siterator:
-    """Stree preorder iterator
-    """
+    """Stree preorder iterator"""
 
     def __init__(self, tree: Snode):
         self._stack = []

@@ -145,10 +180,11 @@ class Splitter:
         self,
         clf: SVC = None,
         criterion: str = None,
-        splitter_type: str = None,
+        feature_select: str = None,
         criteria: str = None,
         min_samples_split: int = None,
         random_state=None,
+        normalize=False,
     ):
         self._clf = clf
         self._random_state = random_state

@@ -157,7 +193,8 @@ class Splitter:
         self._criterion = criterion
         self._min_samples_split = min_samples_split
         self._criteria = criteria
-        self._splitter_type = splitter_type
+        self._feature_select = feature_select
+        self._normalize = normalize
 
         if clf is None:
             raise ValueError(f"clf has to be a sklearn estimator, got({clf})")

@@ -167,20 +204,23 @@ class Splitter:
                 f"criterion must be gini or entropy got({criterion})"
            )
 
-        if criteria not in ["min_distance", "max_samples", "max_distance"]:
+        if criteria not in [
+            "max_samples",
+            "impurity",
+        ]:
             raise ValueError(
-                "split_criteria has to be min_distance "
-                f"max_distance or max_samples got ({criteria})"
+                f"criteria has to be max_samples or impurity; got ({criteria})"
             )
 
-        if splitter_type not in ["random", "best"]:
+        if feature_select not in ["random", "best"]:
             raise ValueError(
-                f"splitter must be either random or best got({splitter_type})"
+                "splitter must be either random or best, got "
+                f"({feature_select})"
             )
         self.criterion_function = getattr(self, f"_{self._criterion}")
         self.decision_criteria = getattr(self, f"_{self._criteria}")
 
-    def impurity(self, y: np.array) -> np.array:
+    def partition_impurity(self, y: np.array) -> np.array:
         return self.criterion_function(y)
 
     @staticmethod

@@ -190,6 +230,18 @@ class Splitter:
 
     @staticmethod
     def _entropy(y: np.array) -> float:
+        """Compute entropy of a labels set
+
+        Parameters
+        ----------
+        y : np.array
+            set of labels
+
+        Returns
+        -------
+        float
+            entropy
+        """
         n_labels = len(y)
         if n_labels <= 1:
             return 0

@@ -208,6 +260,22 @@ class Splitter:
     def information_gain(
         self, labels: np.array, labels_up: np.array, labels_dn: np.array
     ) -> float:
+        """Compute information gain of a split candidate
+
+        Parameters
+        ----------
+        labels : np.array
+            labels of the dataset
+        labels_up : np.array
+            labels of one side
+        labels_dn : np.array
+            labels on the other side
+
+        Returns
+        -------
+        float
+            information gain
+        """
         imp_prev = self.criterion_function(labels)
         card_up = card_dn = imp_up = imp_dn = 0
         if labels_up is not None:
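A quick worked example of what `_entropy` and `information_gain` compute (an illustrative sketch using base-2 logarithms; the library's own log base may differ, which rescales but does not reorder the gains):

```python
import numpy as np

def entropy(y: np.ndarray) -> float:
    # Shannon entropy (base 2) of a label vector
    _, counts = np.unique(y, return_counts=True)
    p = counts / counts.sum()
    return float(-(p * np.log2(p)).sum())

y = np.array([0, 0, 0, 0, 1, 1])   # 4 samples of class 0, 2 of class 1
y_up = y[:4]                       # one side of a candidate split (pure)
y_dn = y[4:]                       # the other side (also pure)

# information gain = H(y) minus the size-weighted entropies of both sides;
# both sides are pure here, so the split recovers all of the entropy
gain = (
    entropy(y)
    - len(y_up) / len(y) * entropy(y_up)
    - len(y_dn) / len(y) * entropy(y_dn)
)
print(round(entropy(y), 3), round(gain, 3))  # 0.918 0.918
```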
@@ -238,7 +306,7 @@ class Splitter:
|
|||||||
node = Snode(
|
node = Snode(
|
||||||
self._clf, dataset, labels, feature_set, 0.0, "subset"
|
self._clf, dataset, labels, feature_set, 0.0, "subset"
|
||||||
)
|
)
|
||||||
self.partition(dataset, node)
|
self.partition(dataset, node, train=True)
|
||||||
y1, y2 = self.part(labels)
|
y1, y2 = self.part(labels)
|
||||||
gain = self.information_gain(labels, y1, y2)
|
gain = self.information_gain(labels, y1, y2)
|
||||||
if gain > max_gain:
|
if gain > max_gain:
|
||||||
@@ -246,124 +314,210 @@ class Splitter:
|
|||||||
selected = feature_set
|
selected = feature_set
|
||||||
return selected if selected is not None else feature_set
|
return selected if selected is not None else feature_set
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _generate_spaces(features: int, max_features: int) -> list:
|
||||||
|
"""Generate at most 5 feature random combinations
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
features : int
|
||||||
|
number of features in each combination
|
||||||
|
max_features : int
|
||||||
|
number of features in dataset
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
list
|
||||||
|
list with up to 5 combination of features randomly selected
|
||||||
|
"""
|
||||||
|
comb = set()
|
||||||
|
# Generate at most 5 combinations
|
||||||
|
number = factorial(features) / (
|
||||||
|
factorial(max_features) * factorial(features - max_features)
|
||||||
|
)
|
||||||
|
set_length = min(5, number)
|
||||||
|
while len(comb) < set_length:
|
||||||
|
comb.add(
|
||||||
|
tuple(sorted(random.sample(range(features), max_features)))
|
||||||
|
)
|
||||||
|
return list(comb)
|
||||||
|
|
||||||
def _get_subspaces_set(
|
def _get_subspaces_set(
|
||||||
self, dataset: np.array, labels: np.array, max_features: int
|
self, dataset: np.array, labels: np.array, max_features: int
|
||||||
) -> np.array:
|
) -> tuple:
|
||||||
features = range(dataset.shape[1])
|
"""Compute the indices of the features selected by splitter depending
|
||||||
features_sets = list(combinations(features, max_features))
|
on the self._feature_select hyper parameter
|
||||||
if len(features_sets) > 1:
|
|
||||||
if self._splitter_type == "random":
|
Parameters
|
||||||
index = random.randint(0, len(features_sets) - 1)
|
----------
|
||||||
return features_sets[index]
|
dataset : np.array
|
||||||
else:
|
array of samples
|
||||||
# get only 3 sets at most
|
labels : np.array
|
||||||
if len(features_sets) > 3:
|
labels of the dataset
|
||||||
features_sets = random.sample(features_sets, 3)
|
max_features : int
|
||||||
|
number of features of the subspace
|
||||||
|
(<= number of features in dataset)
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
tuple
|
||||||
|
indices of the features selected
|
||||||
|
"""
|
||||||
|
if dataset.shape[1] == max_features:
|
||||||
|
# No feature reduction applies
|
||||||
|
return tuple(range(dataset.shape[1]))
|
||||||
|
if self._feature_select == "random":
|
||||||
|
features_sets = self._generate_spaces(
|
||||||
|
dataset.shape[1], max_features
|
||||||
|
)
|
||||||
return self._select_best_set(dataset, labels, features_sets)
|
return self._select_best_set(dataset, labels, features_sets)
|
||||||
else:
|
# Take KBest features
|
||||||
return features_sets[0]
|
return (
|
||||||
|
SelectKBest(k=max_features)
|
||||||
|
.fit(dataset, labels)
|
||||||
|
.get_support(indices=True)
|
||||||
|
)
|
||||||
|
|
||||||
def get_subspace(
|
def get_subspace(
|
||||||
self, dataset: np.array, labels: np.array, max_features: int
|
self, dataset: np.array, labels: np.array, max_features: int
|
||||||
) -> list:
|
) -> tuple:
|
||||||
"""Return the best subspace to make a split
|
"""Re3turn a subspace of the selected dataset of max_features length.
|
||||||
|
Depending on hyperparmeter
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
dataset : np.array
|
||||||
|
array of samples (# samples, # features)
|
||||||
|
labels : np.array
|
||||||
|
labels of the dataset
|
||||||
|
max_features : int
|
||||||
|
number of features to form the subspace
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
tuple
|
||||||
|
tuple with the dataset with only the features selected and the
|
||||||
|
indices of the features selected
|
||||||
"""
|
"""
|
||||||
indices = self._get_subspaces_set(dataset, labels, max_features)
|
indices = self._get_subspaces_set(dataset, labels, max_features)
|
||||||
return dataset[:, indices], indices
|
return dataset[:, indices], indices
|
||||||
|
|
||||||
@staticmethod
|
def _impurity(self, data: np.array, y: np.array) -> np.array:
|
||||||
def _min_distance(data: np.array, _) -> np.array:
|
"""return column of dataset to be taken into account to split dataset
|
||||||
"""Assign class to min distances
|
|
||||||
|
|
||||||
return a vector of classes so partition can separate class 0 from
|
Parameters
|
||||||
the rest of classes, ie. class 0 goes to one splitted node and the
|
----------
|
||||||
rest of classes go to the other
|
data : np.array
|
||||||
:param data: distances to hyper plane of every class
|
distances to hyper plane of every class
|
||||||
:type data: np.array (m, n_classes)
|
y : np.array
|
||||||
:param _: enable call compat with other measures
|
vector of labels (classes)
|
||||||
:type _: None
|
|
||||||
:return: vector with the class assigned to each sample
|
Returns
|
||||||
:rtype: np.array shape (m,)
|
-------
|
||||||
|
np.array
|
||||||
|
column of dataset to be taken into account to split dataset
|
||||||
"""
|
"""
|
||||||
return np.argmin(data, axis=1)
|
max_gain = 0
|
||||||
|
selected = -1
|
||||||
@staticmethod
|
for col in range(data.shape[1]):
|
||||||
def _max_distance(data: np.array, _) -> np.array:
|
tup = y[data[:, col] > 0]
|
||||||
"""Assign class to max distances
|
tdn = y[data[:, col] <= 0]
|
||||||
|
info_gain = self.information_gain(y, tup, tdn)
|
||||||
return a vector of classes so partition can separate class 0 from
|
if info_gain > max_gain:
|
||||||
the rest of classes, ie. class 0 goes to one splitted node and the
|
selected = col
|
||||||
rest of classes go to the other
|
max_gain = info_gain
|
||||||
:param data: distances to hyper plane of every class
|
return selected
|
||||||
:type data: np.array (m, n_classes)
|
|
||||||
:param _: enable call compat with other measures
|
|
||||||
:type _: None
|
|
||||||
:return: vector with the class assigned to each sample values
|
|
||||||
(can be 0, 1, ...)
|
|
||||||
:rtype: np.array shape (m,)
|
|
||||||
"""
|
|
||||||
return np.argmax(data, axis=1)
|
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _max_samples(data: np.array, y: np.array) -> np.array:
|
def _max_samples(data: np.array, y: np.array) -> np.array:
|
||||||
"""return distances of the class with more samples
|
"""return column of dataset to be taken into account to split dataset
|
||||||
|
|
||||||
:param data: distances to hyper plane of every class
|
Parameters
|
||||||
:type data: np.array (m, n_classes)
|
----------
|
||||||
:param y: vector of labels (classes)
|
data : np.array
|
||||||
:type y: np.array (m,)
|
distances to hyper plane of every class
|
||||||
:return: vector with distances to hyperplane (can be positive or neg.)
|
y : np.array
|
||||||
:rtype: np.array shape (m,)
|
column of dataset to be taken into account to split dataset
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
np.array
|
||||||
|
column of dataset to be taken into account to split dataset
|
||||||
"""
|
"""
|
||||||
# select the class with max number of samples
|
# select the class with max number of samples
|
||||||
_, samples = np.unique(y, return_counts=True)
|
_, samples = np.unique(y, return_counts=True)
|
||||||
selected = np.argmax(samples)
|
return np.argmax(samples)
|
||||||
return data[:, selected]
|
|
||||||
-    def partition(self, samples: np.array, node: Snode):
+    def partition(self, samples: np.array, node: Snode, train: bool):
         """Set the criteria to split arrays. Compute the indices of the samples
-        that should go to one side of the tree (down)
+        that should go to one side of the tree (up)

         """
+        # data contains the distances of every sample to every class hyperplane
+        # array of (m, nc) nc = # classes
         data = self._distances(node, samples)
         if data.shape[0] < self._min_samples_split:
-            self._down = np.ones((data.shape[0]), dtype=bool)
+            # there aren't enough samples to split
+            self._up = np.ones((data.shape[0]), dtype=bool)
             return
         if data.ndim > 1:
             # split criteria for multiclass
-            data = self.decision_criteria(data, node._y)
-        self._down = data > 0
-
-    @staticmethod
-    def _distances(node: Snode, data: np.ndarray) -> np.array:
-        """Compute distances of the samples to the hyperplane of the node
-
-        :param node: node containing the svm classifier
-        :type node: Snode
-        :param data: samples to find out distance to hyperplane
-        :type data: np.ndarray
-        :return: array of shape (m, 1) with the distances of every sample to
-        the hyperplane of the node
-        :rtype: np.array
-        """
-        return node._clf.decision_function(data[:, node._features])
+            # Convert data to a (m, 1) array selecting values for samples
+            if train:
+                # in train time we have to compute the column to take into
+                # account to split the dataset
+                col = self.decision_criteria(data, node._y)
+                node.set_partition_column(col)
+            else:
+                # in predict time just use the column computed in train time
+                # i.e. take the classifier of class <col>
+                col = node.get_partition_column()
+            if col == -1:
+                # No partition is producing information gain
+                data = np.ones(data.shape)
+            data = data[:, col]
+        self._up = data > 0
     def part(self, origin: np.array) -> list:
-        """Split an array in two based on indices (down) and its complement
+        """Split an array in two based on indices (self._up) and its complement
+        partition has to be called first to establish up indices

-        :param origin: dataset to split
-        :type origin: np.array
-        :param down: indices to use to split array
-        :type down: np.array
-        :return: list with two splits of the array
-        :rtype: list
+        Parameters
+        ----------
+        origin : np.array
+            dataset to split
+
+        Returns
+        -------
+        list
+            list with two splits of the array
         """
-        up = ~self._down
+        down = ~self._up
         return [
-            origin[up] if any(up) else None,
-            origin[self._down] if any(self._down) else None,
+            origin[self._up] if any(self._up) else None,
+            origin[down] if any(down) else None,
         ]
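Note: to make the up/down bookkeeping concrete, here is a self-contained sketch of the same mask-and-complement split that partition() and part() perform; the Splitter internals are reduced to a plain boolean mask, only numpy is assumed:

    import numpy as np

    # a positive distance sends a sample to the "up" side, the complement goes "down"
    distances = np.array([1.5, -0.3, 0.2, -2.0, 0.7])
    up = distances > 0          # what partition() stores in self._up
    down = ~up                  # what part() derives

    origin = np.arange(5) * 10  # any array aligned with the samples
    X_up = origin[up] if any(up) else None
    X_down = origin[down] if any(down) else None
    print(X_up, X_down)         # [ 0 20 40] [10 30]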
+    def _distances(self, node: Snode, data: np.ndarray) -> np.array:
+        """Compute distances of the samples to the hyperplane of the node
+
+        Parameters
+        ----------
+        node : Snode
+            node containing the svm classifier
+        data : np.ndarray
+            samples to compute distance to hyperplane
+
+        Returns
+        -------
+        np.array
+            array of shape (m, nc) with the distances of every sample to
+            the hyperplane of every class. nc = # of classes
+        """
+        X_transformed = data[:, node._features]
+        if self._normalize:
+            X_transformed = node._scaler.transform(X_transformed)
+        return node._clf.decision_function(X_transformed)
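Note: the (m, nc) shape mentioned in the docstring comes straight from scikit-learn: for a multiclass problem, decision_function returns one signed distance per class hyperplane. A quick check using only the standard sklearn API, nothing project-specific:

    from sklearn.datasets import load_iris
    from sklearn.svm import LinearSVC

    X, y = load_iris(return_X_y=True)          # 3 classes
    clf = LinearSVC(random_state=0, max_iter=10000).fit(X, y)
    distances = clf.decision_function(X)
    print(distances.shape)                      # (150, 3): one column per class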

 class Stree(BaseEstimator, ClassifierMixin):
     """Estimator that is based on binary trees of svm nodes
@@ -377,17 +531,18 @@ class Stree(BaseEstimator, ClassifierMixin):
         self,
         C: float = 1.0,
         kernel: str = "linear",
-        max_iter: int = 1000,
+        max_iter: int = 1e5,
         random_state: int = None,
         max_depth: int = None,
         tol: float = 1e-4,
         degree: int = 3,
         gamma="scale",
-        split_criteria: str = "max_samples",
-        criterion: str = "gini",
+        split_criteria: str = "impurity",
+        criterion: str = "entropy",
         min_samples_split: int = 0,
         max_features=None,
         splitter: str = "random",
+        normalize: bool = False,
     ):
         self.max_iter = max_iter
         self.C = C
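Note: with these new defaults the estimator is used like any scikit-learn classifier; a small usage sketch, assuming only the parameter names visible in this diff (the dataset is arbitrary):

    from sklearn.datasets import load_wine
    from stree import Stree

    X, y = load_wine(return_X_y=True)
    # entropy/impurity are now the defaults; normalize adds per-node scaling
    clf = Stree(C=1.0, kernel="linear", normalize=True, random_state=0)
    print(clf.fit(X, y).score(X, y))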
@@ -402,9 +557,11 @@ class Stree(BaseEstimator, ClassifierMixin):
         self.max_features = max_features
         self.criterion = criterion
         self.splitter = splitter
+        self.normalize = normalize

     def _more_tags(self) -> dict:
         """Required by sklearn to supply features of the classifier
+        make mandatory the labels array

         :return: the tag required
         :rtype: dict
@@ -416,16 +573,19 @@ class Stree(BaseEstimator, ClassifierMixin):
     ) -> "Stree":
         """Build the tree based on the dataset of samples and its labels

-        :param X: dataset of samples to make predictions
-        :type X: np.array
-        :param y: samples labels
-        :type y: np.array
-        :param sample_weight: weights of the samples. Rescale C per sample.
-        Hi' weights force the classifier to put more emphasis on these points
-        :type sample_weight: np.array optional
-        :raises ValueError: if parameters C or max_depth are out of bounds
-        :return: itself to be able to chain actions: fit().predict() ...
-        :rtype: Stree
+        Returns
+        -------
+        Stree
+            itself to be able to chain actions: fit().predict() ...
+
+        Raises
+        ------
+        ValueError
+            if C < 0
+        ValueError
+            if max_depth < 1
+        ValueError
+            if all samples have 0 or negative weights
         """
         # Check parameters are Ok.
         if self.C < 0:
@@ -448,15 +608,20 @@ class Stree(BaseEstimator, ClassifierMixin):
         sample_weight = _check_sample_weight(
             sample_weight, X, dtype=np.float64
         )
+        if not any(sample_weight):
+            raise ValueError(
+                "Invalid input - all samples have zero or negative weights."
+            )
         check_classification_targets(y)
         # Initialize computed parameters
         self.splitter_ = Splitter(
             clf=self._build_clf(),
             criterion=self.criterion,
-            splitter_type=self.splitter,
+            feature_select=self.splitter,
             criteria=self.split_criteria,
             random_state=self.random_state,
             min_samples_split=self.min_samples_split,
+            normalize=self.normalize,
         )
         if self.random_state is not None:
             random.seed(self.random_state)
@@ -469,6 +634,8 @@ class Stree(BaseEstimator, ClassifierMixin):
         self.max_features_ = self._initialize_max_features()
         self.tree_ = self.train(X, y, sample_weight, 1, "root")
         self._build_predictor()
+        self.X_ = X
+        self.y_ = y
         return self

     def train(
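Note: two behaviours in these hunks are easy to demonstrate: fit() still returns self (so calls chain, as the docstring says) and an all-zero sample_weight is now rejected. A hedged sketch built only on what the diff shows:

    import numpy as np
    from sklearn.datasets import load_iris
    from stree import Stree

    X, y = load_iris(return_X_y=True)
    # fit() returns the estimator itself, so actions can be chained
    labels = Stree(random_state=0).fit(X, y).predict(X)

    # all-zero weights now raise instead of silently training on nothing
    try:
        Stree().fit(X, y, sample_weight=np.zeros(len(y)))
    except ValueError as e:
        print(e)  # Invalid input - all samples have zero or negative weights.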
@@ -478,74 +645,73 @@ class Stree(BaseEstimator, ClassifierMixin):
         sample_weight: np.ndarray,
         depth: int,
         title: str,
-    ) -> Snode:
+    ) -> Optional[Snode]:
         """Recursive function to split the original dataset into predictor
         nodes (leaves)

-        :param X: samples dataset
-        :type X: np.ndarray
-        :param y: samples labels
-        :type y: np.ndarray
-        :param sample_weight: weight of samples. Rescale C per sample.
-        Hi weights force the classifier to put more emphasis on these points.
-        :type sample_weight: np.ndarray
-        :param depth: actual depth in the tree
-        :type depth: int
-        :param title: description of the node
-        :type title: str
-        :return: binary tree
-        :rtype: Snode
+        Parameters
+        ----------
+        X : np.ndarray
+            samples dataset
+        y : np.ndarray
+            samples labels
+        sample_weight : np.ndarray
+            weight of samples. Rescale C per sample.
+        depth : int
+            actual depth in the tree
+        title : str
+            description of the node
+
+        Returns
+        -------
+        Optional[Snode]
+            binary tree
         """
         if depth > self.__max_depth:
             return None
+        # Mask samples with 0 weight
+        if any(sample_weight == 0):
+            indices_zero = sample_weight == 0
+            X = X[~indices_zero, :]
+            y = y[~indices_zero]
+            sample_weight = sample_weight[~indices_zero]
+        self.depth_ = max(depth, self.depth_)
+        scaler = StandardScaler()
+        node = Snode(None, X, y, X.shape[1], 0.0, title, sample_weight, scaler)
         if np.unique(y).shape[0] == 1:
             # only 1 class => pure dataset
-            return Snode(
-                clf=None,
-                X=X,
-                y=y,
-                features=X.shape[1],
-                impurity=0.0,
-                title=title + ", <pure>",
-                weight=sample_weight,
-            )
+            node.set_title(title + ", <pure>")
+            return node
         # Train the model
         clf = self._build_clf()
         Xs, features = self.splitter_.get_subspace(X, y, self.max_features_)
-        # solve WARNING: class label 0 specified in weight is not found
-        # in bagging
-        if any(sample_weight == 0):
-            indices = sample_weight == 0
-            y_next = y[~indices]
-            # touch weights if removing any class
-            if np.unique(y_next).shape[0] != self.n_classes_:
-                sample_weight += 1e-5
+        if self.normalize:
+            scaler.fit(Xs)
+            Xs = scaler.transform(Xs)
         clf.fit(Xs, y, sample_weight=sample_weight)
-        impurity = self.splitter_.impurity(y)
-        node = Snode(clf, X, y, features, impurity, title, sample_weight)
-        self.depth_ = max(depth, self.depth_)
-        self.splitter_.partition(X, node)
+        node.set_impurity(self.splitter_.partition_impurity(y))
+        node.set_classifier(clf)
+        node.set_features(features)
+        self.splitter_.partition(X, node, True)
         X_U, X_D = self.splitter_.part(X)
         y_u, y_d = self.splitter_.part(y)
         sw_u, sw_d = self.splitter_.part(sample_weight)
         if X_U is None or X_D is None:
             # didn't part anything
-            return Snode(
-                clf,
-                X,
-                y,
-                features=X.shape[1],
-                impurity=impurity,
-                title=title + ", <cgaf>",
-                weight=sample_weight,
-            )
-        node.set_up(self.train(X_U, y_u, sw_u, depth + 1, title + " - Up"))
-        node.set_down(self.train(X_D, y_d, sw_d, depth + 1, title + " - Down"))
+            node.set_title(title + ", <cgaf>")
+            return node
+        node.set_up(
+            self.train(X_U, y_u, sw_u, depth + 1, title + f" - Up({depth+1})")
+        )
+        node.set_down(
+            self.train(
+                X_D, y_d, sw_d, depth + 1, title + f" - Down({depth+1})"
+            )
+        )
         return node

     def _build_predictor(self):
-        """Process the leaves to make them predictors
-        """
+        """Process the leaves to make them predictors"""

         def run_tree(node: Snode):
             if node.is_leaf():
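Note: the old epsilon patch is gone: train() now simply drops zero-weight samples before fitting. The masking idiom in isolation, plain numpy with illustrative names:

    import numpy as np

    X = np.array([[1.0], [2.0], [3.0], [4.0]])
    y = np.array([0, 0, 1, 1])
    sample_weight = np.array([1.0, 0.0, 1.0, 0.0])

    if any(sample_weight == 0):
        indices_zero = sample_weight == 0
        X = X[~indices_zero, :]             # keep only weighted samples
        y = y[~indices_zero]
        sample_weight = sample_weight[~indices_zero]
    print(X.ravel(), y, sample_weight)      # [1. 3.] [0 1] [1. 1.]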
@@ -557,8 +723,7 @@ class Stree(BaseEstimator, ClassifierMixin):
         run_tree(self.tree_)

     def _build_clf(self):
-        """ Build the correct classifier for the node
-        """
+        """Build the correct classifier for the node"""
         return (
             LinearSVC(
                 max_iter=self.max_iter,
@@ -581,12 +746,17 @@ class Stree(BaseEstimator, ClassifierMixin):
     def _reorder_results(y: np.array, indices: np.array) -> np.array:
         """Reorder an array based on the array of indices passed

-        :param y: data untidy
-        :type y: np.array
-        :param indices: indices used to set order
-        :type indices: np.array
-        :return: array y ordered
-        :rtype: np.array
+        Parameters
+        ----------
+        y : np.array
+            data untidy
+        indices : np.array
+            indices used to set order
+
+        Returns
+        -------
+        np.array
+            array y ordered
         """
         # return array of same type given in y
         y_ordered = y.copy()
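Note: a tiny demonstration of the scatter-style reorder that _reorder_results performs; the helper below is a standalone re-implementation of the idiom, not the class method itself:

    import numpy as np

    def reorder_results(y: np.ndarray, indices: np.ndarray) -> np.ndarray:
        # place y[i] at position indices[i], undoing the tree's partition order
        y_ordered = y.copy()
        y_ordered[indices] = y
        return y_ordered

    y = np.array([10, 20, 30, 40])          # predictions in visit order
    indices = np.array([2, 0, 3, 1])        # original positions of those samples
    print(reorder_results(y, indices))      # [20 40 10 30]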
@@ -598,10 +768,22 @@ class Stree(BaseEstimator, ClassifierMixin):
     def predict(self, X: np.array) -> np.array:
         """Predict labels for each sample in dataset passed

-        :param X: dataset of samples
-        :type X: np.array
-        :return: array of labels
-        :rtype: np.array
+        Parameters
+        ----------
+        X : np.array
+            dataset of samples
+
+        Returns
+        -------
+        np.array
+            array of labels
+
+        Raises
+        ------
+        ValueError
+            if dataset with inconsistent number of features
+        NotFittedError
+            if model is not fitted
         """

         def predict_class(
@@ -613,7 +795,7 @@ class Stree(BaseEstimator, ClassifierMixin):
                 # set a class for every sample in dataset
                 prediction = np.full((xp.shape[0], 1), node._class)
                 return prediction, indices
-            self.splitter_.partition(xp, node)
+            self.splitter_.partition(xp, node, train=False)
             x_u, x_d = self.splitter_.part(xp)
             i_u, i_d = self.splitter_.part(indices)
             prx_u, prin_u = predict_class(x_u, i_u, node.get_up())
@@ -643,15 +825,19 @@ class Stree(BaseEstimator, ClassifierMixin):
     ) -> float:
         """Compute accuracy of the prediction

-        :param X: dataset of samples to make predictions
-        :type X: np.array
-        :param y_true: samples labels
-        :type y_true: np.array
-        :param sample_weight: weights of the samples. Rescale C per sample.
-        Hi' weights force the classifier to put more emphasis on these points
-        :type sample_weight: np.array optional
-        :return: accuracy of the prediction
-        :rtype: float
+        Parameters
+        ----------
+        X : np.array
+            dataset of samples to make predictions
+        y : np.array
+            samples labels
+        sample_weight : np.array, optional
+            weights of the samples. Rescale C per sample, by default None
+
+        Returns
+        -------
+        float
+            accuracy of the prediction
         """
         # sklearn check
         check_is_fitted(self)
@@ -664,12 +850,30 @@ class Stree(BaseEstimator, ClassifierMixin):
         score = y_true == y_pred
         return _weighted_sum(score, sample_weight, normalize=True)

+    def nodes_leaves(self) -> tuple:
+        """Compute the number of nodes and leaves in the built tree
+
+        Returns
+        -------
+        [tuple]
+            tuple with the number of nodes and the number of leaves
+        """
+        nodes = 0
+        leaves = 0
+        for node in self:
+            nodes += 1
+            if node.is_leaf():
+                leaves += 1
+        return nodes, leaves
+
     def __iter__(self) -> Siterator:
         """Create an iterator to be able to visit the nodes of the tree in
         preorder, can make a list with all the nodes in preorder

-        :return: an iterator, can for i in... and list(...)
-        :rtype: Siterator
+        Returns
+        -------
+        Siterator
+            an iterator, can for i in... and list(...)
         """
         try:
             tree = self.tree_
@@ -680,8 +884,10 @@ class Stree(BaseEstimator, ClassifierMixin):
     def __str__(self) -> str:
         """String representation of the tree

-        :return: description of nodes in the tree in preorder
-        :rtype: str
+        Returns
+        -------
+        str
+            description of nodes in the tree in preorder
         """
         output = ""
         for i in self:
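Note: since Stree is iterable in preorder, nodes_leaves() is just a counting pass over that iterator, and the same numbers can be recomputed by hand. A usage sketch, assuming only the methods visible in this diff:

    from sklearn.datasets import load_wine
    from stree import Stree

    X, y = load_wine(return_X_y=True)
    clf = Stree(random_state=0).fit(X, y)
    nodes, leaves = clf.nodes_leaves()
    # cross-check with the preorder iterator the method relies on
    assert nodes == len(list(clf))
    assert leaves == sum(1 for n in clf if n.is_leaf())
    print(nodes, leaves)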
@@ -1,8 +1,6 @@
 import os
 import unittest
-
 import numpy as np
-
 from stree import Stree, Snode
 from .utils import load_dataset

@@ -40,12 +38,13 @@ class Snode_test(unittest.TestCase):
         # Check Class
         class_computed = classes[card == max_card]
         self.assertEqual(class_computed, node._class)
+        # Check Partition column
+        self.assertEqual(node._partition_column, -1)

         check_leave(self._clf.tree_)

     def test_nodes_coefs(self):
-        """Check if the nodes of the tree have the right attributes filled
-        """
+        """Check if the nodes of the tree have the right attributes filled"""

         def run_tree(node: Snode):
             if node._belief < 1:
@@ -54,16 +53,44 @@ class Snode_test(unittest.TestCase):
                 self.assertIsNotNone(node._clf.coef_)
             if node.is_leaf():
                 return
-            run_tree(node.get_down())
             run_tree(node.get_up())
+            run_tree(node.get_down())

-        run_tree(self._clf.tree_)
+        model = Stree(self._random_state)
+        model.fit(*load_dataset(self._random_state, 3, 4))
+        run_tree(model.tree_)

     def test_make_predictor_on_leaf(self):
         test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
         test.make_predictor()
         self.assertEqual(1, test._class)
         self.assertEqual(0.75, test._belief)
+        self.assertEqual(-1, test._partition_column)
+
+    def test_set_title(self):
+        test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
+        self.assertEqual("test", test.get_title())
+        test.set_title("another")
+        self.assertEqual("another", test.get_title())
+
+    def test_set_classifier(self):
+        test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
+        clf = Stree()
+        self.assertIsNone(test.get_classifier())
+        test.set_classifier(clf)
+        self.assertEqual(clf, test.get_classifier())
+
+    def test_set_impurity(self):
+        test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
+        self.assertEqual(0.0, test.get_impurity())
+        test.set_impurity(54.7)
+        self.assertEqual(54.7, test.get_impurity())
+
+    def test_set_features(self):
+        test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [0, 1], 0.0, "test")
+        self.assertListEqual([0, 1], test.get_features())
+        test.set_features([1, 2])
+        self.assertListEqual([1, 2], test.get_features())

     def test_make_predictor_on_not_leaf(self):
         test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
@@ -71,11 +98,14 @@ class Snode_test(unittest.TestCase):
         test.make_predictor()
         self.assertIsNone(test._class)
         self.assertEqual(0, test._belief)
+        self.assertEqual(-1, test._partition_column)
+        self.assertEqual(-1, test.get_up()._partition_column)

     def test_make_predictor_on_leaf_bogus_data(self):
         test = Snode(None, [1, 2, 3, 4], [], [], 0.0, "test")
         test.make_predictor()
         self.assertIsNone(test._class)
+        self.assertEqual(-1, test._partition_column)

     def test_copy_node(self):
         px = [1, 2, 3, 4]
@@ -86,3 +116,6 @@ class Snode_test(unittest.TestCase):
         self.assertListEqual(computed._y, py)
         self.assertEqual("test", computed._title)
         self.assertIsInstance(computed._clf, Stree)
+        self.assertEqual(test._partition_column, computed._partition_column)
+        self.assertEqual(test._sample_weight, computed._sample_weight)
+        self.assertEqual(test._scaler, computed._scaler)
@@ -6,6 +6,7 @@ import numpy as np
 from sklearn.svm import SVC
 from sklearn.datasets import load_wine, load_iris
 from stree import Splitter
+from .utils import load_dataset


 class Splitter_test(unittest.TestCase):
@@ -17,15 +18,15 @@ class Splitter_test(unittest.TestCase):
     def build(
         clf=SVC,
         min_samples_split=0,
-        splitter_type="random",
+        feature_select="random",
         criterion="gini",
-        criteria="min_distance",
+        criteria="max_samples",
         random_state=None,
     ):
         return Splitter(
             clf=clf(random_state=random_state, kernel="rbf"),
             min_samples_split=min_samples_split,
-            splitter_type=splitter_type,
+            feature_select=feature_select,
             criterion=criterion,
             criteria=criteria,
             random_state=random_state,
@@ -39,24 +40,20 @@ class Splitter_test(unittest.TestCase):
         with self.assertRaises(ValueError):
             self.build(criterion="duck")
         with self.assertRaises(ValueError):
-            self.build(splitter_type="duck")
+            self.build(feature_select="duck")
         with self.assertRaises(ValueError):
             self.build(criteria="duck")
         with self.assertRaises(ValueError):
             _ = Splitter(clf=None)
-        for splitter_type in ["best", "random"]:
+        for feature_select in ["best", "random"]:
             for criterion in ["gini", "entropy"]:
-                for criteria in [
-                    "min_distance",
-                    "max_samples",
-                    "max_distance",
-                ]:
+                for criteria in ["max_samples", "impurity"]:
                     tcl = self.build(
-                        splitter_type=splitter_type,
+                        feature_select=feature_select,
                         criterion=criterion,
                         criteria=criteria,
                     )
-                    self.assertEqual(splitter_type, tcl._splitter_type)
+                    self.assertEqual(feature_select, tcl._feature_select)
                     self.assertEqual(criterion, tcl._criterion)
                     self.assertEqual(criteria, tcl._criteria)
@@ -138,78 +135,77 @@ class Splitter_test(unittest.TestCase):
                 [0.7, 0.01, -0.1],
                 [0.7, -0.9, 0.5],
                 [0.1, 0.2, 0.3],
+                [-0.1, 0.2, 0.3],
+                [-0.1, 0.2, 0.3],
             ]
         )
-        expected = np.array([0.2, 0.01, -0.9, 0.2])
-        y = [1, 2, 1, 0]
+        expected = data[:, 0]
+        y = [1, 2, 1, 0, 0, 0]
         computed = tcl._max_samples(data, y)
-        self.assertEqual((4,), computed.shape)
-        self.assertListEqual(expected.tolist(), computed.tolist())
+        self.assertEqual(0, computed)
+        computed_data = data[:, computed]
+        self.assertEqual((6,), computed_data.shape)
+        self.assertListEqual(expected.tolist(), computed_data.tolist())

-    def test_min_distance(self):
-        tcl = self.build()
+    def test_impurity(self):
+        tcl = self.build(criteria="impurity")
         data = np.array(
             [
                 [-0.1, 0.2, -0.3],
                 [0.7, 0.01, -0.1],
                 [0.7, -0.9, 0.5],
                 [0.1, 0.2, 0.3],
+                [-0.1, 0.2, 0.3],
+                [-0.1, 0.2, 0.3],
             ]
         )
-        expected = np.array([2, 2, 1, 0])
-        computed = tcl._min_distance(data, None)
-        self.assertEqual((4,), computed.shape)
-        self.assertListEqual(expected.tolist(), computed.tolist())
+        expected = data[:, 2]
+        y = np.array([1, 2, 1, 0, 0, 0])
+        computed = tcl._impurity(data, y)
+        self.assertEqual(2, computed)
+        computed_data = data[:, computed]
+        self.assertEqual((6,), computed_data.shape)
+        self.assertListEqual(expected.tolist(), computed_data.tolist())

-    def test_max_distance(self):
-        tcl = self.build(criteria="max_distance")
-        data = np.array(
-            [
-                [-0.1, 0.2, -0.3],
-                [0.7, 0.01, -0.1],
-                [0.7, -0.9, 0.5],
-                [0.1, 0.2, 0.3],
-            ]
-        )
-        expected = np.array([1, 0, 0, 2])
-        computed = tcl._max_distance(data, None)
-        self.assertEqual((4,), computed.shape)
-        self.assertListEqual(expected.tolist(), computed.tolist())
+    def test_generate_subspaces(self):
+        features = 250
+        for max_features in range(2, features):
+            num = len(Splitter._generate_spaces(features, max_features))
+            self.assertEqual(5, num)
+        self.assertEqual(3, len(Splitter._generate_spaces(3, 2)))
+        self.assertEqual(4, len(Splitter._generate_spaces(4, 3)))

     def test_best_splitter_few_sets(self):
         X, y = load_iris(return_X_y=True)
         X = np.delete(X, 3, 1)
-        tcl = self.build(splitter_type="best", random_state=self._random_state)
+        tcl = self.build(
+            feature_select="best", random_state=self._random_state
+        )
         dataset, computed = tcl.get_subspace(X, y, max_features=2)
         self.assertListEqual([0, 2], list(computed))
         self.assertListEqual(X[:, computed].tolist(), dataset.tolist())

     def test_splitter_parameter(self):
         expected_values = [
-            [2, 3, 5, 7],  # best entropy min_distance
-            [0, 2, 4, 5],  # best entropy max_samples
-            [0, 2, 8, 12],  # best entropy max_distance
-            [1, 2, 5, 12],  # best gini min_distance
-            [0, 3, 4, 10],  # best gini max_samples
-            [1, 2, 9, 12],  # best gini max_distance
-            [3, 9, 11, 12],  # random entropy min_distance
-            [1, 5, 6, 9],  # random entropy max_samples
-            [1, 2, 4, 8],  # random entropy max_distance
-            [2, 6, 7, 12],  # random gini min_distance
-            [3, 9, 10, 11],  # random gini max_samples
-            [2, 5, 8, 12],  # random gini max_distance
+            [0, 6, 11, 12],  # best entropy max_samples
+            [0, 6, 11, 12],  # best entropy impurity
+            [0, 6, 11, 12],  # best gini max_samples
+            [0, 6, 11, 12],  # best gini impurity
+            [0, 3, 8, 12],  # random entropy max_samples
+            [0, 3, 7, 12],  # random entropy impurity
+            [1, 7, 9, 12],  # random gini max_samples
+            [1, 5, 8, 12],  # random gini impurity
         ]
         X, y = load_wine(return_X_y=True)
         rn = 0
-        for splitter_type in ["best", "random"]:
+        for feature_select in ["best", "random"]:
             for criterion in ["entropy", "gini"]:
                 for criteria in [
-                    "min_distance",
                     "max_samples",
-                    "max_distance",
+                    "impurity",
                 ]:
                     tcl = self.build(
-                        splitter_type=splitter_type,
+                        feature_select=feature_select,
                         criterion=criterion,
                         criteria=criteria,
                     )
@@ -219,7 +215,9 @@ class Splitter_test(unittest.TestCase):
                     dataset, computed = tcl.get_subspace(X, y, max_features=4)
                     # print(
                     #     "{}, # {:7s}{:8s}{:15s}".format(
-                    #         list(computed), splitter_type, criterion,
+                    #         list(computed),
+                    #         feature_select,
+                    #         criterion,
                     #         criteria,
                     #     )
                     # )
@@ -227,3 +225,18 @@ class Splitter_test(unittest.TestCase):
                     self.assertListEqual(
                         X[:, computed].tolist(), dataset.tolist()
                     )
+
+    def test_get_best_subspaces(self):
+        results = [
+            (4, [3, 4, 11, 13]),
+            (7, [1, 3, 4, 5, 11, 13, 16]),
+            (9, [1, 3, 4, 5, 7, 10, 11, 13, 16]),
+        ]
+        X, y = load_dataset(n_features=20)
+        for k, expected in results:
+            tcl = self.build(
+                feature_select="best",
+            )
+            Xs, computed = tcl.get_subspace(X, y, k)
+            self.assertListEqual(expected, list(computed))
+            self.assertListEqual(X[:, expected].tolist(), Xs.tolist())
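Note: the new impurity criterion replaces the distance-based ones: for each class column it splits the samples by the sign of the distance and keeps the column whose split yields the largest information gain. A hedged, self-contained sketch of that idea (entropy-based, not the library's exact code):

    import numpy as np
    from math import log2

    def entropy(y: np.ndarray) -> float:
        _, counts = np.unique(y, return_counts=True)
        p = counts / counts.sum()
        return -sum(pi * log2(pi) for pi in p)

    def impurity_column(data: np.ndarray, y: np.ndarray) -> int:
        # pick the class column whose sign-split maximizes information gain
        best_col, max_gain = -1, 0.0
        for col in range(data.shape[1]):
            up = data[:, col] > 0
            if not up.any() or up.all():
                continue  # this column does not split anything
            n = len(y)
            gain = entropy(y) - (
                up.sum() / n * entropy(y[up]) + (~up).sum() / n * entropy(y[~up])
            )
            if gain > max_gain:
                best_col, max_gain = col, gain
        return best_col  # -1 when no column produces information gain, as in partition()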
@@ -5,6 +5,7 @@ import warnings
 import numpy as np
 from sklearn.datasets import load_iris, load_wine
 from sklearn.exceptions import ConvergenceWarning
+from sklearn.svm import LinearSVC

 from stree import Stree, Snode
 from .utils import load_dataset
@@ -25,8 +26,10 @@ class Stree_test(unittest.TestCase):
         correct number of labels and its sons have the right number of elements
         in their dataset

-        Arguments:
-        node {Snode} -- node to check
+        Parameters
+        ----------
+        node : Snode
+            node to check
         """
         if node.is_leaf():
             return
@@ -41,23 +44,22 @@ class Stree_test(unittest.TestCase):
         _, count_u = np.unique(y_up, return_counts=True)
         #
         for i in unique_y:
-            number_down = count_d[i]
-            try:
-                number_up = count_u[i]
+            number_up = count_u[i]
+            try:
+                number_down = count_d[i]
             except IndexError:
-                number_up = 0
+                number_down = 0
             self.assertEqual(count_y[i], number_down + number_up)
         # Is the partition made the same as the prediction?
         # as the node is not a leaf...
         _, count_yp = np.unique(y_prediction, return_counts=True)
-        self.assertEqual(count_yp[0], y_up.shape[0])
-        self.assertEqual(count_yp[1], y_down.shape[0])
+        self.assertEqual(count_yp[1], y_up.shape[0])
+        self.assertEqual(count_yp[0], y_down.shape[0])
         self._check_tree(node.get_down())
         self._check_tree(node.get_up())

     def test_build_tree(self):
-        """Check if the tree is built the same way as predictions of models
-        """
+        """Check if the tree is built the same way as predictions of models"""
         warnings.filterwarnings("ignore")
         for kernel in self._kernels:
             clf = Stree(kernel=kernel, random_state=self._random_state)
@@ -99,20 +101,22 @@ class Stree_test(unittest.TestCase):
         self.assertListEqual(yp_line.tolist(), yp_once.tolist())

     def test_iterator_and_str(self):
-        """Check preorder iterator
-        """
+        """Check preorder iterator"""
         expected = [
-            "root feaures=(0, 1, 2) impurity=0.5000",
-            "root - Down feaures=(0, 1, 2) impurity=0.0671",
-            "root - Down - Down, <cgaf> - Leaf class=1 belief= 0.975989 "
-            "impurity=0.0469 counts=(array([0, 1]), array([ 17, 691]))",
-            "root - Down - Up feaures=(0, 1, 2) impurity=0.3967",
-            "root - Down - Up - Down, <cgaf> - Leaf class=1 belief= 0.750000 "
-            "impurity=0.3750 counts=(array([0, 1]), array([1, 3]))",
-            "root - Down - Up - Up, <pure> - Leaf class=0 belief= 1.000000 "
-            "impurity=0.0000 counts=(array([0]), array([7]))",
-            "root - Up, <cgaf> - Leaf class=0 belief= 0.928297 impurity=0.1331"
-            " counts=(array([0, 1]), array([725, 56]))",
+            "root feaures=(0, 1, 2) impurity=1.0000 counts=(array([0, 1]), "
+            "array([750, 750]))",
+            "root - Down(2), <cgaf> - Leaf class=0 belief= 0.928297 impurity="
+            "0.3722 counts=(array([0, 1]), array([725, 56]))",
+            "root - Up(2) feaures=(0, 1, 2) impurity=0.2178 counts=(array([0, "
+            "1]), array([ 25, 694]))",
+            "root - Up(2) - Down(3) feaures=(0, 1, 2) impurity=0.8454 counts="
+            "(array([0, 1]), array([8, 3]))",
+            "root - Up(2) - Down(3) - Down(4), <pure> - Leaf class=0 belief= "
+            "1.000000 impurity=0.0000 counts=(array([0]), array([7]))",
+            "root - Up(2) - Down(3) - Up(4), <cgaf> - Leaf class=1 belief= "
+            "0.750000 impurity=0.8113 counts=(array([0, 1]), array([1, 3]))",
+            "root - Up(2) - Up(3), <cgaf> - Leaf class=1 belief= 0.975989 "
+            "impurity=0.1634 counts=(array([0, 1]), array([ 17, 691]))",
         ]
         computed = []
         expected_string = ""
@@ -188,44 +192,43 @@ class Stree_test(unittest.TestCase):
     def test_muticlass_dataset(self):
         datasets = {
             "Synt": load_dataset(random_state=self._random_state, n_classes=3),
-            "Iris": load_iris(return_X_y=True),
+            "Iris": load_wine(return_X_y=True),
         }
         outcomes = {
             "Synt": {
-                "max_samples linear": 0.9533333333333334,
-                "max_samples rbf": 0.836,
-                "max_samples poly": 0.9473333333333334,
-                "min_distance linear": 0.9533333333333334,
-                "min_distance rbf": 0.836,
-                "min_distance poly": 0.9473333333333334,
-                "max_distance linear": 0.9533333333333334,
-                "max_distance rbf": 0.836,
-                "max_distance poly": 0.9473333333333334,
+                "max_samples linear": 0.9606666666666667,
+                "max_samples rbf": 0.7133333333333334,
+                "max_samples poly": 0.618,
+                "impurity linear": 0.9606666666666667,
+                "impurity rbf": 0.7133333333333334,
+                "impurity poly": 0.618,
             },
             "Iris": {
-                "max_samples linear": 0.98,
-                "max_samples rbf": 1.0,
-                "max_samples poly": 1.0,
-                "min_distance linear": 0.98,
-                "min_distance rbf": 1.0,
-                "min_distance poly": 1.0,
-                "max_distance linear": 0.98,
-                "max_distance rbf": 1.0,
-                "max_distance poly": 1.0,
+                "max_samples linear": 1.0,
+                "max_samples rbf": 0.6910112359550562,
+                "max_samples poly": 0.6966292134831461,
+                "impurity linear": 1,
+                "impurity rbf": 0.6910112359550562,
+                "impurity poly": 0.6966292134831461,
             },
         }

         for name, dataset in datasets.items():
             px, py = dataset
-            for criteria in ["max_samples", "min_distance", "max_distance"]:
+            for criteria in ["max_samples", "impurity"]:
                 for kernel in self._kernels:
                     clf = Stree(
-                        C=1e4,
-                        max_iter=1e4,
+                        C=55,
+                        max_iter=1e5,
                         kernel=kernel,
                         random_state=self._random_state,
                     )
                     clf.fit(px, py)
                     outcome = outcomes[name][f"{criteria} {kernel}"]
+                    # print(
+                    #     f"{name} {criteria} {kernel} {outcome} {clf.score(px"
+                    #     ", py)}"
+                    # )
                     self.assertAlmostEqual(outcome, clf.score(px, py))

     def test_max_features(self):
@@ -297,7 +300,10 @@ class Stree_test(unittest.TestCase):
             0.9433333333333334,
         ]
         for kernel, accuracy_expected in zip(self._kernels, accuracies):
-            clf = Stree(random_state=self._random_state, kernel=kernel,)
+            clf = Stree(
+                random_state=self._random_state,
+                kernel=kernel,
+            )
             clf.fit(X, y)
             accuracy_score = clf.score(X, y)
             yp = clf.predict(X)
@@ -309,108 +315,197 @@ class Stree_test(unittest.TestCase):
         X, y = load_dataset(self._random_state)
         clf = Stree(random_state=self._random_state, max_features=2)
         clf.fit(X, y)
-        self.assertAlmostEqual(0.9426666666666667, clf.score(X, y))
+        self.assertAlmostEqual(0.9453333333333334, clf.score(X, y))

-    def test_score_multi_class(self):
-        warnings.filterwarnings("ignore")
-        accuracies = [
-            0.8258427,  # Wine linear min_distance
-            0.6741573,  # Wine linear max_distance
-            0.8314607,  # Wine linear max_samples
-            0.6629213,  # Wine rbf min_distance
-            1.0000000,  # Wine rbf max_distance
-            0.4044944,  # Wine rbf max_samples
-            0.9157303,  # Wine poly min_distance
-            1.0000000,  # Wine poly max_distance
-            0.7640449,  # Wine poly max_samples
-            0.9933333,  # Iris linear min_distance
-            0.9666667,  # Iris linear max_distance
-            0.9666667,  # Iris linear max_samples
-            0.9800000,  # Iris rbf min_distance
-            0.9800000,  # Iris rbf max_distance
-            0.9800000,  # Iris rbf max_samples
-            1.0000000,  # Iris poly min_distance
-            1.0000000,  # Iris poly max_distance
-            1.0000000,  # Iris poly max_samples
-            0.8993333,  # Synthetic linear min_distance
-            0.6533333,  # Synthetic linear max_distance
-            0.9313333,  # Synthetic linear max_samples
-            0.8320000,  # Synthetic rbf min_distance
-            0.6660000,  # Synthetic rbf max_distance
-            0.8320000,  # Synthetic rbf max_samples
-            0.6066667,  # Synthetic poly min_distance
-            0.6840000,  # Synthetic poly max_distance
-            0.6340000,  # Synthetic poly max_samples
-        ]
-        datasets = [
-            ("Wine", load_wine(return_X_y=True)),
-            ("Iris", load_iris(return_X_y=True)),
-            (
-                "Synthetic",
-                load_dataset(self._random_state, n_classes=3, n_features=5),
-            ),
-        ]
-        for dataset_name, dataset in datasets:
-            X, y = dataset
-            for kernel in self._kernels:
-                for criteria in [
-                    "min_distance",
-                    "max_distance",
-                    "max_samples",
-                ]:
-                    clf = Stree(
-                        C=17,
-                        random_state=self._random_state,
-                        kernel=kernel,
-                        split_criteria=criteria,
-                        degree=5,
-                        gamma="auto",
-                    )
-                    clf.fit(X, y)
-                    accuracy_score = clf.score(X, y)
-                    yp = clf.predict(X)
-                    accuracy_computed = np.mean(yp == y)
-                    # print(
-                    #     "{:.7f},  # {:7} {:5} {}".format(
-                    #         accuracy_score, dataset_name, kernel, criteria
-                    #     )
-                    # )
-                    accuracy_expected = accuracies.pop(0)
-                    self.assertEqual(accuracy_score, accuracy_computed)
-                    self.assertAlmostEqual(accuracy_expected, accuracy_score)
-
     def test_bogus_splitter_parameter(self):
         clf = Stree(splitter="duck")
         with self.assertRaises(ValueError):
             clf.fit(*load_dataset())

-    def test_weights_removing_class(self):
-        # This patch solves an stderr message from sklearn svm lib
-        # "WARNING: class label x specified in weight is not found"
+    def test_multiclass_classifier_integrity(self):
+        """Checks if the multiclass operation is done right"""
+        X, y = load_iris(return_X_y=True)
+        clf = Stree(random_state=0)
+        clf.fit(X, y)
+        score = clf.score(X, y)
+        # Check accuracy of the whole model
+        self.assertAlmostEquals(0.98, score, 5)
+        svm = LinearSVC(random_state=0)
+        svm.fit(X, y)
+        self.assertAlmostEquals(0.9666666666666667, svm.score(X, y), 5)
+        data = svm.decision_function(X)
+        expected = [
+            0.4444444444444444,
+            0.35777777777777775,
+            0.4569777777777778,
+        ]
+        ty = data.copy()
+        ty[data <= 0] = 0
+        ty[data > 0] = 1
+        ty = ty.astype(int)
+        for i in range(3):
+            self.assertAlmostEquals(
+                expected[i],
+                clf.splitter_._gini(ty[:, i]),
+            )
+        # 1st Branch
+        # up has to have 50 samples of class 0
+        # down should have 100 [50, 50]
+        up = data[:, 2] > 0
+        resup = np.unique(y[up], return_counts=True)
+        resdn = np.unique(y[~up], return_counts=True)
+        self.assertListEqual([1, 2], resup[0].tolist())
+        self.assertListEqual([3, 50], resup[1].tolist())
+        self.assertListEqual([0, 1], resdn[0].tolist())
+        self.assertListEqual([50, 47], resdn[1].tolist())
+        # 2nd Branch
+        # up should have 53 samples of classes [1, 2] [3, 50]
+        # down should have 47 samples of class 1
+        node_up = clf.tree_.get_down().get_up()
+        node_dn = clf.tree_.get_down().get_down()
+        resup = np.unique(node_up._y, return_counts=True)
+        resdn = np.unique(node_dn._y, return_counts=True)
+        self.assertListEqual([1, 2], resup[0].tolist())
+        self.assertListEqual([3, 50], resup[1].tolist())
+        self.assertListEqual([1], resdn[0].tolist())
+        self.assertListEqual([47], resdn[1].tolist())
+
+    def test_score_multiclass_rbf(self):
+        X, y = load_dataset(
+            random_state=self._random_state,
+            n_classes=3,
+            n_features=5,
+            n_samples=500,
+        )
+        clf = Stree(kernel="rbf", random_state=self._random_state)
+        clf2 = Stree(
+            kernel="rbf", random_state=self._random_state, normalize=True
+        )
+        self.assertEqual(0.768, clf.fit(X, y).score(X, y))
+        self.assertEqual(0.814, clf2.fit(X, y).score(X, y))
+        X, y = load_wine(return_X_y=True)
+        self.assertEqual(0.6741573033707865, clf.fit(X, y).score(X, y))
+        self.assertEqual(1.0, clf2.fit(X, y).score(X, y))
+
+    def test_score_multiclass_poly(self):
+        X, y = load_dataset(
+            random_state=self._random_state,
+            n_classes=3,
+            n_features=5,
+            n_samples=500,
+        )
+        clf = Stree(
+            kernel="poly", random_state=self._random_state, C=10, degree=5
+        )
+        clf2 = Stree(
+            kernel="poly",
+            random_state=self._random_state,
+            normalize=True,
+        )
+        self.assertEqual(0.786, clf.fit(X, y).score(X, y))
+        self.assertEqual(0.818, clf2.fit(X, y).score(X, y))
+        X, y = load_wine(return_X_y=True)
+        self.assertEqual(0.702247191011236, clf.fit(X, y).score(X, y))
+        self.assertEqual(0.6067415730337079, clf2.fit(X, y).score(X, y))
+
+    def test_score_multiclass_linear(self):
+        X, y = load_dataset(
+            random_state=self._random_state,
+            n_classes=3,
+            n_features=5,
+            n_samples=1500,
+        )
+        clf = Stree(kernel="linear", random_state=self._random_state)
+        self.assertEqual(0.9533333333333334, clf.fit(X, y).score(X, y))
+        # Check with context based standardization
+        clf2 = Stree(
+            kernel="linear", random_state=self._random_state, normalize=True
+        )
+        self.assertEqual(0.9526666666666667, clf2.fit(X, y).score(X, y))
+        X, y = load_wine(return_X_y=True)
+        self.assertEqual(0.9831460674157303, clf.fit(X, y).score(X, y))
+        self.assertEqual(1.0, clf2.fit(X, y).score(X, y))
+
+    def test_zero_all_sample_weights(self):
+        X, y = load_dataset(self._random_state)
+        with self.assertRaises(ValueError):
+            Stree().fit(X, y, np.zeros(len(y)))
+
+    def test_mask_samples_weighted_zero(self):
         X = np.array(
             [
-                [0.1, 0.1],
-                [0.1, 0.2],
-                [0.2, 0.1],
-                [5, 6],
-                [8, 9],
-                [6, 7],
-                [0.2, 0.2],
+                [1, 1],
+                [1, 1],
+                [1, 1],
+                [2, 2],
+                [2, 2],
+                [2, 2],
+                [3, 3],
+                [3, 3],
+                [3, 3],
             ]
         )
-        y = np.array([0, 0, 0, 1, 1, 1, 0])
-        epsilon = 1e-5
-        weights = [1, 1, 1, 0, 0, 0, 1]
-        weights = np.array(weights, dtype="float64")
-        weights_epsilon = [x + epsilon for x in weights]
-        weights_no_zero = np.array([1, 1, 1, 0, 0, 2, 1])
-        original = weights_no_zero.copy()
-        clf = Stree()
-        clf.fit(X, y)
-        node = clf.train(X, y, weights, 1, "test",)
-        # if a class is lost with zero weights the patch adds epsilon
-        self.assertListEqual(weights.tolist(), weights_epsilon)
-        self.assertListEqual(node._sample_weight.tolist(), weights_epsilon)
-        # zero weights are ok when they don't erase a class
-        _ = clf.train(X, y, weights_no_zero, 1, "test")
-        self.assertListEqual(weights_no_zero.tolist(), original.tolist())
+        y = np.array([1, 1, 1, 2, 2, 2, 5, 5, 5])
+        yw = np.array([1, 1, 1, 5, 5, 5, 5, 5, 5])
+        w = [1, 1, 1, 0, 0, 0, 1, 1, 1]
+        model1 = Stree().fit(X, y)
+        model2 = Stree().fit(X, y, w)
+        predict1 = model1.predict(X)
+        predict2 = model2.predict(X)
+        self.assertListEqual(y.tolist(), predict1.tolist())
+        self.assertListEqual(yw.tolist(), predict2.tolist())
+        self.assertEqual(model1.score(X, y), 1)
+        self.assertAlmostEqual(model2.score(X, y), 0.66666667)
+        self.assertEqual(model2.score(X, y, w), 1)
+
+    def test_depth(self):
+        X, y = load_dataset(
+            random_state=self._random_state,
+            n_classes=3,
+            n_features=5,
+            n_samples=1500,
+        )
+        clf = Stree(random_state=self._random_state)
+        clf.fit(X, y)
+        self.assertEqual(6, clf.depth_)
+        X, y = load_wine(return_X_y=True)
+        clf = Stree(random_state=self._random_state)
+        clf.fit(X, y)
+        self.assertEqual(4, clf.depth_)
+
+    def test_nodes_leaves(self):
+        X, y = load_dataset(
+            random_state=self._random_state,
+            n_classes=3,
+            n_features=5,
+            n_samples=1500,
+        )
+        clf = Stree(random_state=self._random_state)
+        clf.fit(X, y)
+        nodes, leaves = clf.nodes_leaves()
+        self.assertEqual(25, nodes)
+        self.assertEquals(13, leaves)
+        X, y = load_wine(return_X_y=True)
+        clf = Stree(random_state=self._random_state)
+        clf.fit(X, y)
+        nodes, leaves = clf.nodes_leaves()
+        self.assertEqual(9, nodes)
+        self.assertEquals(5, leaves)
+
+    def test_nodes_leaves_artificial(self):
+        n1 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test1")
+        n2 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test2")
+        n3 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test3")
+        n4 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test4")
+        n5 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test5")
+        n6 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test6")
+        n1.set_up(n2)
+        n2.set_up(n3)
+        n2.set_down(n4)
+        n3.set_up(n5)
+        n4.set_down(n6)
+        clf = Stree(random_state=self._random_state)
+        clf.tree_ = n1
+        nodes, leaves = clf.nodes_leaves()
+        self.assertEqual(6, nodes)
+        self.assertEqual(2, leaves)
@@ -1,9 +1,9 @@
 from sklearn.datasets import make_classification


-def load_dataset(random_state=0, n_classes=2, n_features=3):
+def load_dataset(random_state=0, n_classes=2, n_features=3, n_samples=1500):
     X, y = make_classification(
-        n_samples=1500,
+        n_samples=n_samples,
         n_features=n_features,
         n_informative=3,
         n_redundant=0,
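Note: with the extra n_samples parameter the helper can produce the smaller datasets used by the faster multiclass tests above. A self-contained sketch; the diff is cut off before the remaining make_classification arguments, so the n_classes and random_state pass-through below are assumptions:

    from sklearn.datasets import make_classification

    def load_dataset(random_state=0, n_classes=2, n_features=3, n_samples=1500):
        # mirrors the helper above; trailing keyword arguments are assumed
        X, y = make_classification(
            n_samples=n_samples,
            n_features=n_features,
            n_informative=3,
            n_redundant=0,
            n_classes=n_classes,
            random_state=random_state,
        )
        return X, y

    X, y = load_dataset(n_classes=3, n_features=5, n_samples=500)
    print(X.shape, y.shape)  # (500, 5) (500,)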