mirror of https://github.com/Doctorado-ML/STree.git
synced 2025-08-17 16:36:01 +00:00

Compare commits: add_subspa...codeql-ana (16 commits)

601b5e04d3
147dad684c
3bdac9bd60
e4ac5075e5
36816074ff
475ad7e752
1c869e154e
f5706c3159
be552fdd6c
5e3a8e3ec5
554ec03c32
4b7e4a3fb0
76723993fd
ecd0b86f4d
3e52a4746c
a20e45e8e7
@@ -10,5 +10,4 @@ exclude_lines =
     if __name__ == .__main__.:
 ignore_errors = True
 omit =
-    stree/tests/*
     stree/__init__.py
.github/workflows/codeql-analysis.yml (vendored, new file, 56 lines)
@@ -0,0 +1,56 @@
+name: "CodeQL"
+
+on:
+  push:
+    branches: [ master ]
+  pull_request:
+    # The branches below must be a subset of the branches above
+    branches: [ master ]
+  schedule:
+    - cron: '16 17 * * 3'
+
+jobs:
+  analyze:
+    name: Analyze
+    runs-on: ubuntu-latest
+
+    strategy:
+      fail-fast: false
+      matrix:
+        language: [ 'python' ]
+        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
+        # Learn more:
+        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
+
+    steps:
+    - name: Checkout repository
+      uses: actions/checkout@v2
+
+    # Initializes the CodeQL tools for scanning.
+    - name: Initialize CodeQL
+      uses: github/codeql-action/init@v1
+      with:
+        languages: ${{ matrix.language }}
+        # If you wish to specify custom queries, you can do so here or in a config file.
+        # By default, queries listed here will override any specified in a config file.
+        # Prefix the list here with "+" to use these queries and those in the config file.
+        # queries: ./path/to/local/query, your-org/your-repo/queries@main
+
+    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+    # If this step fails, then you should remove it and run the build manually (see below)
+    - name: Autobuild
+      uses: github/codeql-action/autobuild@v1
+
+    # ℹ️ Command-line programs to run using the OS shell.
+    # 📚 https://git.io/JvXDl
+
+    # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
+    #    and modify them (or add more) to build your code if your project
+    #    uses a compiled language
+
+    #- run: |
+    #   make bootstrap
+    #   make release
+
+    - name: Perform CodeQL Analysis
+      uses: github/codeql-action/analyze@v1
.github/workflows/main.yml (vendored, new file, 47 lines)
@@ -0,0 +1,47 @@
+name: CI
+
+on:
+  push:
+    branches: [master]
+  pull_request:
+    branches: [master]
+  workflow_dispatch:
+
+jobs:
+  build:
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [macos-latest, ubuntu-latest]
+        python: [3.8]
+
+    steps:
+      - uses: actions/checkout@v2
+      - name: Set up Python ${{ matrix.python }}
+        uses: actions/setup-python@v2
+        with:
+          python-version: ${{ matrix.python }}
+      - name: Install dependencies
+        run: |
+          pip install -q --upgrade pip
+          pip install -q -r requirements.txt
+          pip install -q --upgrade codecov coverage black flake8 codacy-coverage
+      - name: Lint
+        run: |
+          black --check --diff stree
+          flake8 --count stree
+      - name: Tests
+        run: |
+          coverage run -m unittest -v stree.tests
+          coverage xml
+      - name: Upload coverage to Codecov
+        uses: codecov/codecov-action@v1
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          files: ./coverage.xml
+      - name: Run codacy-coverage-reporter
+        if: runner.os == 'Linux'
+        uses: codacy/codacy-coverage-reporter-action@master
+        with:
+          project-token: ${{ secrets.CODACY_PROJECT_TOKEN }}
+          coverage-reports: coverage.xml
.gitignore (vendored)
@@ -130,4 +130,7 @@ dmypy.json
 
 .idea
 .vscode
 .pre-commit-config.yaml
+
+**.csv
+.virtual_documents
README.md
@@ -1,6 +1,6 @@
-[](https://app.codeship.com/projects/399170)
+![CI](https://github.com/Doctorado-ML/STree/workflows/CI/badge.svg)
 [](https://codecov.io/gh/doctorado-ml/stree)
 [](https://www.codacy.com/gh/Doctorado-ML/STree?utm_source=github.com&utm_medium=referral&utm_content=Doctorado-ML/STree&utm_campaign=Badge_Grade)
 
 # Stree
 
@@ -18,23 +18,45 @@ pip install git+https://github.com/doctorado-ml/stree
 
 ### Jupyter notebooks
 
-* [](https://mybinder.org/v2/gh/Doctorado-ML/STree/master?urlpath=lab/tree/notebooks/benchmark.ipynb) Benchmark
+- [](https://mybinder.org/v2/gh/Doctorado-ML/STree/master?urlpath=lab/tree/notebooks/benchmark.ipynb) Benchmark
 
-* [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/benchmark.ipynb) Benchmark
+- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/benchmark.ipynb) Benchmark
 
-* [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/features.ipynb) Test features
+- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/features.ipynb) Test features
 
-* [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/adaboost.ipynb) Adaboost
+- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/adaboost.ipynb) Adaboost
 
-* [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/gridsearch.ipynb) Gridsearch
+- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/gridsearch.ipynb) Gridsearch
 
-* [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/test_graphs.ipynb) Test Graphics
+- [](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/test_graphs.ipynb) Test Graphics
 
-### Command line
+## Hyperparameters
 
-```bash
-python main.py
-```
+| | **Hyperparameter** | **Type/Values** | **Default** | **Meaning** |
+| --- | --- | --- | --- | --- |
+| \* | C | \<float\> | 1.0 | Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. |
+| \* | kernel | {"linear", "poly", "rbf"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of "linear", "poly" or "rbf". |
+| \* | max_iter | \<int\> | 1e5 | Hard limit on iterations within solver, or -1 for no limit. |
+| \* | random_state | \<int\> | None | Controls the pseudo random number generation for shuffling the data for probability estimates. Ignored when probability is False.<br>Pass an int for reproducible output across multiple function calls |
+| | max_depth | \<int\> | None | Specifies the maximum depth of the tree |
+| \* | tol | \<float\> | 1e-4 | Tolerance for stopping criterion. |
+| \* | degree | \<int\> | 3 | Degree of the polynomial kernel function ("poly"). Ignored by all other kernels. |
+| \* | gamma | {"scale", "auto"} or \<float\> | scale | Kernel coefficient for "rbf" and "poly".<br>If gamma='scale' (default) is passed then it uses 1 / (n_features \* X.var()) as value of gamma;<br>if "auto", uses 1 / n_features. |
+| | split_criteria | {"impurity", "max_samples"} | impurity | Decides (only in multiclass classification) which column (class) to use to split the dataset in a node\*\* |
+| | criterion | {"gini", "entropy"} | entropy | The function to measure the quality of a split (only used if max_features != num_features).<br>Supported criteria are "gini" for the Gini impurity and "entropy" for the information gain. |
+| | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) for any |
+| | max_features | \<int\>, \<float\><br><br>or {"auto", "sqrt", "log2"} | None | The number of features to consider when looking for the split:<br>If int, then consider max_features features at each split.<br>If float, then max_features is a fraction and int(max_features \* n_features) features are considered at each split.<br>If "auto", then max_features=sqrt(n_features).<br>If "sqrt", then max_features=sqrt(n_features).<br>If "log2", then max_features=log2(n_features).<br>If None, then max_features=n_features. |
+| | splitter | {"best", "random"} | random | The strategy used to choose the feature set at each node (only used if max_features != num_features).<br>Supported strategies are "best" to choose the best feature set and "random" to choose a random combination.<br>The algorithm generates 5 candidates at most to choose from in both strategies. |
+
+\* Hyperparameter used by the support vector classifier of every node
+
+\*\* **Splitting in a STree node**
+
+The decision function is applied to the dataset and the distances from the samples to the hyperplanes are computed in a matrix. This matrix has as many columns as classes the samples belong to (if more than two, i.e. multiclass classification) or 1 column if it is a binary class dataset. In binary classification only one hyperplane is computed, so only one column is needed to store the distances of the samples to it. If three or more classes are present in the dataset, we need as many hyperplanes as there are classes, and therefore one column per hyperplane.
+
+In case of multiclass classification we have to decide which column to take into account to make the split. That depends on the hyperparameter _split_criteria_: if "impurity" is chosen, STree computes the information gain of every split candidate using each column and chooses the one that maximizes it; otherwise STree chooses the column with the most samples with a predicted class (the column with the most positive numbers in it).
+
+Once we have the column to take into account for the split, the algorithm separates the samples with positive distances to the hyperplane from the rest.
 
 ## Tests
 
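The multiclass split rule added to the README above reads more easily in code. The following is a minimal sketch of the idea only, not STree's actual implementation: `distances`, `choose_split_column` and the entropy-based information gain are illustrative stand-ins for whatever the library really uses internally.

```python
import numpy as np

def entropy(y: np.ndarray) -> float:
    """Shannon entropy of a label vector."""
    _, counts = np.unique(y, return_counts=True)
    p = counts / counts.sum()
    return float(-(p * np.log2(p)).sum())

def information_gain(y: np.ndarray, mask: np.ndarray) -> float:
    """Entropy reduction obtained by splitting y into mask / ~mask."""
    n, n_up = len(y), int(mask.sum())
    if n_up in (0, n):
        return 0.0
    return entropy(y) - (n_up / n) * entropy(y[mask]) - ((n - n_up) / n) * entropy(y[~mask])

def choose_split_column(distances: np.ndarray, y: np.ndarray, split_criteria: str = "impurity") -> int:
    """distances: (n_samples, n_classes) matrix from the node's decision function."""
    if distances.shape[1] == 1:       # binary problem: one hyperplane, one column
        return 0
    if split_criteria == "impurity":  # column whose sign-split maximizes information gain
        gains = [information_gain(y, distances[:, c] > 0) for c in range(distances.shape[1])]
        return int(np.argmax(gains))
    # "max_samples": column with the most positive distances (most predicted samples)
    return int(np.argmax((distances > 0).sum(axis=0)))

rng = np.random.default_rng(0)
distances = rng.normal(size=(8, 3))      # fake decision-function output, 3 classes
y = np.array([0, 1, 2, 0, 1, 2, 0, 1])
col = choose_split_column(distances, y)
down = distances[:, col] > 0             # samples with positive distance go to one child
```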
main.py
@@ -8,7 +8,7 @@ random_state = 1
 X, y = load_iris(return_X_y=True)
 
 Xtrain, Xtest, ytrain, ytest = train_test_split(
-    X, y, test_size=0.2, random_state=random_state
+    X, y, test_size=0.3, random_state=random_state
 )
 
 now = time.time()
notebooks/benchmark.ipynb
@@ -17,7 +17,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 3,
+"execution_count": 1,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -29,7 +29,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 4,
+"execution_count": 2,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -37,15 +37,17 @@
 "import numpy as np\n",
 "import pandas as pd\n",
 "from sklearn.model_selection import train_test_split\n",
-"from sklearn import tree\n",
 "from sklearn.metrics import classification_report, confusion_matrix, f1_score\n",
-"from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier\n",
+"from sklearn.tree import DecisionTreeClassifier\n",
+"from sklearn.naive_bayes import GaussianNB\n",
+"from sklearn.neural_network import MLPClassifier\n",
+"from sklearn.svm import LinearSVC\n",
 "from stree import Stree"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 5,
+"execution_count": 3,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -64,13 +66,17 @@
 },
 {
 "cell_type": "code",
-"execution_count": 6,
-"metadata": {},
+"execution_count": 4,
+"metadata": {
+"tags": []
+},
 "outputs": [
 {
-"output_type": "stream",
 "name": "stdout",
-"text": "2020-06-15 10:17:17\n"
+"output_type": "stream",
+"text": [
+"2021-01-14 11:30:51\n"
+]
 }
 ],
 "source": [
@@ -86,7 +92,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 7,
+"execution_count": 5,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -98,13 +104,18 @@
 },
 {
 "cell_type": "code",
-"execution_count": 8,
-"metadata": {},
+"execution_count": 6,
+"metadata": {
+"tags": []
+},
 "outputs": [
 {
-"output_type": "stream",
 "name": "stdout",
-"text": "Fraud: 0.173% 492\nValid: 99.827% 284,315\n"
+"output_type": "stream",
+"text": [
+"Fraud: 0.173% 492\n",
+"Valid: 99.827% 284,315\n"
+]
 }
 ],
 "source": [
@@ -114,7 +125,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 9,
+"execution_count": 7,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -126,13 +137,18 @@
 },
 {
 "cell_type": "code",
-"execution_count": 10,
-"metadata": {},
+"execution_count": 8,
+"metadata": {
+"tags": []
+},
 "outputs": [
 {
-"output_type": "stream",
 "name": "stdout",
-"text": "X shape: (284807, 29)\ny shape: (284807,)\n"
+"output_type": "stream",
+"text": [
+"X shape: (284807, 29)\n",
+"y shape: (284807,)\n"
+]
 }
 ],
 "source": [
@@ -151,7 +167,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 11,
+"execution_count": 9,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -162,12 +178,32 @@
 },
 {
 "cell_type": "code",
-"execution_count": 12,
+"execution_count": 10,
 "metadata": {},
 "outputs": [],
 "source": [
 "# Linear Tree\n",
-"linear_tree = tree.DecisionTreeClassifier(random_state=random_state)"
+"linear_tree = DecisionTreeClassifier(random_state=random_state)"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 11,
+"metadata": {},
+"outputs": [],
+"source": [
+"# Naive Bayes\n",
+"naive_bayes = GaussianNB()"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 12,
+"metadata": {},
+"outputs": [],
+"source": [
+"# Stree\n",
+"stree = Stree(random_state=random_state, C=.01, max_iter=1e3)"
 ]
 },
 {
@@ -176,8 +212,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"# Random Forest\n",
-"random_forest = RandomForestClassifier(random_state=random_state)"
+"# Neural Network\n",
+"mlp = MLPClassifier(random_state=random_state, alpha=1)"
 ]
 },
 {
@@ -186,28 +222,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"# Stree\n",
-"stree = Stree(random_state=random_state, C=.01)"
-]
-},
-{
-"cell_type": "code",
-"execution_count": 15,
-"metadata": {},
-"outputs": [],
-"source": [
-"# AdaBoost\n",
-"adaboost = AdaBoostClassifier(random_state=random_state)"
-]
-},
-{
-"cell_type": "code",
-"execution_count": 16,
-"metadata": {},
-"outputs": [],
-"source": [
-"# Gradient Boosting\n",
-"gradient = GradientBoostingClassifier(random_state=random_state)"
+"# SVC (linear)\n",
+"svc = LinearSVC(random_state=random_state, C=.01, max_iter=1e3)"
 ]
 },
 {
@@ -219,7 +235,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 17,
+"execution_count": 15,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -244,20 +260,195 @@
 },
 {
 "cell_type": "code",
-"execution_count": 18,
-"metadata": {},
+"execution_count": 16,
+"metadata": {
+"tags": []
+},
 "outputs": [
 {
-"output_type": "stream",
 "name": "stdout",
-"text": "************************** Linear Tree **********************\nTrain Model Linear Tree took: 13.91 seconds\n=========== Linear Tree - Train 199,364 samples =============\n precision recall f1-score support\n\n 0 1.000000 1.000000 1.000000 199020\n 1 1.000000 1.000000 1.000000 344\n\n accuracy 1.000000 199364\n macro avg 1.000000 1.000000 1.000000 199364\nweighted avg 1.000000 1.000000 1.000000 199364\n\n=========== Linear Tree - Test 85,443 samples =============\n precision recall f1-score support\n\n 0 0.999578 0.999613 0.999596 85295\n 1 0.772414 0.756757 0.764505 148\n\n accuracy 0.999192 85443\n macro avg 0.885996 0.878185 0.882050 85443\nweighted avg 0.999184 0.999192 0.999188 85443\n\nConfusion Matrix in Train\n[[199020 0]\n [ 0 344]]\nConfusion Matrix in Test\n[[85262 33]\n [ 36 112]]\n************************** Random Forest **********************\nTrain Model Random Forest took: 173.1 seconds\n=========== Random Forest - Train 199,364 samples =============\n precision recall f1-score support\n\n 0 1.000000 1.000000 1.000000 199020\n 1 1.000000 1.000000 1.000000 344\n\n accuracy 1.000000 199364\n macro avg 1.000000 1.000000 1.000000 199364\nweighted avg 1.000000 1.000000 1.000000 199364\n\n=========== Random Forest - Test 85,443 samples =============\n precision recall f1-score support\n\n 0 0.999660 0.999965 0.999812 85295\n 1 0.975410 0.804054 0.881481 148\n\n accuracy 0.999625 85443\n macro avg 0.987535 0.902009 0.940647 85443\nweighted avg 0.999618 0.999625 0.999607 85443\n\nConfusion Matrix in Train\n[[199020 0]\n [ 0 344]]\nConfusion Matrix in Test\n[[85292 3]\n [ 29 119]]\n************************** Stree (SVM Tree) **********************\nTrain Model Stree (SVM Tree) took: 38.4 seconds\n=========== Stree (SVM Tree) - Train 199,364 samples =============\n precision recall f1-score support\n\n 0 0.999623 0.999864 0.999744 199020\n 1 0.908784 0.781977 0.840625 344\n\n accuracy 0.999488 199364\n macro avg 0.954204 0.890921 0.920184 199364\nweighted avg 0.999467 0.999488 0.999469 199364\n\n=========== Stree (SVM Tree) - Test 85,443 samples =============\n precision recall f1-score support\n\n 0 0.999637 0.999918 0.999777 85295\n 1 0.943548 0.790541 0.860294 148\n\n accuracy 0.999555 85443\n macro avg 0.971593 0.895229 0.930036 85443\nweighted avg 0.999540 0.999555 0.999536 85443\n\nConfusion Matrix in Train\n[[198993 27]\n [ 75 269]]\nConfusion Matrix in Test\n[[85288 7]\n [ 31 117]]\n************************** AdaBoost model **********************\nTrain Model AdaBoost model took: 47.21 seconds\n=========== AdaBoost model - Train 199,364 samples =============\n precision recall f1-score support\n\n 0 0.999392 0.999678 0.999535 199020\n 1 0.777003 0.648256 0.706815 344\n\n accuracy 0.999072 199364\n macro avg 0.888198 0.823967 0.853175 199364\nweighted avg 0.999008 0.999072 0.999030 199364\n\n=========== AdaBoost model - Test 85,443 samples =============\n precision recall f1-score support\n\n 0 0.999484 0.999707 0.999596 85295\n 1 0.806202 0.702703 0.750903 148\n\n accuracy 0.999192 85443\n macro avg 0.902843 0.851205 0.875249 85443\nweighted avg 0.999149 0.999192 0.999165 85443\n\nConfusion Matrix in Train\n[[198956 64]\n [ 121 223]]\nConfusion Matrix in Test\n[[85270 25]\n [ 44 104]]\n"
+"output_type": "stream",
+"text": [
+"************************** Linear Tree **********************\n",
+"Train Model Linear Tree took: 10.25 seconds\n",
+"=========== Linear Tree - Train 199,364 samples =============\n",
+" precision recall f1-score support\n",
+"\n",
+" 0 1.000000 1.000000 1.000000 199020\n",
+" 1 1.000000 1.000000 1.000000 344\n",
+"\n",
+" accuracy 1.000000 199364\n",
+" macro avg 1.000000 1.000000 1.000000 199364\n",
+"weighted avg 1.000000 1.000000 1.000000 199364\n",
+"\n",
+"=========== Linear Tree - Test 85,443 samples =============\n",
+" precision recall f1-score support\n",
+"\n",
+" 0 0.999578 0.999613 0.999596 85295\n",
+" 1 0.772414 0.756757 0.764505 148\n",
+"\n",
+" accuracy 0.999192 85443\n",
+" macro avg 0.885996 0.878185 0.882050 85443\n",
+"weighted avg 0.999184 0.999192 0.999188 85443\n",
+"\n",
+"Confusion Matrix in Train\n",
+"[[199020 0]\n",
+" [ 0 344]]\n",
+"Confusion Matrix in Test\n",
+"[[85262 33]\n",
+" [ 36 112]]\n",
+"************************** Naive Bayes **********************\n",
+"Train Model Naive Bayes took: 0.09943 seconds\n",
+"=========== Naive Bayes - Train 199,364 samples =============\n",
+" precision recall f1-score support\n",
+"\n",
+" 0 0.999692 0.978238 0.988849 199020\n",
+" 1 0.061538 0.825581 0.114539 344\n",
+"\n",
+" accuracy 0.977975 199364\n",
+" macro avg 0.530615 0.901910 0.551694 199364\n",
+"weighted avg 0.998073 0.977975 0.987340 199364\n",
+"\n",
+"=========== Naive Bayes - Test 85,443 samples =============\n",
+" precision recall f1-score support\n",
+"\n",
+" 0 0.999712 0.977994 0.988734 85295\n",
+" 1 0.061969 0.837838 0.115403 148\n",
+"\n",
+" accuracy 0.977751 85443\n",
+" macro avg 0.530841 0.907916 0.552068 85443\n",
+"weighted avg 0.998088 0.977751 0.987221 85443\n",
+"\n",
+"Confusion Matrix in Train\n",
+"[[194689 4331]\n",
+" [ 60 284]]\n",
+"Confusion Matrix in Test\n",
+"[[83418 1877]\n",
+" [ 24 124]]\n",
+"************************** Stree (SVM Tree) **********************\n"
+]
+},
+{
+"name": "stderr",
+"output_type": "stream",
+"text": [
+"/Users/rmontanana/.virtualenvs/general/lib/python3.8/site-packages/sklearn/svm/_base.py:976: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n",
+" warnings.warn(\"Liblinear failed to converge, increase \"\n",
+"/Users/rmontanana/.virtualenvs/general/lib/python3.8/site-packages/sklearn/svm/_base.py:976: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n",
+" warnings.warn(\"Liblinear failed to converge, increase \"\n",
+"/Users/rmontanana/.virtualenvs/general/lib/python3.8/site-packages/sklearn/svm/_base.py:976: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n",
+" warnings.warn(\"Liblinear failed to converge, increase \"\n"
+]
+},
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"Train Model Stree (SVM Tree) took: 28.47 seconds\n",
+"=========== Stree (SVM Tree) - Train 199,364 samples =============\n",
+" precision recall f1-score support\n",
+"\n",
+" 0 0.999623 0.999864 0.999744 199020\n",
+" 1 0.908784 0.781977 0.840625 344\n",
+"\n",
+" accuracy 0.999488 199364\n",
+" macro avg 0.954204 0.890921 0.920184 199364\n",
+"weighted avg 0.999467 0.999488 0.999469 199364\n",
+"\n",
+"=========== Stree (SVM Tree) - Test 85,443 samples =============\n",
+" precision recall f1-score support\n",
+"\n",
+" 0 0.999637 0.999918 0.999777 85295\n",
+" 1 0.943548 0.790541 0.860294 148\n",
+"\n",
+" accuracy 0.999555 85443\n",
+" macro avg 0.971593 0.895229 0.930036 85443\n",
+"weighted avg 0.999540 0.999555 0.999536 85443\n",
+"\n",
+"Confusion Matrix in Train\n",
+"[[198993 27]\n",
+" [ 75 269]]\n",
+"Confusion Matrix in Test\n",
+"[[85288 7]\n",
+" [ 31 117]]\n",
+"************************** Neural Network **********************\n",
+"Train Model Neural Network took: 9.76 seconds\n",
+"=========== Neural Network - Train 199,364 samples =============\n",
+" precision recall f1-score support\n",
+"\n",
+" 0 0.999247 0.999844 0.999545 199020\n",
+" 1 0.862222 0.563953 0.681898 344\n",
+"\n",
+" accuracy 0.999092 199364\n",
+" macro avg 0.930734 0.781899 0.840722 199364\n",
+"weighted avg 0.999010 0.999092 0.998997 199364\n",
+"\n",
+"=========== Neural Network - Test 85,443 samples =============\n",
+" precision recall f1-score support\n",
+"\n",
+" 0 0.999356 0.999871 0.999613 85295\n",
+" 1 0.894231 0.628378 0.738095 148\n",
+"\n",
+" accuracy 0.999228 85443\n",
+" macro avg 0.946793 0.814125 0.868854 85443\n",
+"weighted avg 0.999173 0.999228 0.999160 85443\n",
+"\n",
+"Confusion Matrix in Train\n",
+"[[198989 31]\n",
+" [ 150 194]]\n",
+"Confusion Matrix in Test\n",
+"[[85284 11]\n",
+" [ 55 93]]\n",
+"************************** SVC (linear) **********************\n"
+]
+},
+{
+"name": "stderr",
+"output_type": "stream",
+"text": [
+"/Users/rmontanana/.virtualenvs/general/lib/python3.8/site-packages/sklearn/svm/_base.py:976: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n",
+" warnings.warn(\"Liblinear failed to converge, increase \"\n"
+]
+},
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"Train Model SVC (linear) took: 8.207 seconds\n",
+"=========== SVC (linear) - Train 199,364 samples =============\n",
+" precision recall f1-score support\n",
+"\n",
+" 0 0.999237 0.999859 0.999548 199020\n",
+" 1 0.872727 0.558140 0.680851 344\n",
+"\n",
+" accuracy 0.999097 199364\n",
+" macro avg 0.935982 0.778999 0.840199 199364\n",
+"weighted avg 0.999018 0.999097 0.998998 199364\n",
+"\n",
+"=========== SVC (linear) - Test 85,443 samples =============\n",
+" precision recall f1-score support\n",
+"\n",
+" 0 0.999344 0.999894 0.999619 85295\n",
+" 1 0.910891 0.621622 0.738956 148\n",
+"\n",
+" accuracy 0.999239 85443\n",
+" macro avg 0.955117 0.810758 0.869287 85443\n",
+"weighted avg 0.999191 0.999239 0.999168 85443\n",
+"\n",
+"Confusion Matrix in Train\n",
+"[[198992 28]\n",
+" [ 152 192]]\n",
+"Confusion Matrix in Test\n",
+"[[85286 9]\n",
+" [ 56 92]]\n"
+]
 }
 ],
 "source": [
 "# Train & Test models\n",
 "models = {\n",
-"    'Linear Tree':linear_tree, 'Random Forest': random_forest, 'Stree (SVM Tree)': stree, \n",
-"    'AdaBoost model': adaboost\n",
+"    'Linear Tree':linear_tree, 'Naive Bayes': naive_bayes, 'Stree (SVM Tree)': stree, \n",
+"    'Neural Network': mlp, 'SVC (linear)': svc\n",
 "}\n",
 "\n",
 "best_f1 = 0\n",
@@ -273,13 +464,24 @@
 },
 {
 "cell_type": "code",
-"execution_count": 19,
-"metadata": {},
+"execution_count": 17,
+"metadata": {
+"tags": []
+},
 "outputs": [
 {
-"output_type": "stream",
 "name": "stdout",
-"text": "**************************************************************************************************************\n*The best f1 model is Random Forest, with a f1 score: 0.8815 in 173.095 seconds with 0.7 samples in train dataset\n**************************************************************************************************************\nModel: Linear Tree\t Time: 13.91 seconds\t f1: 0.7645\nModel: Random Forest\t Time: 173.09 seconds\t f1: 0.8815\nModel: Stree (SVM Tree)\t Time: 38.40 seconds\t f1: 0.8603\nModel: AdaBoost model\t Time: 47.21 seconds\t f1: 0.7509\n"
+"output_type": "stream",
+"text": [
+"**************************************************************************************************************\n",
+"*The best f1 model is Stree (SVM Tree), with a f1 score: 0.8603 in 28.4743 seconds with 0.7 samples in train dataset\n",
+"**************************************************************************************************************\n",
+"Model: Linear Tree\t Time: 10.25 seconds\t f1: 0.7645\n",
+"Model: Naive Bayes\t Time: 0.10 seconds\t f1: 0.1154\n",
+"Model: Stree (SVM Tree)\t Time: 28.47 seconds\t f1: 0.8603\n",
+"Model: Neural Network\t Time: 9.76 seconds\t f1: 0.7381\n",
+"Model: SVC (linear)\t Time: 8.21 seconds\t f1: 0.739\n"
+]
 }
 ],
 "source": [
@@ -295,39 +497,54 @@
 "metadata": {},
 "source": [
 "**************************************************************************************************************\n",
-"*The best f1 model is Random Forest, with a f1 score: 0.8815 in 152.54 seconds with 0.7 samples in train dataset\n",
+"*The best f1 model is Stree (SVM Tree), with a f1 score: 0.8603 in 28.4743 seconds with 0.7 samples in train dataset\n",
 "**************************************************************************************************************\n",
-"Model: Linear Tree\t Time: 13.52 seconds\t f1: 0.7645\n",
-"Model: Random Forest\t Time: 152.54 seconds\t f1: 0.8815\n",
-"Model: Stree (SVM Tree)\t Time: 32.55 seconds\t f1: 0.8603\n",
-"Model: AdaBoost model\t Time: 47.34 seconds\t f1: 0.7509\n",
-"Model: Gradient Boost.\t Time: 244.12 seconds\t f1: 0.5259"
+"Model: Linear Tree\t Time: 10.25 seconds\t f1: 0.7645\n",
+"Model: Naive Bayes\t Time: 0.10 seconds\t f1: 0.1154\n",
+"Model: Stree (SVM Tree)\t Time: 28.47 seconds\t f1: 0.8603\n",
+"Model: Neural Network\t Time: 9.76 seconds\t f1: 0.7381\n",
+"Model: SVC (linear)\t Time: 8.21 seconds\t f1: 0.739"
 ]
 },
 {
-"cell_type": "markdown",
+"cell_type": "code",
+"execution_count": 18,
 "metadata": {},
+"outputs": [
+{
+"data": {
+"text/plain": [
+"{'C': 0.01,\n",
+" 'criterion': 'entropy',\n",
+" 'degree': 3,\n",
+" 'gamma': 'scale',\n",
+" 'kernel': 'linear',\n",
+" 'max_depth': None,\n",
+" 'max_features': None,\n",
+" 'max_iter': 1000.0,\n",
+" 'min_samples_split': 0,\n",
+" 'random_state': 2020,\n",
+" 'split_criteria': 'impurity',\n",
+" 'splitter': 'random',\n",
+" 'tol': 0.0001}"
+]
+},
+"execution_count": 18,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
 "source": [
-"```\n",
-"******************************************************************************************************************\n",
-"*The best f1 model is Random Forest, with a f1 score: 0.8815 in 218.966 seconds with 0.7 samples in train dataset\n",
-"******************************************************************************************************************\n",
-"Model: Linear Tree Time: 23.05 seconds\t f1: 0.7645\n",
-"Model: Random Forest\t Time: 218.97 seconds\t f1: 0.8815\n",
-"Model: Stree (SVM Tree)\t Time: 49.45 seconds\t f1: 0.8467\n",
-"Model: AdaBoost model\t Time: 73.83 seconds\t f1: 0.7509\n",
-"Model: Gradient Boost.\t Time: 388.69 seconds\t f1: 0.5259\n",
-"Model: Neural Network\t Time: 25.47 seconds\t f1: 0.8328\n",
-"```"
+"stree.get_params()"
 ]
 }
 ],
 "metadata": {
 "hide_input": false,
 "kernelspec": {
-"display_name": "Python 3.7.6 64-bit ('general': venv)",
+"display_name": "Python 3",
 "language": "python",
-"name": "python37664bitgeneralvenvfbd0a23e74cf4e778460f5ffc6761f39"
+"name": "python3"
 },
 "language_info": {
 "codemirror_mode": {
@@ -339,7 +556,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.7.6-final"
+"version": "3.8.2"
 },
 "toc": {
 "base_numbering": 1,
@@ -393,4 +610,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 4
 }
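The hunks above only touch the recorded outputs and the `models` dict; the benchmark loop itself sits in unchanged cells and never appears in this diff. A self-contained sketch of the pattern those outputs imply, using synthetic data and a reduced model set (the timing and report strings mimic the notebook's, nothing here is the notebook's literal code):

```python
import time
from sklearn.datasets import make_classification
from sklearn.metrics import classification_report, confusion_matrix, f1_score
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier

# Synthetic imbalanced stand-in for the creditcard split used in the notebook
X, y = make_classification(n_samples=2000, weights=[0.95], random_state=1)
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, train_size=0.7, random_state=1)

models = {"Linear Tree": DecisionTreeClassifier(random_state=1), "Naive Bayes": GaussianNB()}

best_f1, best_model = 0.0, None
for name, model in models.items():
    now = time.time()
    model.fit(Xtrain, ytrain)                 # train and time each candidate
    spent = time.time() - now
    pred = model.predict(Xtest)
    print(f"Train Model {name} took: {spent:.4} seconds")
    print(classification_report(ytest, pred, digits=6))
    print("Confusion Matrix in Test")
    print(confusion_matrix(ytest, pred))
    score = f1_score(ytest, pred)             # the selection metric
    if score > best_f1:
        best_f1, best_model = score, name
print(f"The best f1 model is {best_model}, with a f1 score: {best_f1:.4}")
```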
notebooks/adaboost.ipynb
@@ -4,7 +4,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"# Test AdaBoost with different configurations"
+"# Test Stree with AdaBoost and Bagging with different configurations"
 ]
 },
 {
@@ -34,12 +34,12 @@
 "outputs": [],
 "source": [
 "import time\n",
-"from sklearn.ensemble import AdaBoostClassifier\n",
-"from sklearn.tree import DecisionTreeClassifier\n",
-"from sklearn.svm import LinearSVC, SVC\n",
-"from sklearn.model_selection import GridSearchCV, train_test_split\n",
-"from sklearn.datasets import load_iris\n",
-"from stree import Stree"
+"import warnings\n",
+"from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier\n",
+"from sklearn.model_selection import train_test_split\n",
+"from sklearn.exceptions import ConvergenceWarning\n",
+"from stree import Stree\n",
+"warnings.filterwarnings(\"ignore\", category=ConvergenceWarning)"
 ]
 },
 {
@@ -57,12 +57,20 @@
 {
 "cell_type": "code",
 "execution_count": 4,
-"metadata": {},
+"metadata": {
+"tags": []
+},
 "outputs": [
 {
-"output_type": "stream",
 "name": "stdout",
-"text": "Fraud: 0.173% 492\nValid: 99.827% 284315\nX.shape (100492, 28) y.shape (100492,)\nFraud: 0.659% 662\nValid: 99.341% 99830\n"
+"output_type": "stream",
+"text": [
+"Fraud: 0.173% 492\n",
+"Valid: 99.827% 284315\n",
+"X.shape (100492, 28) y.shape (100492,)\n",
+"Fraud: 0.651% 654\n",
+"Valid: 99.349% 99838\n"
+]
 }
 ],
 "source": [
@@ -117,23 +125,29 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"## STree alone on the whole dataset and linear kernel"
+"## STree alone with 100.000 samples and linear kernel"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 5,
-"metadata": {},
+"metadata": {
+"tags": []
+},
 "outputs": [
 {
-"output_type": "stream",
 "name": "stdout",
-"text": "Score Train: 0.9985499829409757\nScore Test: 0.998407854584052\nTook 39.45 seconds\n"
+"output_type": "stream",
+"text": [
+"Score Train: 0.9984504719663368\n",
+"Score Test: 0.9983415151917209\n",
+"Took 26.09 seconds\n"
+]
 }
 ],
 "source": [
 "now = time.time()\n",
-"clf = Stree(max_depth=3, random_state=random_state)\n",
+"clf = Stree(max_depth=3, random_state=random_state, max_iter=1e3)\n",
 "clf.fit(Xtrain, ytrain)\n",
 "print(\"Score Train: \", clf.score(Xtrain, ytrain))\n",
 "print(\"Score Test: \", clf.score(Xtest, ytest))\n",
@@ -144,7 +158,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"## Different kernels with different configuations"
+"## Adaboost"
 ]
 },
 {
@@ -161,18 +175,24 @@
 {
 "cell_type": "code",
 "execution_count": 7,
-"metadata": {},
+"metadata": {
+"tags": []
+},
 "outputs": [
 {
-"output_type": "stream",
 "name": "stdout",
-"text": "Kernel: linear\tTime: 87.00 seconds\tScore Train: 0.9982372\tScore Test: 0.9981425\nKernel: rbf\tTime: 60.60 seconds\tScore Train: 0.9934181\tScore Test: 0.9933992\nKernel: poly\tTime: 88.08 seconds\tScore Train: 0.9937450\tScore Test: 0.9938968\n"
+"output_type": "stream",
+"text": [
+"Kernel: linear\tTime: 43.49 seconds\tScore Train: 0.9980098\tScore Test: 0.9980762\n",
+"Kernel: rbf\tTime: 8.86 seconds\tScore Train: 0.9934891\tScore Test: 0.9934987\n",
+"Kernel: poly\tTime: 41.14 seconds\tScore Train: 0.9972279\tScore Test: 0.9973133\n"
+]
 }
 ],
 "source": [
 "for kernel in ['linear', 'rbf', 'poly']:\n",
 "    now = time.time()\n",
-"    clf = AdaBoostClassifier(Stree(C=7, kernel=kernel, max_depth=max_depth, random_state=random_state), n_estimators=n_estimators, random_state=random_state)\n",
+"    clf = AdaBoostClassifier(base_estimator=Stree(C=C, kernel=kernel, max_depth=max_depth, random_state=random_state, max_iter=1e3), algorithm=\"SAMME\", n_estimators=n_estimators, random_state=random_state)\n",
 "    clf.fit(Xtrain, ytrain)\n",
 "    score_train = clf.score(Xtrain, ytrain)\n",
 "    score_test = clf.score(Xtest, ytest)\n",
@@ -183,24 +203,41 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"## Test algorithm SAMME in AdaBoost to check speed/accuracy"
+"## Bagging"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 8,
 "metadata": {},
+"outputs": [],
+"source": [
+"n_estimators = 10\n",
+"C = 7\n",
+"max_depth = 3"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 9,
+"metadata": {
+"tags": []
+},
 "outputs": [
 {
-"output_type": "stream",
 "name": "stdout",
-"text": "Kernel: linear\tTime: 58.75 seconds\tScore Train: 0.9980524\tScore Test: 0.9978771\nKernel: rbf\tTime: 12.49 seconds\tScore Train: 0.9934181\tScore Test: 0.9933992\nKernel: poly\tTime: 97.85 seconds\tScore Train: 0.9972137\tScore Test: 0.9971806\n"
+"output_type": "stream",
+"text": [
+"Kernel: linear\tTime: 187.51 seconds\tScore Train: 0.9984505\tScore Test: 0.9983083\n",
+"Kernel: rbf\tTime: 73.65 seconds\tScore Train: 0.9993461\tScore Test: 0.9985074\n",
+"Kernel: poly\tTime: 52.19 seconds\tScore Train: 0.9993461\tScore Test: 0.9987727\n"
+]
 }
 ],
 "source": [
 "for kernel in ['linear', 'rbf', 'poly']:\n",
 "    now = time.time()\n",
-"    clf = AdaBoostClassifier(Stree(C=7, kernel=kernel, max_depth=max_depth, random_state=random_state), n_estimators=n_estimators, random_state=random_state, algorithm=\"SAMME\")\n",
+"    clf = BaggingClassifier(base_estimator=Stree(C=C, kernel=kernel, max_depth=max_depth, random_state=random_state, max_iter=1e3), n_estimators=n_estimators, random_state=random_state)\n",
 "    clf.fit(Xtrain, ytrain)\n",
 "    score_train = clf.score(Xtrain, ytrain)\n",
 "    score_test = clf.score(Xtest, ytest)\n",
@@ -209,6 +246,11 @@
 }
 ],
 "metadata": {
+"kernelspec": {
+"display_name": "Python 3",
+"language": "python",
+"name": "python3"
+},
 "language_info": {
 "codemirror_mode": {
 "name": "ipython",
@@ -219,14 +261,9 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.7.6-final"
-},
-"orig_nbformat": 2,
-"kernelspec": {
-"name": "python37664bitgeneralvenvfbd0a23e74cf4e778460f5ffc6761f39",
-"display_name": "Python 3.7.6 64-bit ('general': venv)"
+"version": "3.8.2"
 }
 },
 "nbformat": 4,
-"nbformat_minor": 2
+"nbformat_minor": 4
 }
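One detail in the Adaboost hunk worth calling out: the new call passes `algorithm="SAMME"` explicitly. In scikit-learn, AdaBoost's default SAMME.R variant calls `predict_proba` on the base estimator, while SAMME only needs `predict`; that is presumably why the notebook switches when boosting Stree. A runnable sketch of the two ensemble setups from these hunks, on synthetic data (the creditcard loading is the notebook's own and is not reproduced here; `base_estimator=` matches the notebook-era scikit-learn, later renamed `estimator=`):

```python
import warnings
from sklearn.datasets import make_classification
from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier
from sklearn.exceptions import ConvergenceWarning
from stree import Stree

warnings.filterwarnings("ignore", category=ConvergenceWarning)

# Synthetic imbalanced stand-in for the creditcard sample used in the notebook
X, y = make_classification(n_samples=1000, weights=[0.9], random_state=1)

base = Stree(C=7, kernel="linear", max_depth=3, random_state=1, max_iter=1e3)
# SAMME uses only base.predict(); the default SAMME.R would call predict_proba()
ada = AdaBoostClassifier(base_estimator=base, algorithm="SAMME",
                         n_estimators=10, random_state=1).fit(X, y)
bag = BaggingClassifier(base_estimator=base, n_estimators=10,
                        random_state=1).fit(X, y)
print("AdaBoost:", ada.score(X, y), "Bagging:", bag.score(X, y))
```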
notebooks/features.ipynb
@@ -4,7 +4,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"# Test smple_weight, kernels, C, sklearn estimator"
+"# Test sample_weight, kernels, C, sklearn estimator"
 ]
 },
 {
@@ -33,6 +33,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
+"import time\n",
+"import warnings\n",
 "import numpy as np\n",
 "import pandas as pd\n",
 "from sklearn.svm import SVC\n",
@@ -40,14 +42,17 @@
 "from sklearn.utils.estimator_checks import check_estimator\n",
 "from sklearn.datasets import make_classification, load_iris, load_wine\n",
 "from sklearn.model_selection import train_test_split\n",
+"from sklearn.exceptions import ConvergenceWarning\n",
 "from stree import Stree\n",
-"import time"
+"warnings.filterwarnings(\"ignore\", category=ConvergenceWarning)"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 3,
-"metadata": {},
+"metadata": {
+"tags": []
+},
 "outputs": [],
 "source": [
 "import os\n",
@@ -59,12 +64,21 @@
 {
 "cell_type": "code",
 "execution_count": 4,
-"metadata": {},
+"metadata": {
+"tags": []
+},
 "outputs": [
 {
-"output_type": "stream",
 "name": "stdout",
-"text": "Fraud: 0.173% 492\nValid: 99.827% 284315\nX.shape (1492, 28) y.shape (1492,)\nFraud: 33.244% 496\nValid: 66.756% 996\n"
+"output_type": "stream",
+"text": [
+"Fraud: 0.173% 492\n",
+"Valid: 99.827% 284315\n",
+"X.shape (5492, 28) y.shape (5492,)\n",
+"Fraud: 9.086% 499\n",
+"Valid: 90.914% 4993\n",
+"[0.09079084 0.09079084 0.09079084 0.09079084] [0.09101942 0.09101942 0.09101942 0.09101942]\n"
+]
 }
 ],
 "source": [
@@ -94,22 +108,29 @@
 "    print(\"X.shape\", X.shape, \" y.shape\", y.shape)\n",
 "    print(\"Fraud: {0:.3f}% {1}\".format(len(y[y == 1])*100/X.shape[0], len(y[y == 1])))\n",
 "    print(\"Valid: {0:.3f}% {1}\".format(len(y[y == 0]) * 100 / X.shape[0], len(y[y == 0])))\n",
-"    Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, train_size=0.7, shuffle=True, random_state=random_state, stratify=y)\n",
+"    Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, train_size=0.7, shuffle=True, random_state=random_state)\n",
 "    return Xtrain, Xtest, ytrain, ytest\n",
 "\n",
-"# data = load_creditcard(-5000) # Take all true samples + 5000 of the others\n",
+"data = load_creditcard(-5000) # Take all true samples with up to 5000 of the others\n",
 "# data = load_creditcard(5000) # Take the first 5000 samples\n",
-"data = load_creditcard(-1000) # Take all the samples\n",
+"# data = load_creditcard(-1000) # Take 1000 samples\n",
 "\n",
 "Xtrain = data[0]\n",
 "Xtest = data[1]\n",
 "ytrain = data[2]\n",
 "ytest = data[3]\n",
+"_, data = np.unique(ytrain, return_counts=True)\n",
+"wtrain = (data[1] / np.sum(data), data[0] / np.sum(data))\n",
+"_, data = np.unique(ytest, return_counts=True)\n",
+"wtest = (data[1] / np.sum(data), data[0] / np.sum(data))\n",
 "# Set weights inverse to its count class in dataset\n",
-"weights = np.ones(Xtrain.shape[0],) * 1.00244\n",
-"weights[ytrain==1] = 1.99755\n",
-"weights_test = np.ones(Xtest.shape[0],) * 1.00244\n",
-"weights_test[ytest==1] = 1.99755 "
+"weights = np.ones(Xtrain.shape[0],)\n",
+"weights[ytrain==0] = wtrain[0]\n",
+"weights[ytrain==1] = wtrain[1]\n",
+"weights_test = np.ones(Xtest.shape[0],)\n",
+"weights_test[ytest==0] = wtest[0]\n",
+"weights_test[ytest==1] = wtest[1]\n",
+"print(weights[:4], weights_test[:4])"
 ]
 },
 {
@@ -123,19 +144,26 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"## Test smple_weights\n",
+"## Test sample_weights\n",
 "Compute accuracy with weights in samples. The weights are set based on the inverse of the number of samples of each class"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 5,
-"metadata": {},
+"metadata": {
+"tags": []
+},
 "outputs": [
 {
-"output_type": "stream",
 "name": "stdout",
-"text": "Accuracy of Train without weights 0.9808429118773946\nAccuracy of Train with weights 0.9904214559386973\nAccuracy of Tests without weights 0.9441964285714286\nAccuracy of Tests with weights 0.9375\n"
+"output_type": "stream",
+"text": [
+"Accuracy of Train without weights 0.9849115504682622\n",
+"Accuracy of Train with weights 0.9849115504682622\n",
+"Accuracy of Tests without weights 0.9848300970873787\n",
+"Accuracy of Tests with weights 0.9805825242718447\n"
+]
 }
 ],
 "source": [
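The inverse-frequency weighting added in the hunk above is compact but easy to misread: each class is weighted by the other class's share of the dataset, so the minority class gets the larger weight. The same computation on synthetic labels (names follow the notebook; the data is made up):

```python
import numpy as np

# Synthetic imbalanced labels standing in for the creditcard ytrain
ytrain = np.array([0] * 90 + [1] * 10)

_, counts = np.unique(ytrain, return_counts=True)
# (weight for class 0, weight for class 1): each class gets the other's share
wtrain = (counts[1] / counts.sum(), counts[0] / counts.sum())

weights = np.ones(ytrain.shape[0])
weights[ytrain == 0] = wtrain[0]   # 0.1 here: the majority class is down-weighted
weights[ytrain == 1] = wtrain[1]   # 0.9 here: the minority class is up-weighted
# passed to training as: clf.fit(Xtrain, ytrain, sample_weight=weights)
```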
@@ -157,12 +185,18 @@
 {
 "cell_type": "code",
 "execution_count": 6,
-"metadata": {},
+"metadata": {
+"tags": []
+},
 "outputs": [
 {
-"output_type": "stream",
 "name": "stdout",
-"text": "Time: 0.13s\tKernel: linear\tAccuracy_train: 0.9693486590038314\tAccuracy_test: 0.9598214285714286\nTime: 0.09s\tKernel: rbf\tAccuracy_train: 0.9923371647509579\tAccuracy_test: 0.953125\nTime: 0.09s\tKernel: poly\tAccuracy_train: 0.9913793103448276\tAccuracy_test: 0.9375\n"
+"output_type": "stream",
+"text": [
+"Time: 26.59s\tKernel: linear\tAccuracy_train: 0.9846514047866806\tAccuracy_test: 0.9848300970873787\n",
+"Time: 0.56s\tKernel: rbf\tAccuracy_train: 0.9947970863683663\tAccuracy_test: 0.9866504854368932\n",
+"Time: 0.23s\tKernel: poly\tAccuracy_train: 0.9955775234131113\tAccuracy_test: 0.9824029126213593\n"
+]
 }
 ],
 "source": [
@@ -187,15 +221,73 @@
 "cell_type": "code",
 "execution_count": 7,
 "metadata": {
-"tags": [
-"outputPrepend"
-]
+"tags": []
 },
 "outputs": [
 {
-"output_type": "stream",
 "name": "stdout",
-"text": "************** C=0.001 ****************************\nClassifier's accuracy (train): 0.9588\nClassifier's accuracy (test) : 0.9487\nroot feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4438\nroot - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0374\nroot - Down - Down, <cgaf> - Leaf class=1 belief= 0.984076 impurity=0.0313 counts=(array([0, 1]), array([ 5, 309]))\nroot - Down - Up, <pure> - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([1]))\nroot - Up, <cgaf> - Leaf class=0 belief= 0.947874 impurity=0.0988 counts=(array([0, 1]), array([691, 38]))\n\n**************************************************\n************** C=0.01 ****************************\nClassifier's accuracy (train): 0.9588\nClassifier's accuracy (test) : 0.9531\nroot feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4438\nroot - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0192\nroot - Down - Down, <cgaf> - Leaf class=1 belief= 0.993506 impurity=0.0129 counts=(array([0, 1]), array([ 2, 306]))\nroot - Down - Up, <pure> - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([1]))\nroot - Up, <cgaf> - Leaf class=0 belief= 0.944218 impurity=0.1053 counts=(array([0, 1]), array([694, 41]))\n\n**************************************************\n************** C=1 ****************************\nClassifier's accuracy (train): 0.9665\nClassifier's accuracy (test) : 0.9643\nroot feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4438\nroot - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0189\nroot - Down - Down, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([312]))\nroot - Down - Up, <pure> - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([3]))\nroot - Up, <cgaf> - Leaf class=0 belief= 0.951989 impurity=0.0914 counts=(array([0, 1]), array([694, 35]))\n\n**************************************************\n************** C=5 ****************************\nClassifier's accuracy (train): 0.9665\nClassifier's accuracy (test) : 0.9621\nroot feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4438\nroot - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0250\nroot - Down - Down, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([312]))\nroot - Down - Up, <pure> - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([4]))\nroot - Up, <cgaf> - Leaf class=0 belief= 0.951923 impurity=0.0915 counts=(array([0, 1]), array([693, 35]))\n\n**************************************************\n************** C=17 ****************************\nClassifier's accuracy (train): 0.9703\nClassifier's accuracy (test) : 0.9665\nroot feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4438\nroot - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0367\nroot - Down - Down, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([315]))\nroot - Down - Up, <pure> - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([6]))\nroot - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0846\nroot - Up - Down, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([1]))\nroot - Up - Up, <cgaf> - Leaf class=0 belief= 0.957064 impurity=0.0822 counts=(array([0, 1]), array([691, 31]))\n\n**************************************************\n0.4375 secs\n"
+"output_type": "stream",
+"text": [
+"************** C=0.001 ****************************\n",
+"Classifier's accuracy (train): 0.9823\n",
+"Classifier's accuracy (test) : 0.9836\n",
+"root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4391 counts=(array([0, 1]), array([3495, 349]))\n",
+"root - Down, <cgaf> - Leaf class=0 belief= 0.981455 impurity=0.1332 counts=(array([0, 1]), array([3493, 66]))\n",
+"root - Up, <cgaf> - Leaf class=1 belief= 0.992982 impurity=0.0603 counts=(array([0, 1]), array([ 2, 283]))\n",
+"\n",
+"**************************************************\n",
+"************** C=0.01 ****************************\n",
+"Classifier's accuracy (train): 0.9834\n",
+"Classifier's accuracy (test) : 0.9842\n",
+"root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4391 counts=(array([0, 1]), array([3495, 349]))\n",
+"root - Down, <cgaf> - Leaf class=0 belief= 0.982288 impurity=0.1284 counts=(array([0, 1]), array([3494, 63]))\n",
+"root - Up, <cgaf> - Leaf class=1 belief= 0.996516 impurity=0.0335 counts=(array([0, 1]), array([ 1, 286]))\n",
+"\n",
+"**************************************************\n",
+"************** C=1 ****************************\n",
+"Classifier's accuracy (train): 0.9844\n",
+"Classifier's accuracy (test) : 0.9848\n",
+"root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4391 counts=(array([0, 1]), array([3495, 349]))\n",
+"root - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.1236 counts=(array([0, 1]), array([3493, 60]))\n",
+"root - Down - Down, <cgaf> - Leaf class=0 belief= 0.983108 impurity=0.1236 counts=(array([0, 1]), array([3492, 60]))\n",
+"root - Down - Up, <pure> - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([1]))\n",
+"root - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0593 counts=(array([0, 1]), array([ 2, 289]))\n",
+"root - Up - Down, <pure> - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([2]))\n",
+"root - Up - Up, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([289]))\n",
+"\n",
+"**************************************************\n",
+"************** C=5 ****************************\n",
+"Classifier's accuracy (train): 0.9847\n",
+"Classifier's accuracy (test) : 0.9848\n",
|
||||||
|
"root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4391 counts=(array([0, 1]), array([3495, 349]))\n",
|
||||||
|
"root - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.1236 counts=(array([0, 1]), array([3493, 60]))\n",
|
||||||
|
"root - Down - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.1236 counts=(array([0, 1]), array([3492, 60]))\n",
|
||||||
|
"root - Down - Down - Down, <cgaf> - Leaf class=0 belief= 0.983385 impurity=0.1220 counts=(array([0, 1]), array([3492, 59]))\n",
|
||||||
|
"root - Down - Down - Up, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([1]))\n",
|
||||||
|
"root - Down - Up, <pure> - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([1]))\n",
|
||||||
|
"root - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0593 counts=(array([0, 1]), array([ 2, 289]))\n",
|
||||||
|
"root - Up - Down, <pure> - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([2]))\n",
|
||||||
|
"root - Up - Up, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([289]))\n",
|
||||||
|
"\n",
|
||||||
|
"**************************************************\n",
|
||||||
|
"************** C=17 ****************************\n",
|
||||||
|
"Classifier's accuracy (train): 0.9847\n",
|
||||||
|
"Classifier's accuracy (test) : 0.9848\n",
|
||||||
|
"root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4391 counts=(array([0, 1]), array([3495, 349]))\n",
|
||||||
|
"root - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.1236 counts=(array([0, 1]), array([3493, 60]))\n",
|
||||||
|
"root - Down - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.1220 counts=(array([0, 1]), array([3492, 59]))\n",
|
||||||
|
"root - Down - Down - Down, <cgaf> - Leaf class=0 belief= 0.983380 impurity=0.1220 counts=(array([0, 1]), array([3491, 59]))\n",
|
||||||
|
"root - Down - Down - Up, <pure> - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([1]))\n",
|
||||||
|
"root - Down - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=1.0000 counts=(array([0, 1]), array([1, 1]))\n",
|
||||||
|
"root - Down - Up - Down, <pure> - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([1]))\n",
|
||||||
|
"root - Down - Up - Up, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([1]))\n",
|
||||||
|
"root - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0593 counts=(array([0, 1]), array([ 2, 289]))\n",
|
||||||
|
"root - Up - Down, <pure> - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([2]))\n",
|
||||||
|
"root - Up - Up, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([289]))\n",
|
||||||
|
"\n",
|
||||||
|
"**************************************************\n",
|
||||||
|
"59.0161 secs\n"
|
||||||
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
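The cell that produced the C sweep above is truncated by the diff hunk, but the pattern in the output is informative: C is the SVC regularization strength, so larger values penalize margin violations harder and the split hyperplanes separate more aggressively. At C=0.001 the leaves stay impure, while by C=17 the tree grows an extra level whose leaves are pure. A hedged sketch of the kind of loop that could produce this output; `Stree`, `random_state`, and the `Xtrain`/`Xtest` splits are assumed to come from the notebook's earlier setup cells.

```python
# Sketch only: the actual cell body is cut off by the diff hunk above.
import time

for C in (0.001, 0.01, 1, 5, 17):
    start = time.time()
    clf = Stree(C=C, random_state=random_state).fit(Xtrain, ytrain)
    print(f"************** C={C} ****************************")
    print(f"Classifier's accuracy (train): {clf.score(Xtrain, ytrain):.4f}")
    print(f"Classifier's accuracy (test) : {clf.score(Xtest, ytest):.4f}")
    print(clf)  # prints the tree node by node, as in the output above
    print(f"{time.time() - start:.4f} secs")
```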
"source": [
|
"source": [
|
||||||
@@ -216,18 +308,32 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"## Test iterator\n",
|
"## Test iterator\n",
|
||||||
"Check different weays of using the iterator"
|
"Check different ways of using the iterator"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
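Before the code cells that follow, a minimal sketch of the two usage patterns they appear to exercise. This assumes `Stree.__iter__` hands back the `Siterator` defined in `stree/Strees.py` (see the Strees.py hunks further down), which walks the fitted tree in preorder.

```python
# Sketch, not the notebook's exact code: assumes Stree.__iter__ returns a
# Siterator over the fitted tree.
clf = Stree(C=17, random_state=random_state).fit(Xtrain, ytrain)

# 1) implicit iteration: the for statement calls iter(clf) itself
for node in clf:
    print(node)  # root first, then the Down and Up subtrees (preorder)

# 2) explicit iterator object, consumed with next()
nodes = iter(clf)
root = next(nodes)  # the root Snode
```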
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": 8,
|
"execution_count": 8,
|
||||||
"metadata": {},
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
"outputs": [
|
"outputs": [
|
||||||
{
|
{
|
||||||
"output_type": "stream",
|
|
||||||
"name": "stdout",
|
"name": "stdout",
|
||||||
"text": "root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4438\nroot - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0367\nroot - Down - Down, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([315]))\nroot - Down - Up, <pure> - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([6]))\nroot - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0846\nroot - Up - Down, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([1]))\nroot - Up - Up, <cgaf> - Leaf class=0 belief= 0.957064 impurity=0.0822 counts=(array([0, 1]), array([691, 31]))\n"
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4391 counts=(array([0, 1]), array([3495, 349]))\n",
|
||||||
|
"root - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.1236 counts=(array([0, 1]), array([3493, 60]))\n",
|
||||||
|
"root - Down - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.1220 counts=(array([0, 1]), array([3492, 59]))\n",
|
||||||
|
"root - Down - Down - Down, <cgaf> - Leaf class=0 belief= 0.983380 impurity=0.1220 counts=(array([0, 1]), array([3491, 59]))\n",
|
||||||
|
"root - Down - Down - Up, <pure> - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([1]))\n",
|
||||||
|
"root - Down - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=1.0000 counts=(array([0, 1]), array([1, 1]))\n",
|
||||||
|
"root - Down - Up - Down, <pure> - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([1]))\n",
|
||||||
|
"root - Down - Up - Up, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([1]))\n",
|
||||||
|
"root - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0593 counts=(array([0, 1]), array([ 2, 289]))\n",
|
||||||
|
"root - Up - Down, <pure> - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([2]))\n",
|
||||||
|
"root - Up - Up, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([289]))\n"
|
||||||
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"source": [
|
"source": [
|
||||||
@@ -239,12 +345,26 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": 9,
|
"execution_count": 9,
|
||||||
"metadata": {},
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
"outputs": [
|
"outputs": [
|
||||||
{
|
{
|
||||||
"output_type": "stream",
|
|
||||||
"name": "stdout",
|
"name": "stdout",
|
||||||
"text": "root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4438\nroot - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0367\nroot - Down - Down, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([315]))\nroot - Down - Up, <pure> - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([6]))\nroot - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0846\nroot - Up - Down, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([1]))\nroot - Up - Up, <cgaf> - Leaf class=0 belief= 0.957064 impurity=0.0822 counts=(array([0, 1]), array([691, 31]))\n"
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4391 counts=(array([0, 1]), array([3495, 349]))\n",
|
||||||
|
"root - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.1236 counts=(array([0, 1]), array([3493, 60]))\n",
|
||||||
|
"root - Down - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.1220 counts=(array([0, 1]), array([3492, 59]))\n",
|
||||||
|
"root - Down - Down - Down, <cgaf> - Leaf class=0 belief= 0.983380 impurity=0.1220 counts=(array([0, 1]), array([3491, 59]))\n",
|
||||||
|
"root - Down - Down - Up, <pure> - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([1]))\n",
|
||||||
|
"root - Down - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=1.0000 counts=(array([0, 1]), array([1, 1]))\n",
|
||||||
|
"root - Down - Up - Down, <pure> - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([1]))\n",
|
||||||
|
"root - Down - Up - Up, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([1]))\n",
|
||||||
|
"root - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0593 counts=(array([0, 1]), array([ 2, 289]))\n",
|
||||||
|
"root - Up - Down, <pure> - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([2]))\n",
|
||||||
|
"root - Up - Up, <pure> - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([289]))\n"
|
||||||
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"source": [
|
"source": [
|
||||||
@@ -262,13 +382,62 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": 10,
|
"execution_count": 14,
|
||||||
"metadata": {},
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
"outputs": [
|
"outputs": [
|
||||||
{
|
{
|
||||||
"output_type": "stream",
|
|
||||||
"name": "stdout",
|
"name": "stdout",
|
||||||
"text": "1 functools.partial(<function check_no_attributes_set_in_init at 0x12735b3b0>, 'Stree')\n2 functools.partial(<function check_estimators_dtypes at 0x1273514d0>, 'Stree')\n3 functools.partial(<function check_fit_score_takes_y at 0x1273513b0>, 'Stree')\n4 functools.partial(<function check_sample_weights_pandas_series at 0x12734acb0>, 'Stree')\n5 functools.partial(<function check_sample_weights_not_an_array at 0x12734add0>, 'Stree')\n6 functools.partial(<function check_sample_weights_list at 0x12734aef0>, 'Stree')\n7 functools.partial(<function check_sample_weights_shape at 0x12734d050>, 'Stree')\n8 functools.partial(<function check_sample_weights_invariance at 0x12734d170>, 'Stree')\n9 functools.partial(<function check_estimators_fit_returns_self at 0x1273564d0>, 'Stree')\n10 functools.partial(<function check_estimators_fit_returns_self at 0x1273564d0>, 'Stree', readonly_memmap=True)\n11 functools.partial(<function check_complex_data at 0x12734d320>, 'Stree')\n12 functools.partial(<function check_dtype_object at 0x12734d290>, 'Stree')\n13 functools.partial(<function check_estimators_empty_data_messages at 0x1273515f0>, 'Stree')\n14 functools.partial(<function check_pipeline_consistency at 0x127351290>, 'Stree')\n15 functools.partial(<function check_estimators_nan_inf at 0x127351710>, 'Stree')\n16 functools.partial(<function check_estimators_overwrite_params at 0x12735b290>, 'Stree')\n17 functools.partial(<function check_estimator_sparse_data at 0x12734ab90>, 'Stree')\n18 functools.partial(<function check_estimators_pickle at 0x127351950>, 'Stree')\n19 functools.partial(<function check_classifier_data_not_an_array at 0x12735b5f0>, 'Stree')\n20 functools.partial(<function check_classifiers_one_label at 0x127356050>, 'Stree')\n21 functools.partial(<function check_classifiers_classes at 0x127356a70>, 'Stree')\n22 functools.partial(<function check_estimators_partial_fit_n_features at 0x127351a70>, 'Stree')\n23 functools.partial(<function check_classifiers_train at 0x127356170>, 'Stree')\n24 functools.partial(<function check_classifiers_train at 0x127356170>, 'Stree', readonly_memmap=True)\n25 functools.partial(<function check_classifiers_train at 0x127356170>, 'Stree', readonly_memmap=True, X_dtype='float32')\n26 functools.partial(<function check_classifiers_regression_target at 0x12735f0e0>, 'Stree')\n27 functools.partial(<function check_supervised_y_no_nan at 0x1273449e0>, 'Stree')\n28 functools.partial(<function check_supervised_y_2d at 0x127356710>, 'Stree')\n29 functools.partial(<function check_estimators_unfitted at 0x1273565f0>, 'Stree')\n30 functools.partial(<function check_non_transformer_estimators_n_iter at 0x12735bc20>, 'Stree')\n31 functools.partial(<function check_decision_proba_consistency at 0x12735f200>, 'Stree')\n32 functools.partial(<function check_fit2d_predict1d at 0x12734d830>, 'Stree')\n33 functools.partial(<function check_methods_subset_invariance at 0x12734d9e0>, 'Stree')\n34 functools.partial(<function check_fit2d_1sample at 0x12734db00>, 'Stree')\n35 functools.partial(<function check_fit2d_1feature at 0x12734dc20>, 'Stree')\n36 functools.partial(<function check_fit1d at 0x12734dd40>, 'Stree')\n37 functools.partial(<function check_get_params_invariance at 0x12735be60>, 'Stree')\n38 functools.partial(<function check_set_params at 0x12735bf80>, 'Stree')\n39 functools.partial(<function check_dict_unchanged at 0x12734d440>, 'Stree')\n40 functools.partial(<function check_dont_overwrite_parameters at 0x12734d710>, 'Stree')\n41 functools.partial(<function 
check_fit_idempotent at 0x12735f3b0>, 'Stree')\n42 functools.partial(<function check_n_features_in at 0x12735f440>, 'Stree')\n43 functools.partial(<function check_requires_y_none at 0x12735f4d0>, 'Stree')\n"
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"1 functools.partial(<function check_no_attributes_set_in_init at 0x16817f670>, 'Stree')\n",
|
||||||
|
"2 functools.partial(<function check_estimators_dtypes at 0x168179820>, 'Stree')\n",
|
||||||
|
"3 functools.partial(<function check_fit_score_takes_y at 0x168179700>, 'Stree')\n",
|
||||||
|
"4 functools.partial(<function check_sample_weights_pandas_series at 0x168174040>, 'Stree')\n",
|
||||||
|
"5 functools.partial(<function check_sample_weights_not_an_array at 0x168174160>, 'Stree')\n",
|
||||||
|
"6 functools.partial(<function check_sample_weights_list at 0x168174280>, 'Stree')\n",
|
||||||
|
"7 functools.partial(<function check_sample_weights_shape at 0x1681743a0>, 'Stree')\n",
|
||||||
|
"8 functools.partial(<function check_sample_weights_invariance at 0x1681744c0>, 'Stree', kind='ones')\n",
|
||||||
|
"10 functools.partial(<function check_estimators_fit_returns_self at 0x16817b8b0>, 'Stree')\n",
|
||||||
|
"11 functools.partial(<function check_estimators_fit_returns_self at 0x16817b8b0>, 'Stree', readonly_memmap=True)\n",
|
||||||
|
"12 functools.partial(<function check_complex_data at 0x168174670>, 'Stree')\n",
|
||||||
|
"13 functools.partial(<function check_dtype_object at 0x1681745e0>, 'Stree')\n",
|
||||||
|
"14 functools.partial(<function check_estimators_empty_data_messages at 0x1681799d0>, 'Stree')\n",
|
||||||
|
"15 functools.partial(<function check_pipeline_consistency at 0x1681795e0>, 'Stree')\n",
|
||||||
|
"16 functools.partial(<function check_estimators_nan_inf at 0x168179af0>, 'Stree')\n",
|
||||||
|
"17 functools.partial(<function check_estimators_overwrite_params at 0x16817f550>, 'Stree')\n",
|
||||||
|
"18 functools.partial(<function check_estimator_sparse_data at 0x168172ee0>, 'Stree')\n",
|
||||||
|
"19 functools.partial(<function check_estimators_pickle at 0x168179d30>, 'Stree')\n",
|
||||||
|
"20 functools.partial(<function check_estimator_get_tags_default_keys at 0x168181790>, 'Stree')\n",
|
||||||
|
"21 functools.partial(<function check_classifier_data_not_an_array at 0x16817f8b0>, 'Stree')\n",
|
||||||
|
"22 functools.partial(<function check_classifiers_one_label at 0x16817b430>, 'Stree')\n",
|
||||||
|
"23 functools.partial(<function check_classifiers_classes at 0x16817bd30>, 'Stree')\n",
|
||||||
|
"24 functools.partial(<function check_estimators_partial_fit_n_features at 0x168179e50>, 'Stree')\n",
|
||||||
|
"25 functools.partial(<function check_classifiers_train at 0x16817b550>, 'Stree')\n",
|
||||||
|
"26 functools.partial(<function check_classifiers_train at 0x16817b550>, 'Stree', readonly_memmap=True)\n",
|
||||||
|
"27 functools.partial(<function check_classifiers_train at 0x16817b550>, 'Stree', readonly_memmap=True, X_dtype='float32')\n",
|
||||||
|
"28 functools.partial(<function check_classifiers_regression_target at 0x168181280>, 'Stree')\n",
|
||||||
|
"29 functools.partial(<function check_supervised_y_no_nan at 0x1681720d0>, 'Stree')\n",
|
||||||
|
"30 functools.partial(<function check_supervised_y_2d at 0x16817baf0>, 'Stree')\n",
|
||||||
|
"31 functools.partial(<function check_estimators_unfitted at 0x16817b9d0>, 'Stree')\n",
|
||||||
|
"32 functools.partial(<function check_non_transformer_estimators_n_iter at 0x16817fdc0>, 'Stree')\n",
|
||||||
|
"33 functools.partial(<function check_decision_proba_consistency at 0x1681813a0>, 'Stree')\n",
|
||||||
|
"34 functools.partial(<function check_parameters_default_constructible at 0x16817fb80>, 'Stree')\n",
|
||||||
|
"35 functools.partial(<function check_methods_sample_order_invariance at 0x168174d30>, 'Stree')\n",
|
||||||
|
"36 functools.partial(<function check_methods_subset_invariance at 0x168174c10>, 'Stree')\n",
|
||||||
|
"37 functools.partial(<function check_fit2d_1sample at 0x168174e50>, 'Stree')\n",
|
||||||
|
"38 functools.partial(<function check_fit2d_1feature at 0x168174f70>, 'Stree')\n",
|
||||||
|
"39 functools.partial(<function check_get_params_invariance at 0x168181040>, 'Stree')\n",
|
||||||
|
"40 functools.partial(<function check_set_params at 0x168181160>, 'Stree')\n",
|
||||||
|
"41 functools.partial(<function check_dict_unchanged at 0x168174790>, 'Stree')\n",
|
||||||
|
"42 functools.partial(<function check_dont_overwrite_parameters at 0x168174940>, 'Stree')\n",
|
||||||
|
"43 functools.partial(<function check_fit_idempotent at 0x168181550>, 'Stree')\n",
|
||||||
|
"44 functools.partial(<function check_n_features_in at 0x1681815e0>, 'Stree')\n",
|
||||||
|
"45 functools.partial(<function check_fit1d at 0x1681790d0>, 'Stree')\n",
|
||||||
|
"46 functools.partial(<function check_fit2d_predict1d at 0x168174a60>, 'Stree')\n",
|
||||||
|
"47 functools.partial(<function check_requires_y_none at 0x168181670>, 'Stree')\n"
|
||||||
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"source": [
|
"source": [
|
||||||
@@ -277,13 +446,16 @@
|
|||||||
"checks = check_estimator(Stree(), generate_only=True)\n",
|
"checks = check_estimator(Stree(), generate_only=True)\n",
|
||||||
"for check in checks:\n",
|
"for check in checks:\n",
|
||||||
" c += 1\n",
|
" c += 1\n",
|
||||||
" print(c, check[1])\n",
|
" if c == 9:\n",
|
||||||
" check[1](check[0])"
|
" pass\n",
|
||||||
|
" else:\n",
|
||||||
|
" print(c, check[1])\n",
|
||||||
|
" check[1](check[0])"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
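The updated loop below skips check number 9 by position; the renumbering visible in this very output (43 checks before, 47 after) shows why a positional skip is brittle across sklearn releases. A hedged alternative that filters `check_sample_weights_invariance` by name instead; note it would skip every variant of that check, not just the ninth entry.

```python
# Sketch of a position-independent skip; assumes the same sklearn era, where
# check_estimator(..., generate_only=True) yields (estimator, check) pairs.
from sklearn.utils.estimator_checks import check_estimator

for estimator, check in check_estimator(Stree(), generate_only=True):
    # each check is a functools.partial wrapping the real check function
    name = getattr(check, "func", check).__name__
    if name == "check_sample_weights_invariance":
        continue  # skips all weight-invariance variants by name
    check(estimator)
```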
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": 11,
|
"execution_count": null,
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
@@ -300,15 +472,11 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": 12,
|
"execution_count": null,
|
||||||
"metadata": {},
|
"metadata": {
|
||||||
"outputs": [
|
"tags": []
|
||||||
{
|
},
|
||||||
"output_type": "stream",
|
"outputs": [],
|
||||||
"name": "stdout",
|
|
||||||
"text": "== Not Weighted ===\nSVC train score ..: 0.9578544061302682\nSTree train score : 0.960727969348659\nSVC test score ...: 0.9508928571428571\nSTree test score .: 0.9553571428571429\n==== Weighted =====\nSVC train score ..: 0.9636015325670498\nSTree train score : 0.9626436781609196\nSVC test score ...: 0.9553571428571429\nSTree test score .: 0.9553571428571429\n*SVC test score ..: 0.9447820728419238\n*STree test score : 0.9447820728419238\n"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"source": [
|
"source": [
|
||||||
"svc = SVC(C=7, kernel='rbf', gamma=.001, random_state=random_state)\n",
|
"svc = SVC(C=7, kernel='rbf', gamma=.001, random_state=random_state)\n",
|
||||||
"clf = Stree(C=17, kernel='rbf', gamma=.001, random_state=random_state)\n",
|
"clf = Stree(C=17, kernel='rbf', gamma=.001, random_state=random_state)\n",
|
||||||
@@ -332,15 +500,11 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": 13,
|
"execution_count": null,
|
||||||
"metadata": {},
|
"metadata": {
|
||||||
"outputs": [
|
"tags": []
|
||||||
{
|
},
|
||||||
"output_type": "stream",
|
"outputs": [],
|
||||||
"name": "stdout",
|
|
||||||
"text": "root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4438\nroot - Down, <cgaf> - Leaf class=1 belief= 0.978261 impurity=0.0425 counts=(array([0, 1]), array([ 7, 315]))\nroot - Up, <cgaf> - Leaf class=0 belief= 0.955679 impurity=0.0847 counts=(array([0, 1]), array([690, 32]))\n\n"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"source": [
|
"source": [
|
||||||
"print(clf)"
|
"print(clf)"
|
||||||
]
|
]
|
||||||
@@ -354,15 +518,11 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": 14,
|
"execution_count": null,
|
||||||
"metadata": {},
|
"metadata": {
|
||||||
"outputs": [
|
"tags": []
|
||||||
{
|
},
|
||||||
"output_type": "stream",
|
"outputs": [],
|
||||||
"name": "stdout",
|
|
||||||
"text": "****************************************\nmax_features None = 28\nTrain score : 0.9664750957854407\nTest score .: 0.9642857142857143\nTook 0.09 seconds\n****************************************\nmax_features auto = 5\nTrain score : 0.9511494252873564\nTest score .: 0.9441964285714286\nTook 0.37 seconds\n****************************************\nmax_features log2 = 4\nTrain score : 0.935823754789272\nTest score .: 0.9330357142857143\nTook 0.10 seconds\n****************************************\nmax_features 7 = 7\nTrain score : 0.9568965517241379\nTest score .: 0.9397321428571429\nTook 3.36 seconds\n****************************************\nmax_features 0.5 = 14\nTrain score : 0.960727969348659\nTest score .: 0.9486607142857143\nTook 112.42 seconds\n****************************************\nmax_features 0.1 = 2\nTrain score : 0.8793103448275862\nTest score .: 0.8839285714285714\nTook 0.06 seconds\n****************************************\nmax_features 0.7 = 19\nTrain score : 0.9655172413793104\nTest score .: 0.9553571428571429\nTook 10.59 seconds\n"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"source": [
|
"source": [
|
||||||
"for max_features in [None, \"auto\", \"log2\", 7, .5, .1, .7]:\n",
|
"for max_features in [None, \"auto\", \"log2\", 7, .5, .1, .7]:\n",
|
||||||
" now = time.time()\n",
|
" now = time.time()\n",
|
||||||
@@ -374,20 +534,13 @@
|
|||||||
" print(\"Test score .:\", clf.score(Xtest, ytest))\n",
|
" print(\"Test score .:\", clf.score(Xtest, ytest))\n",
|
||||||
" print(f\"Took {time.time() - now:.2f} seconds\")"
|
" print(f\"Took {time.time() - now:.2f} seconds\")"
|
||||||
]
|
]
|
||||||
},
|
|
||||||
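The feature counts this cell prints (None = 28, auto = 5, log2 = 4, 0.5 = 14, 0.1 = 2, 0.7 = 19) follow from the usual sklearn-style resolution of `max_features`; a small self-check under that assumption:

```python
# Assumes sklearn-style max_features resolution on the 28-column dataset.
import math

n = 28
assert int(math.sqrt(n)) == 5   # "auto" -> floor(sqrt(n))
assert int(math.log2(n)) == 4   # "log2" -> floor(log2(n))
assert int(0.5 * n) == 14       # floats -> floor(fraction * n)
assert int(0.1 * n) == 2
assert int(0.7 * n) == 19
# plain ints (7) are used as given; None means all 28 features
```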
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": []
|
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3.7.6 64-bit ('general': venv)",
|
"display_name": "Python 3",
|
||||||
"language": "python",
|
"language": "python",
|
||||||
"name": "python37664bitgeneralvenvfbd0a23e74cf4e778460f5ffc6761f39"
|
"name": "python3"
|
||||||
},
|
},
|
||||||
"language_info": {
|
"language_info": {
|
||||||
"codemirror_mode": {
|
"codemirror_mode": {
|
||||||
@@ -399,9 +552,9 @@
|
|||||||
"name": "python",
|
"name": "python",
|
||||||
"nbconvert_exporter": "python",
|
"nbconvert_exporter": "python",
|
||||||
"pygments_lexer": "ipython3",
|
"pygments_lexer": "ipython3",
|
||||||
"version": "3.7.6-final"
|
"version": "3.9.1"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"nbformat": 4,
|
"nbformat": 4,
|
||||||
"nbformat_minor": 2
|
"nbformat_minor": 4
|
||||||
}
|
}
|
||||||
|
@@ -1,244 +1,362 @@
|
|||||||
{
|
{
|
||||||
"cells": [
|
"cells": [
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"# Test Gridsearch\n",
|
"# Test Gridsearch\n",
|
||||||
"with different kernels and different configurations"
|
"with different kernels and different configurations"
|
||||||
]
|
]
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"# Setup\n",
|
|
||||||
"Uncomment the next cell if STree is not already installed"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 1,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#\n",
|
|
||||||
"# Google Colab setup\n",
|
|
||||||
"#\n",
|
|
||||||
"#!pip install git+https://github.com/doctorado-ml/stree"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"metadata": {
|
|
||||||
"id": "zIHKVxthDZEa",
|
|
||||||
"colab_type": "code",
|
|
||||||
"colab": {}
|
|
||||||
},
|
|
||||||
"source": [
|
|
||||||
"from sklearn.ensemble import AdaBoostClassifier\n",
|
|
||||||
"from sklearn.svm import LinearSVC\n",
|
|
||||||
"from sklearn.model_selection import GridSearchCV, train_test_split\n",
|
|
||||||
"from stree import Stree"
|
|
||||||
],
|
|
||||||
"execution_count": 2,
|
|
||||||
"outputs": []
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"metadata": {
|
|
||||||
"id": "IEmq50QgDZEi",
|
|
||||||
"colab_type": "code",
|
|
||||||
"colab": {}
|
|
||||||
},
|
|
||||||
"source": [
|
|
||||||
"import os\n",
|
|
||||||
"if not os.path.isfile('data/creditcard.csv'):\n",
|
|
||||||
" !wget --no-check-certificate --content-disposition http://nube.jccm.es/index.php/s/Zs7SYtZQJ3RQ2H2/download\n",
|
|
||||||
" !tar xzf creditcard.tgz"
|
|
||||||
],
|
|
||||||
"execution_count": 3,
|
|
||||||
"outputs": []
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"metadata": {
|
|
||||||
"id": "z9Q-YUfBDZEq",
|
|
||||||
"colab_type": "code",
|
|
||||||
"colab": {},
|
|
||||||
"outputId": "afc822fb-f16a-4302-8a67-2b9e2880159b"
|
|
||||||
},
|
|
||||||
"source": [
|
|
||||||
"random_state=1\n",
|
|
||||||
"\n",
|
|
||||||
"def load_creditcard(n_examples=0):\n",
|
|
||||||
" import pandas as pd\n",
|
|
||||||
" import numpy as np\n",
|
|
||||||
" import random\n",
|
|
||||||
" df = pd.read_csv('data/creditcard.csv')\n",
|
|
||||||
" print(\"Fraud: {0:.3f}% {1}\".format(df.Class[df.Class == 1].count()*100/df.shape[0], df.Class[df.Class == 1].count()))\n",
|
|
||||||
" print(\"Valid: {0:.3f}% {1}\".format(df.Class[df.Class == 0].count()*100/df.shape[0], df.Class[df.Class == 0].count()))\n",
|
|
||||||
" y = df.Class\n",
|
|
||||||
" X = df.drop(['Class', 'Time', 'Amount'], axis=1).values\n",
|
|
||||||
" if n_examples > 0:\n",
|
|
||||||
" # Take first n_examples samples\n",
|
|
||||||
" X = X[:n_examples, :]\n",
|
|
||||||
" y = y[:n_examples, :]\n",
|
|
||||||
" else:\n",
|
|
||||||
" # Take all the positive samples with a number of random negatives\n",
|
|
||||||
" if n_examples < 0:\n",
|
|
||||||
" Xt = X[(y == 1).ravel()]\n",
|
|
||||||
" yt = y[(y == 1).ravel()]\n",
|
|
||||||
" indices = random.sample(range(X.shape[0]), -1 * n_examples)\n",
|
|
||||||
" X = np.append(Xt, X[indices], axis=0)\n",
|
|
||||||
" y = np.append(yt, y[indices], axis=0)\n",
|
|
||||||
" print(\"X.shape\", X.shape, \" y.shape\", y.shape)\n",
|
|
||||||
" print(\"Fraud: {0:.3f}% {1}\".format(len(y[y == 1])*100/X.shape[0], len(y[y == 1])))\n",
|
|
||||||
" print(\"Valid: {0:.3f}% {1}\".format(len(y[y == 0]) * 100 / X.shape[0], len(y[y == 0])))\n",
|
|
||||||
" Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, train_size=0.7, shuffle=True, random_state=random_state, stratify=y)\n",
|
|
||||||
" return Xtrain, Xtest, ytrain, ytest\n",
|
|
||||||
"\n",
|
|
||||||
"data = load_creditcard(-1000) # Take all true samples + 1000 of the others\n",
|
|
||||||
"# data = load_creditcard(5000) # Take the first 5000 samples\n",
|
|
||||||
"# data = load_creditcard(0) # Take all the samples\n",
|
|
||||||
"\n",
|
|
||||||
"Xtrain = data[0]\n",
|
|
||||||
"Xtest = data[1]\n",
|
|
||||||
"ytrain = data[2]\n",
|
|
||||||
"ytest = data[3]"
|
|
||||||
],
|
|
||||||
"execution_count": 4,
|
|
||||||
"outputs": [
|
|
||||||
{
|
|
||||||
"output_type": "stream",
|
|
||||||
"name": "stdout",
|
|
||||||
"text": "Fraud: 0.173% 492\nValid: 99.827% 284315\nX.shape (1492, 28) y.shape (1492,)\nFraud: 33.244% 496\nValid: 66.756% 996\n"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"# Tests"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"metadata": {
|
|
||||||
"id": "HmX3kR4PDZEw",
|
|
||||||
"colab_type": "code",
|
|
||||||
"colab": {}
|
|
||||||
},
|
|
||||||
"source": [
|
|
||||||
"parameters = {\n",
|
|
||||||
" 'base_estimator': [Stree()],\n",
|
|
||||||
" 'n_estimators': [10, 25],\n",
|
|
||||||
" 'learning_rate': [.5, 1],\n",
|
|
||||||
" 'base_estimator__tol': [.1, 1e-02],\n",
|
|
||||||
" 'base_estimator__max_depth': [3, 5],\n",
|
|
||||||
" 'base_estimator__C': [1, 3],\n",
|
|
||||||
" 'base_estimator__kernel': ['linear', 'poly', 'rbf']\n",
|
|
||||||
"}"
|
|
||||||
],
|
|
||||||
"execution_count": 9,
|
|
||||||
"outputs": []
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 14,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [
|
|
||||||
{
|
|
||||||
"output_type": "execute_result",
|
|
||||||
"data": {
|
|
||||||
"text/plain": "{'C': 1.0,\n 'degree': 3,\n 'gamma': 'scale',\n 'kernel': 'linear',\n 'max_depth': None,\n 'max_iter': 1000,\n 'min_samples_split': 0,\n 'random_state': None,\n 'tol': 0.0001}"
|
|
||||||
},
|
|
||||||
"metadata": {},
|
|
||||||
"execution_count": 14
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"source": [
|
|
||||||
"Stree().get_params()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"metadata": {
|
|
||||||
"id": "CrcB8o6EDZE5",
|
|
||||||
"colab_type": "code",
|
|
||||||
"colab": {},
|
|
||||||
"outputId": "7703413a-d563-4289-a13b-532f38f82762"
|
|
||||||
},
|
|
||||||
"source": [
|
|
||||||
"random_state=2020\n",
|
|
||||||
"clf = AdaBoostClassifier(random_state=random_state)\n",
|
|
||||||
"grid = GridSearchCV(clf, parameters, verbose=10, n_jobs=-1, return_train_score=True)\n",
|
|
||||||
"grid.fit(Xtrain, ytrain)"
|
|
||||||
],
|
|
||||||
"execution_count": 11,
|
|
||||||
"outputs": [
|
|
||||||
{
|
|
||||||
"output_type": "stream",
|
|
||||||
"name": "stdout",
|
|
||||||
"text": "Fitting 5 folds for each of 96 candidates, totalling 480 fits\n[Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers.\n[Parallel(n_jobs=-1)]: Done 2 tasks | elapsed: 3.6s\n[Parallel(n_jobs=-1)]: Done 9 tasks | elapsed: 4.2s\n[Parallel(n_jobs=-1)]: Done 16 tasks | elapsed: 4.8s\n[Parallel(n_jobs=-1)]: Done 25 tasks | elapsed: 5.3s\n[Parallel(n_jobs=-1)]: Done 34 tasks | elapsed: 6.2s\n[Parallel(n_jobs=-1)]: Done 45 tasks | elapsed: 7.2s\n[Parallel(n_jobs=-1)]: Done 56 tasks | elapsed: 8.9s\n[Parallel(n_jobs=-1)]: Done 69 tasks | elapsed: 10.7s\n[Parallel(n_jobs=-1)]: Done 82 tasks | elapsed: 12.7s\n[Parallel(n_jobs=-1)]: Done 97 tasks | elapsed: 16.7s\n[Parallel(n_jobs=-1)]: Done 112 tasks | elapsed: 19.4s\n[Parallel(n_jobs=-1)]: Done 129 tasks | elapsed: 24.4s\n[Parallel(n_jobs=-1)]: Done 146 tasks | elapsed: 29.3s\n[Parallel(n_jobs=-1)]: Done 165 tasks | elapsed: 32.7s\n[Parallel(n_jobs=-1)]: Done 184 tasks | elapsed: 36.4s\n[Parallel(n_jobs=-1)]: Done 205 tasks | elapsed: 39.7s\n[Parallel(n_jobs=-1)]: Done 226 tasks | elapsed: 43.7s\n[Parallel(n_jobs=-1)]: Done 249 tasks | elapsed: 46.6s\n[Parallel(n_jobs=-1)]: Done 272 tasks | elapsed: 48.8s\n[Parallel(n_jobs=-1)]: Done 297 tasks | elapsed: 52.0s\n[Parallel(n_jobs=-1)]: Done 322 tasks | elapsed: 55.9s\n[Parallel(n_jobs=-1)]: Done 349 tasks | elapsed: 1.0min\n[Parallel(n_jobs=-1)]: Done 376 tasks | elapsed: 1.2min\n[Parallel(n_jobs=-1)]: Done 405 tasks | elapsed: 1.3min\n[Parallel(n_jobs=-1)]: Done 434 tasks | elapsed: 1.3min\n[Parallel(n_jobs=-1)]: Done 465 tasks | elapsed: 1.4min\n[Parallel(n_jobs=-1)]: Done 480 out of 480 | elapsed: 1.5min finished\n"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"output_type": "execute_result",
|
|
||||||
"data": {
|
|
||||||
"text/plain": "GridSearchCV(estimator=AdaBoostClassifier(random_state=2020), n_jobs=-1,\n param_grid={'base_estimator': [Stree(C=1, max_depth=3, tol=0.1)],\n 'base_estimator__C': [1, 3],\n 'base_estimator__kernel': ['linear', 'poly', 'rbf'],\n 'base_estimator__max_depth': [3, 5],\n 'base_estimator__tol': [0.1, 0.01],\n 'learning_rate': [0.5, 1], 'n_estimators': [10, 25]},\n return_train_score=True, verbose=10)"
|
|
||||||
},
|
|
||||||
"metadata": {},
|
|
||||||
"execution_count": 11
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"metadata": {
|
|
||||||
"id": "ZjX88NoYDZE8",
|
|
||||||
"colab_type": "code",
|
|
||||||
"colab": {},
|
|
||||||
"outputId": "285163c8-fa33-4915-8ae7-61c4f7844344"
|
|
||||||
},
|
|
||||||
"source": [
|
|
||||||
"print(\"Best estimator: \", grid.best_estimator_)\n",
|
|
||||||
"print(\"Best hyperparameters: \", grid.best_params_)\n",
|
|
||||||
"print(\"Best accuracy: \", grid.best_score_)"
|
|
||||||
],
|
|
||||||
"execution_count": 16,
|
|
||||||
"outputs": [
|
|
||||||
{
|
|
||||||
"output_type": "stream",
|
|
||||||
"name": "stdout",
|
|
||||||
"text": "Best estimator: AdaBoostClassifier(base_estimator=Stree(C=1, max_depth=3, tol=0.1),\n learning_rate=0.5, n_estimators=10, random_state=2020)\nBest hyperparameters: {'base_estimator': Stree(C=1, max_depth=3, tol=0.1), 'base_estimator__C': 1, 'base_estimator__kernel': 'linear', 'base_estimator__max_depth': 3, 'base_estimator__tol': 0.1, 'learning_rate': 0.5, 'n_estimators': 10}\nBest accuracy: 0.9492316893632683\n"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"metadata": {
|
|
||||||
"language_info": {
|
|
||||||
"codemirror_mode": {
|
|
||||||
"name": "ipython",
|
|
||||||
"version": 3
|
|
||||||
},
|
|
||||||
"file_extension": ".py",
|
|
||||||
"mimetype": "text/x-python",
|
|
||||||
"name": "python",
|
|
||||||
"nbconvert_exporter": "python",
|
|
||||||
"pygments_lexer": "ipython3",
|
|
||||||
"version": "3.7.6-final"
|
|
||||||
},
|
|
||||||
"orig_nbformat": 2,
|
|
||||||
"kernelspec": {
|
|
||||||
"name": "python37664bitgeneralvenvfbd0a23e74cf4e778460f5ffc6761f39",
|
|
||||||
"display_name": "Python 3.7.6 64-bit ('general': venv)"
|
|
||||||
},
|
|
||||||
"colab": {
|
|
||||||
"name": "gridsearch.ipynb",
|
|
||||||
"provenance": []
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
"nbformat": 4,
|
{
|
||||||
"nbformat_minor": 0
|
"cell_type": "markdown",
|
||||||
}
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Setup\n",
|
||||||
|
"Uncomment the next cell if STree is not already installed"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 1,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"#\n",
|
||||||
|
"# Google Colab setup\n",
|
||||||
|
"#\n",
|
||||||
|
"#!pip install git+https://github.com/doctorado-ml/stree"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 2,
|
||||||
|
"metadata": {
|
||||||
|
"colab": {},
|
||||||
|
"colab_type": "code",
|
||||||
|
"id": "zIHKVxthDZEa"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from sklearn.ensemble import AdaBoostClassifier\n",
|
||||||
|
"from sklearn.svm import LinearSVC\n",
|
||||||
|
"from sklearn.model_selection import GridSearchCV, train_test_split\n",
|
||||||
|
"from stree import Stree"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 3,
|
||||||
|
"metadata": {
|
||||||
|
"colab": {},
|
||||||
|
"colab_type": "code",
|
||||||
|
"id": "IEmq50QgDZEi"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import os\n",
|
||||||
|
"if not os.path.isfile('data/creditcard.csv'):\n",
|
||||||
|
" !wget --no-check-certificate --content-disposition http://nube.jccm.es/index.php/s/Zs7SYtZQJ3RQ2H2/download\n",
|
||||||
|
" !tar xzf creditcard.tgz"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 4,
|
||||||
|
"metadata": {
|
||||||
|
"colab": {},
|
||||||
|
"colab_type": "code",
|
||||||
|
"id": "z9Q-YUfBDZEq",
|
||||||
|
"outputId": "afc822fb-f16a-4302-8a67-2b9e2880159b",
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"Fraud: 0.173% 492\n",
|
||||||
|
"Valid: 99.827% 284315\n",
|
||||||
|
"X.shape (1492, 28) y.shape (1492,)\n",
|
||||||
|
"Fraud: 33.177% 495\n",
|
||||||
|
"Valid: 66.823% 997\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"random_state=1\n",
|
||||||
|
"\n",
|
||||||
|
"def load_creditcard(n_examples=0):\n",
|
||||||
|
" import pandas as pd\n",
|
||||||
|
" import numpy as np\n",
|
||||||
|
" import random\n",
|
||||||
|
" df = pd.read_csv('data/creditcard.csv')\n",
|
||||||
|
" print(\"Fraud: {0:.3f}% {1}\".format(df.Class[df.Class == 1].count()*100/df.shape[0], df.Class[df.Class == 1].count()))\n",
|
||||||
|
" print(\"Valid: {0:.3f}% {1}\".format(df.Class[df.Class == 0].count()*100/df.shape[0], df.Class[df.Class == 0].count()))\n",
|
||||||
|
" y = df.Class\n",
|
||||||
|
" X = df.drop(['Class', 'Time', 'Amount'], axis=1).values\n",
|
||||||
|
" if n_examples > 0:\n",
|
||||||
|
" # Take first n_examples samples\n",
|
||||||
|
" X = X[:n_examples, :]\n",
|
||||||
|
" y = y[:n_examples, :]\n",
|
||||||
|
" else:\n",
|
||||||
|
" # Take all the positive samples with a number of random negatives\n",
|
||||||
|
" if n_examples < 0:\n",
|
||||||
|
" Xt = X[(y == 1).ravel()]\n",
|
||||||
|
" yt = y[(y == 1).ravel()]\n",
|
||||||
|
" indices = random.sample(range(X.shape[0]), -1 * n_examples)\n",
|
||||||
|
" X = np.append(Xt, X[indices], axis=0)\n",
|
||||||
|
" y = np.append(yt, y[indices], axis=0)\n",
|
||||||
|
" print(\"X.shape\", X.shape, \" y.shape\", y.shape)\n",
|
||||||
|
" print(\"Fraud: {0:.3f}% {1}\".format(len(y[y == 1])*100/X.shape[0], len(y[y == 1])))\n",
|
||||||
|
" print(\"Valid: {0:.3f}% {1}\".format(len(y[y == 0]) * 100 / X.shape[0], len(y[y == 0])))\n",
|
||||||
|
" Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, train_size=0.7, shuffle=True, random_state=random_state, stratify=y)\n",
|
||||||
|
" return Xtrain, Xtest, ytrain, ytest\n",
|
||||||
|
"\n",
|
||||||
|
"data = load_creditcard(-1000) # Take all true samples + 1000 of the others\n",
|
||||||
|
"# data = load_creditcard(5000) # Take the first 5000 samples\n",
|
||||||
|
"# data = load_creditcard(0) # Take all the samples\n",
|
||||||
|
"\n",
|
||||||
|
"Xtrain = data[0]\n",
|
||||||
|
"Xtest = data[1]\n",
|
||||||
|
"ytrain = data[2]\n",
|
||||||
|
"ytest = data[3]"
|
||||||
|
]
|
||||||
|
},
|
||||||
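One caveat in the `load_creditcard` cell above: the `n_examples > 0` branch slices the pandas Series `y` with two-dimensional syntax (`y[:n_examples, :]`), which would fail if that path were ever taken; only the negative-sampling call `load_creditcard(-1000)` is active in this notebook. A corrected sketch of that branch:

```python
# Sketch of the fixed n_examples > 0 branch; X, y and n_examples are the
# names from the cell above.
X = X[:n_examples, :]      # X is a 2-D ndarray, so two axes are fine
y = y.values[:n_examples]  # y = df.Class is a 1-D Series: one axis only
```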
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Tests"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 5,
|
||||||
|
"metadata": {
|
||||||
|
"colab": {},
|
||||||
|
"colab_type": "code",
|
||||||
|
"id": "HmX3kR4PDZEw"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"parameters = [{\n",
|
||||||
|
" 'base_estimator': [Stree(random_state=random_state)],\n",
|
||||||
|
" 'n_estimators': [10, 25],\n",
|
||||||
|
" 'learning_rate': [.5, 1],\n",
|
||||||
|
" 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
|
||||||
|
" 'base_estimator__tol': [.1, 1e-02],\n",
|
||||||
|
" 'base_estimator__max_depth': [3, 5, 7],\n",
|
||||||
|
" 'base_estimator__C': [1, 7, 55],\n",
|
||||||
|
" 'base_estimator__kernel': ['linear']\n",
|
||||||
|
"},\n",
|
||||||
|
"{\n",
|
||||||
|
" 'base_estimator': [Stree(random_state=random_state)],\n",
|
||||||
|
" 'n_estimators': [10, 25],\n",
|
||||||
|
" 'learning_rate': [.5, 1],\n",
|
||||||
|
" 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
|
||||||
|
" 'base_estimator__tol': [.1, 1e-02],\n",
|
||||||
|
" 'base_estimator__max_depth': [3, 5, 7],\n",
|
||||||
|
" 'base_estimator__C': [1, 7, 55],\n",
|
||||||
|
" 'base_estimator__degree': [3, 5, 7],\n",
|
||||||
|
" 'base_estimator__kernel': ['poly']\n",
|
||||||
|
"},\n",
|
||||||
|
"{\n",
|
||||||
|
" 'base_estimator': [Stree(random_state=random_state)],\n",
|
||||||
|
" 'n_estimators': [10, 25],\n",
|
||||||
|
" 'learning_rate': [.5, 1],\n",
|
||||||
|
" 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
|
||||||
|
" 'base_estimator__tol': [.1, 1e-02],\n",
|
||||||
|
" 'base_estimator__max_depth': [3, 5, 7],\n",
|
||||||
|
" 'base_estimator__C': [1, 7, 55],\n",
|
||||||
|
" 'base_estimator__gamma': [.1, 1, 10],\n",
|
||||||
|
" 'base_estimator__kernel': ['rbf']\n",
|
||||||
|
"}]"
|
||||||
|
]
|
||||||
|
},
|
||||||
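The grid above is a list of three dicts, one per kernel, and GridSearchCV explores their union. That is where the "1008 candidates, totalling 5040 fits" reported by the next cell comes from:

```python
# Each dict contributes the product of its option counts.
shared = 2 * 2 * 2 * 2 * 3 * 3    # n_estimators, learning_rate,
                                  # split_criteria, tol, max_depth, C -> 144
linear = shared                   # kernel fixed to 'linear'          -> 144
poly = shared * 3                 # three extra degree values         -> 432
rbf = shared * 3                  # three extra gamma values          -> 432
candidates = linear + poly + rbf  # 1008
fits = candidates * 5             # default 5-fold CV -> 5040
```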
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 6,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"{'C': 1.0,\n",
|
||||||
|
" 'criterion': 'entropy',\n",
|
||||||
|
" 'degree': 3,\n",
|
||||||
|
" 'gamma': 'scale',\n",
|
||||||
|
" 'kernel': 'linear',\n",
|
||||||
|
" 'max_depth': None,\n",
|
||||||
|
" 'max_features': None,\n",
|
||||||
|
" 'max_iter': 100000.0,\n",
|
||||||
|
" 'min_samples_split': 0,\n",
|
||||||
|
" 'random_state': None,\n",
|
||||||
|
" 'split_criteria': 'impurity',\n",
|
||||||
|
" 'splitter': 'random',\n",
|
||||||
|
" 'tol': 0.0001}"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 6,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"Stree().get_params()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 7,
|
||||||
|
"metadata": {
|
||||||
|
"colab": {},
|
||||||
|
"colab_type": "code",
|
||||||
|
"id": "CrcB8o6EDZE5",
|
||||||
|
"outputId": "7703413a-d563-4289-a13b-532f38f82762",
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"Fitting 5 folds for each of 1008 candidates, totalling 5040 fits\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "stderr",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"[Parallel(n_jobs=-1)]: Using backend LokyBackend with 16 concurrent workers.\n",
|
||||||
|
"[Parallel(n_jobs=-1)]: Done 40 tasks | elapsed: 1.6s\n",
|
||||||
|
"[Parallel(n_jobs=-1)]: Done 130 tasks | elapsed: 3.1s\n",
|
||||||
|
"[Parallel(n_jobs=-1)]: Done 256 tasks | elapsed: 5.5s\n",
|
||||||
|
"[Parallel(n_jobs=-1)]: Done 418 tasks | elapsed: 9.3s\n",
|
||||||
|
"[Parallel(n_jobs=-1)]: Done 616 tasks | elapsed: 18.6s\n",
|
||||||
|
"[Parallel(n_jobs=-1)]: Done 850 tasks | elapsed: 28.2s\n",
|
||||||
|
"[Parallel(n_jobs=-1)]: Done 1120 tasks | elapsed: 35.4s\n",
|
||||||
|
"[Parallel(n_jobs=-1)]: Done 1426 tasks | elapsed: 43.5s\n",
|
||||||
|
"[Parallel(n_jobs=-1)]: Done 1768 tasks | elapsed: 51.3s\n",
|
||||||
|
"[Parallel(n_jobs=-1)]: Done 2146 tasks | elapsed: 1.0min\n",
|
||||||
|
"[Parallel(n_jobs=-1)]: Done 2560 tasks | elapsed: 1.2min\n",
|
||||||
|
"[Parallel(n_jobs=-1)]: Done 3010 tasks | elapsed: 1.4min\n",
|
||||||
|
"[Parallel(n_jobs=-1)]: Done 3496 tasks | elapsed: 1.7min\n",
|
||||||
|
"[Parallel(n_jobs=-1)]: Done 4018 tasks | elapsed: 2.1min\n",
|
||||||
|
"[Parallel(n_jobs=-1)]: Done 4576 tasks | elapsed: 2.6min\n",
|
||||||
|
"[Parallel(n_jobs=-1)]: Done 5040 out of 5040 | elapsed: 2.9min finished\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"GridSearchCV(estimator=AdaBoostClassifier(algorithm='SAMME', random_state=1),\n",
|
||||||
|
" n_jobs=-1,\n",
|
||||||
|
" param_grid=[{'base_estimator': [Stree(C=55, max_depth=7,\n",
|
||||||
|
" random_state=1,\n",
|
||||||
|
" split_criteria='max_samples',\n",
|
||||||
|
" tol=0.1)],\n",
|
||||||
|
" 'base_estimator__C': [1, 7, 55],\n",
|
||||||
|
" 'base_estimator__kernel': ['linear'],\n",
|
||||||
|
" 'base_estimator__max_depth': [3, 5, 7],\n",
|
||||||
|
" 'base_estimator__split_criteria': ['max_samples',\n",
|
||||||
|
" 'impuri...\n",
|
||||||
|
" {'base_estimator': [Stree(random_state=1)],\n",
|
||||||
|
" 'base_estimator__C': [1, 7, 55],\n",
|
||||||
|
" 'base_estimator__gamma': [0.1, 1, 10],\n",
|
||||||
|
" 'base_estimator__kernel': ['rbf'],\n",
|
||||||
|
" 'base_estimator__max_depth': [3, 5, 7],\n",
|
||||||
|
" 'base_estimator__split_criteria': ['max_samples',\n",
|
||||||
|
" 'impurity'],\n",
|
||||||
|
" 'base_estimator__tol': [0.1, 0.01],\n",
|
||||||
|
" 'learning_rate': [0.5, 1],\n",
|
||||||
|
" 'n_estimators': [10, 25]}],\n",
|
||||||
|
" return_train_score=True, verbose=5)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 7,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"clf = AdaBoostClassifier(random_state=random_state, algorithm=\"SAMME\")\n",
|
||||||
|
"grid = GridSearchCV(clf, parameters, verbose=5, n_jobs=-1, return_train_score=True)\n",
|
||||||
|
"grid.fit(Xtrain, ytrain)"
|
||||||
|
]
|
||||||
|
},
|
||||||
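A note on `algorithm="SAMME"` in the cell above: with it, AdaBoost reweights using each base Stree's discrete `predict()` output, whereas the default SAMME.R asks the base estimator for `predict_proba` at every boosting round. A sketch of the two configurations:

```python
# Sketch: discrete vs. real boosting with Stree as the base estimator.
ada_discrete = AdaBoostClassifier(
    base_estimator=Stree(random_state=random_state),
    algorithm="SAMME",  # uses predict() from each tree
    random_state=random_state,
)
ada_real = AdaBoostClassifier(
    base_estimator=Stree(random_state=random_state),
    algorithm="SAMME.R",  # needs well-calibrated predict_proba()
    random_state=random_state,
)
```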
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 8,
|
||||||
|
"metadata": {
|
||||||
|
"colab": {},
|
||||||
|
"colab_type": "code",
|
||||||
|
"id": "ZjX88NoYDZE8",
|
||||||
|
"outputId": "285163c8-fa33-4915-8ae7-61c4f7844344",
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"Best estimator: AdaBoostClassifier(algorithm='SAMME',\n",
|
||||||
|
" base_estimator=Stree(C=55, max_depth=7, random_state=1,\n",
|
||||||
|
" split_criteria='max_samples', tol=0.1),\n",
|
||||||
|
" learning_rate=0.5, n_estimators=25, random_state=1)\n",
|
||||||
|
"Best hyperparameters: {'base_estimator': Stree(C=55, max_depth=7, random_state=1, split_criteria='max_samples', tol=0.1), 'base_estimator__C': 55, 'base_estimator__kernel': 'linear', 'base_estimator__max_depth': 7, 'base_estimator__split_criteria': 'max_samples', 'base_estimator__tol': 0.1, 'learning_rate': 0.5, 'n_estimators': 25}\n",
|
||||||
|
"Best accuracy: 0.9511777695988222\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"print(\"Best estimator: \", grid.best_estimator_)\n",
|
||||||
|
"print(\"Best hyperparameters: \", grid.best_params_)\n",
|
||||||
|
"print(\"Best accuracy: \", grid.best_score_)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Best estimator: AdaBoostClassifier(algorithm='SAMME',\n",
|
||||||
|
" base_estimator=Stree(C=55, max_depth=7, random_state=1,\n",
|
||||||
|
" split_criteria='max_samples', tol=0.1),\n",
|
||||||
|
" learning_rate=0.5, n_estimators=25, random_state=1)\n",
|
||||||
|
"Best hyperparameters: {'base_estimator': Stree(C=55, max_depth=7, random_state=1, split_criteria='max_samples', tol=0.1), 'base_estimator__C': 55, 'base_estimator__kernel': 'linear', 'base_estimator__max_depth': 7, 'base_estimator__split_criteria': 'max_samples', 'base_estimator__tol': 0.1, 'learning_rate': 0.5, 'n_estimators': 25}"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Best accuracy: 0.9511777695988222"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"colab": {
|
||||||
|
"name": "gridsearch.ipynb",
|
||||||
|
"provenance": []
|
||||||
|
},
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.8.2"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 4
|
||||||
|
}
|
||||||
|
1 runtime.txt Normal file
@@ -0,0 +1 @@
|
|||||||
|
python-3.8
|
6 setup.py
@@ -1,6 +1,6 @@
|
|||||||
import setuptools
|
import setuptools
|
||||||
|
|
||||||
__version__ = "0.9rc4"
|
__version__ = "1.0rc1"
|
||||||
__author__ = "Ricardo Montañana Gómez"
|
__author__ = "Ricardo Montañana Gómez"
|
||||||
|
|
||||||
|
|
||||||
@@ -25,12 +25,12 @@ setuptools.setup(
|
|||||||
classifiers=[
|
classifiers=[
|
||||||
"Development Status :: 4 - Beta",
|
"Development Status :: 4 - Beta",
|
||||||
"License :: OSI Approved :: MIT License",
|
"License :: OSI Approved :: MIT License",
|
||||||
"Programming Language :: Python :: 3.7",
|
"Programming Language :: Python :: 3.8",
|
||||||
"Natural Language :: English",
|
"Natural Language :: English",
|
||||||
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
||||||
"Intended Audience :: Science/Research",
|
"Intended Audience :: Science/Research",
|
||||||
],
|
],
|
||||||
install_requires=["scikit-learn>=0.23.0", "numpy", "ipympl"],
|
install_requires=["scikit-learn", "numpy", "ipympl"],
|
||||||
test_suite="stree.tests",
|
test_suite="stree.tests",
|
||||||
zip_safe=False,
|
zip_safe=False,
|
||||||
)
|
)
|
||||||
|
496 stree/Strees.py
@@ -3,14 +3,15 @@ __author__ = "Ricardo Montañana Gómez"
|
|||||||
__copyright__ = "Copyright 2020, Ricardo Montañana Gómez"
|
__copyright__ = "Copyright 2020, Ricardo Montañana Gómez"
|
||||||
__license__ = "MIT"
|
__license__ = "MIT"
|
||||||
__version__ = "0.9"
|
__version__ = "0.9"
|
||||||
Build an oblique tree classifier based on SVM Trees
|
Build an oblique tree classifier based on SVM nodes
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import numbers
|
import numbers
|
||||||
import random
|
import random
|
||||||
import warnings
|
import warnings
|
||||||
from itertools import combinations
|
from math import log, factorial
|
||||||
|
from typing import Optional
|
||||||
import numpy as np
|
import numpy as np
|
||||||
from sklearn.base import BaseEstimator, ClassifierMixin
|
from sklearn.base import BaseEstimator, ClassifierMixin
|
||||||
from sklearn.svm import SVC, LinearSVC
|
from sklearn.svm import SVC, LinearSVC
|
||||||
@@ -39,6 +40,7 @@ class Snode:
|
|||||||
features: np.array,
|
features: np.array,
|
||||||
impurity: float,
|
impurity: float,
|
||||||
title: str,
|
title: str,
|
||||||
|
weight: np.ndarray = None,
|
||||||
):
|
):
|
||||||
self._clf = clf
|
self._clf = clf
|
||||||
self._title = title
|
self._title = title
|
||||||
@@ -50,9 +52,12 @@ class Snode:
|
|||||||
self._up = None
|
self._up = None
|
||||||
self._class = None
|
self._class = None
|
||||||
self._feature = None
|
self._feature = None
|
||||||
self._sample_weight = None
|
self._sample_weight = (
|
||||||
|
weight if os.environ.get("TESTING", "NS") != "NS" else None
|
||||||
|
)
|
||||||
self._features = features
|
self._features = features
|
||||||
self._impurity = impurity
|
self._impurity = impurity
|
||||||
|
self._partition_column: int = -1
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def copy(cls, node: "Snode") -> "Snode":
|
def copy(cls, node: "Snode") -> "Snode":
|
||||||
@@ -65,6 +70,12 @@ class Snode:
             node._title,
         )

+    def set_partition_column(self, col: int):
+        self._partition_column = col
+
+    def get_partition_column(self) -> int:
+        return self._partition_column
+
     def set_down(self, son):
         self._down = son

@@ -89,9 +100,8 @@ class Snode:
         classes, card = np.unique(self._y, return_counts=True)
         if len(classes) > 1:
             max_card = max(card)
-            min_card = min(card)
             self._class = classes[card == max_card][0]
-            self._belief = max_card / (max_card + min_card)
+            self._belief = max_card / np.sum(card)
         else:
             self._belief = 1
         try:
@@ -100,24 +110,23 @@ class Snode:
             self._class = None

     def __str__(self) -> str:
+        count_values = np.unique(self._y, return_counts=True)
         if self.is_leaf():
-            count_values = np.unique(self._y, return_counts=True)
-            result = (
+            return (
                 f"{self._title} - Leaf class={self._class} belief="
                 f"{self._belief: .6f} impurity={self._impurity:.4f} "
                 f"counts={count_values}"
             )
-            return result
         else:
             return (
                 f"{self._title} feaures={self._features} impurity="
-                f"{self._impurity:.4f}"
+                f"{self._impurity:.4f} "
+                f"counts={count_values}"
             )


 class Siterator:
-    """Stree preorder iterator
-    """
+    """Stree preorder iterator"""

     def __init__(self, tree: Snode):
         self._stack = []
@@ -163,20 +172,22 @@ class Splitter:
                 f"criterion must be gini or entropy got({criterion})"
             )

-        if criteria not in ["min_distance", "max_samples"]:
+        if criteria not in [
+            "max_samples",
+            "impurity",
+        ]:
             raise ValueError(
-                f"split_criteria has to be min_distance or \
-                max_samples got ({criteria})"
+                f"criteria has to be max_samples or impurity; got ({criteria})"
             )

         if splitter_type not in ["random", "best"]:
             raise ValueError(
-                f"splitter must be either random or best got({splitter_type})"
+                f"splitter must be either random or best, got({splitter_type})"
             )
         self.criterion_function = getattr(self, f"_{self._criterion}")
         self.decision_criteria = getattr(self, f"_{self._criteria}")

-    def impurity(self, y: np.array) -> np.array:
+    def partition_impurity(self, y: np.array) -> np.array:
         return self.criterion_function(y)

     @staticmethod
@@ -186,24 +197,75 @@ class Splitter:

     @staticmethod
     def _entropy(y: np.array) -> float:
-        _, count = np.unique(y, return_counts=True)
-        proportion = count / np.sum(count)
-        return -np.sum(proportion * np.log2(proportion))
+        """Compute entropy of a labels set
+
+        Parameters
+        ----------
+        y : np.array
+            set of labels
+
+        Returns
+        -------
+        float
+            entropy
+        """
+        n_labels = len(y)
+        if n_labels <= 1:
+            return 0
+        counts = np.bincount(y)
+        proportions = counts / n_labels
+        n_classes = np.count_nonzero(proportions)
+        if n_classes <= 1:
+            return 0
+        entropy = 0.0
+        # Compute standard entropy.
+        for prop in proportions:
+            if prop != 0.0:
+                entropy -= prop * log(prop, n_classes)
+        return entropy

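The rewritten _entropy normalizes by taking logarithms in base n_classes, so a uniform label distribution scores 1.0 no matter how many classes it has, and pure or singleton sets score 0. A minimal standalone sketch of the same computation (the free function name `entropy` is ours, for illustration only, not part of Strees.py):

    import numpy as np
    from math import log

    def entropy(y: np.ndarray) -> float:
        # Normalized entropy as in _entropy above: the log base is the
        # number of distinct classes, so any uniform set scores exactly 1.
        n_labels = len(y)
        if n_labels <= 1:
            return 0.0
        proportions = np.bincount(y) / n_labels
        n_classes = np.count_nonzero(proportions)
        if n_classes <= 1:
            return 0.0
        return -sum(p * log(p, n_classes) for p in proportions if p > 0.0)

    print(entropy(np.array([0, 0, 1, 1])))        # 1.0: uniform, 2 classes
    print(entropy(np.array([0, 0, 1, 1, 2, 2])))  # 1.0: uniform, 3 classes
    print(entropy(np.array([0, 0, 0, 0])))        # 0.0: pure node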
     def information_gain(
-        self, labels_up: np.array, labels_dn: np.array
+        self, labels: np.array, labels_up: np.array, labels_dn: np.array
     ) -> float:
-        card_up = labels_up.shape[0] if labels_up is not None else 0
-        card_dn = labels_dn.shape[0] if labels_dn is not None else 0
+        """Compute information gain of a split candidate
+
+        Parameters
+        ----------
+        labels : np.array
+            labels of the dataset
+        labels_up : np.array
+            labels of one side
+        labels_dn : np.array
+            labels on the other side
+
+        Returns
+        -------
+        float
+            information gain
+        """
+        imp_prev = self.criterion_function(labels)
+        card_up = card_dn = imp_up = imp_dn = 0
+        if labels_up is not None:
+            card_up = labels_up.shape[0]
+            imp_up = self.criterion_function(labels_up)
+        if labels_dn is not None:
+            card_dn = labels_dn.shape[0] if labels_dn is not None else 0
+            imp_dn = self.criterion_function(labels_dn)
         samples = card_up + card_dn
-        up = card_up / samples * self.criterion_function(labels_up)
-        dn = card_dn / samples * self.criterion_function(labels_dn)
-        return up + dn
+        if samples == 0:
+            return 0.0
+        else:
+            result = (
+                imp_prev
+                - (card_up / samples) * imp_up
+                - (card_dn / samples) * imp_dn
+            )
+            return result

     def _select_best_set(
         self, dataset: np.array, labels: np.array, features_sets: list
     ) -> list:
-        min_impurity = 1
+        max_gain = 0
         selected = None
         warnings.filterwarnings("ignore", category=ConvergenceWarning)
         for feature_set in features_sets:
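The new signature takes the parent labels explicitly: the gain is the parent impurity minus the cardinality-weighted impurities of the two children, and an empty split yields 0. A sketch under the same convention (`impurity_fn` stands in for the configured criterion function; all names here are ours):

    import numpy as np

    def information_gain(labels, labels_up, labels_dn, impurity_fn) -> float:
        # gain = impurity(parent) - weighted average impurity of children
        card_up = 0 if labels_up is None else labels_up.shape[0]
        card_dn = 0 if labels_dn is None else labels_dn.shape[0]
        samples = card_up + card_dn
        if samples == 0:
            return 0.0
        imp_up = impurity_fn(labels_up) if card_up else 0.0
        imp_dn = impurity_fn(labels_dn) if card_dn else 0.0
        return (
            impurity_fn(labels)
            - (card_up / samples) * imp_up
            - (card_dn / samples) * imp_dn
        )

With the normalized entropy above, splitting the ten-label parent [0, 1, 1, 1, 1, 1, 0, 0, 0, 1] into [0, 1, 1, 1, 1, 1] and [0, 0, 0, 1] gains about 0.2564 — the value the rewritten test table further below expects.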
@@ -211,22 +273,71 @@ class Splitter:
             node = Snode(
                 self._clf, dataset, labels, feature_set, 0.0, "subset"
             )
-            self.partition(dataset, node)
+            self.partition(dataset, node, train=True)
             y1, y2 = self.part(labels)
-            impurity = self.information_gain(y1, y2)
-            if impurity < min_impurity:
-                min_impurity = impurity
+            gain = self.information_gain(labels, y1, y2)
+            if gain > max_gain:
+                max_gain = gain
                 selected = feature_set
-        return selected
+        return selected if selected is not None else feature_set

+    @staticmethod
+    def _generate_spaces(features: int, max_features: int) -> list:
+        """Generate at most 5 feature random combinations
+
+        Parameters
+        ----------
+        features : int
+            number of features in each combination
+        max_features : int
+            number of features in dataset
+
+        Returns
+        -------
+        list
+            list with up to 5 combination of features randomly selected
+        """
+        comb = set()
+        # Generate at most 5 combinations
+        if max_features == features:
+            set_length = 1
+        else:
+            number = factorial(features) / (
+                factorial(max_features) * factorial(features - max_features)
+            )
+            set_length = min(5, number)
+        while len(comb) < set_length:
+            comb.add(
+                tuple(sorted(random.sample(range(features), max_features)))
+            )
+        return list(comb)

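_generate_spaces bounds the combinatorial search: instead of materializing every C(features, max_features) combination as the old `itertools.combinations` call did, it draws at most five distinct sorted tuples. A sketch of the same idea; `math.comb` (Python 3.8+, matching the version bump above) replaces the explicit factorial quotient:

    import random
    from math import comb  # Python 3.8+

    def generate_spaces(features: int, max_features: int) -> list:
        # Draw at most five distinct, sorted feature combinations.
        if max_features == features:
            set_length = 1
        else:
            set_length = min(5, comb(features, max_features))
        spaces = set()
        while len(spaces) < set_length:
            spaces.add(
                tuple(sorted(random.sample(range(features), max_features)))
            )
        return list(spaces)

    random.seed(0)
    print(len(generate_spaces(250, 4)))  # 5, never the ~158 million combos
    print(len(generate_spaces(3, 2)))    # 3, all pairs of three features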
     def _get_subspaces_set(
         self, dataset: np.array, labels: np.array, max_features: int
     ) -> np.array:
-        features = range(dataset.shape[1])
-        features_sets = list(combinations(features, max_features))
+        """Compute the indices of the features selected by splitter depending
+        on the self._splitter_type hyper parameter
+
+        Parameters
+        ----------
+        dataset : np.array
+            array of samples
+        labels : np.array
+            labels of the dataset
+        max_features : int
+            number of features of the subspace
+            (<= number of features in dataset)
+
+        Returns
+        -------
+        np.array
+            indices of the features selected
+        """
+        features_sets = self._generate_spaces(dataset.shape[1], max_features)
         if len(features_sets) > 1:
             if self._splitter_type == "random":
-                return features_sets[random.randint(0, len(features_sets) - 1)]
+                index = random.randint(0, len(features_sets) - 1)
+                return features_sets[index]
             else:
                 return self._select_best_set(dataset, labels, features_sets)
         else:
@@ -234,70 +345,142 @@ class Splitter:

     def get_subspace(
         self, dataset: np.array, labels: np.array, max_features: int
-    ) -> list:
-        """Return the best subspace to make a split
+    ) -> tuple:
+        """Return a subspace of the selected dataset of max_features length.
+        Depending on hyperparmeter
+
+        Parameters
+        ----------
+        dataset : np.array
+            array of samples (# samples, # features)
+        labels : np.array
+            labels of the dataset
+        max_features : int
+            number of features to form the subspace
+
+        Returns
+        -------
+        tuple
+            tuple with the dataset with only the features selected and the
+            indices of the features selected
         """
         indices = self._get_subspaces_set(dataset, labels, max_features)
         return dataset[:, indices], indices

-    @staticmethod
-    def _min_distance(data: np.array, _) -> np.array:
-        # chooses the lowest distance of every sample
-        indices = np.argmin(np.abs(data), axis=1)
-        return np.array(
-            [data[x, y] for x, y in zip(range(len(data[:, 0])), indices)]
-        )
+    def _impurity(self, data: np.array, y: np.array) -> np.array:
+        """return column of dataset to be taken into account to split dataset
+
+        Parameters
+        ----------
+        data : np.array
+            distances to hyper plane of every class
+        y : np.array
+            vector of labels (classes)
+
+        Returns
+        -------
+        np.array
+            column of dataset to be taken into account to split dataset
+        """
+        max_gain = 0
+        selected = -1
+        for col in range(data.shape[1]):
+            tup = y[data[:, col] > 0]
+            tdn = y[data[:, col] <= 0]
+            info_gain = self.information_gain(y, tup, tdn)
+            if info_gain > max_gain:
+                selected = col
+                max_gain = info_gain
+        return selected

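The new "impurity" criterion replaces "min_distance": every column of the (m, nc) distance matrix is a candidate one-vs-rest split, and the column whose sign partition maximizes information gain wins, with -1 signalling that no column gains anything. A compact sketch (the callable `info_gain` is assumed to have the three-argument signature shown earlier; the function name is ours):

    import numpy as np

    def best_partition_column(data: np.ndarray, y: np.ndarray, info_gain) -> int:
        # data: (m, nc) distances to each class hyperplane.  Try splitting
        # on the sign of every column and keep the most informative one.
        max_gain, selected = 0.0, -1
        for col in range(data.shape[1]):
            gain = info_gain(y, y[data[:, col] > 0], y[data[:, col] <= 0])
            if gain > max_gain:
                max_gain, selected = gain, col
        return selected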
     @staticmethod
     def _max_samples(data: np.array, y: np.array) -> np.array:
+        """return column of dataset to be taken into account to split dataset
+
+        Parameters
+        ----------
+        data : np.array
+            distances to hyper plane of every class
+        y : np.array
+            column of dataset to be taken into account to split dataset
+
+        Returns
+        -------
+        np.array
+            column of dataset to be taken into account to split dataset
+        """
         # select the class with max number of samples
         _, samples = np.unique(y, return_counts=True)
-        selected = np.argmax(samples)
-        return data[:, selected]
+        return np.argmax(samples)

-    def partition(self, samples: np.array, node: Snode):
-        """Set the criteria to split arrays
+    def partition(self, samples: np.array, node: Snode, train: bool):
+        """Set the criteria to split arrays. Compute the indices of the samples
+        that should go to one side of the tree (up)
         """
+        # data contains the distances of every sample to every class hyperplane
+        # array of (m, nc) nc = # classes
         data = self._distances(node, samples)
         if data.shape[0] < self._min_samples_split:
-            self._down = np.ones((data.shape[0]), dtype=bool)
+            # there aren't enough samples to split
+            self._up = np.ones((data.shape[0]), dtype=bool)
             return
         if data.ndim > 1:
             # split criteria for multiclass
-            data = self.decision_criteria(data, node._y)
-        self._down = data > 0
+            # Convert data to a (m, 1) array selecting values for samples
+            if train:
+                # in train time we have to compute the column to take into
+                # account to split the dataset
+                col = self.decision_criteria(data, node._y)
+                node.set_partition_column(col)
+            else:
+                # in predcit time just use the column computed in train time
+                # is taking the classifier of class <col>
+                col = node.get_partition_column()
+            if col == -1:
+                # No partition is producing information gain
+                data = np.ones(data.shape)
+            data = data[:, col]
+        self._up = data > 0

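partition now caches a single boolean mask, _up, derived from one column of the distance matrix: the column is computed and stored on the node while training (train=True) and merely read back while predicting, so both phases route samples identically. A sketch of the mask computation alone (free function, ours):

    import numpy as np

    def up_mask(distances: np.ndarray, col: int) -> np.ndarray:
        # Samples with a positive distance on the chosen column go "up".
        if distances.ndim > 1:
            if col == -1:           # no column produced information gain
                distances = np.ones(distances.shape)
            distances = distances[:, col]
        return distances > 0

    d = np.array([[-0.3, 0.2], [0.5, -0.1], [0.7, 0.9]])
    print(up_mask(d, 0))   # [False  True  True]
    print(up_mask(d, -1))  # [ True  True  True] - everything goes up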
+    def part(self, origin: np.array) -> list:
+        """Split an array in two based on indices (self._up) and its complement
+        partition has to be called first to establish up indices
+
+        Parameters
+        ----------
+        origin : np.array
+            dataset to split
+
+        Returns
+        -------
+        list
+            list with two splits of the array
+        """
+        down = ~self._up
+        return [
+            origin[self._up] if any(self._up) else None,
+            origin[down] if any(down) else None,
+        ]

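part then splits any array that is aligned with the samples (X, y, sample_weight, prediction indices) by that mask, handing back None for an empty side. Sketch:

    import numpy as np

    def part(origin: np.ndarray, up: np.ndarray) -> list:
        # Split origin by the boolean mask and its complement; an empty
        # side is reported as None so callers can prune that branch.
        down = ~up
        return [
            origin[up] if any(up) else None,
            origin[down] if any(down) else None,
        ]

    y = np.array([0, 1, 1, 0])
    print(part(y, np.array([True, True, False, False])))
    # [array([0, 1]), array([1, 0])]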
     @staticmethod
     def _distances(node: Snode, data: np.ndarray) -> np.array:
         """Compute distances of the samples to the hyperplane of the node

-        :param node: node containing the svm classifier
-        :type node: Snode
-        :param data: samples to find out distance to hyperplane
-        :type data: np.ndarray
-        :return: array of shape (m, 1) with the distances of every sample to
-        the hyperplane of the node
-        :rtype: np.array
+        Parameters
+        ----------
+        node : Snode
+            node containing the svm classifier
+        data : np.ndarray
+            samples to compute distance to hyperplane
+
+        Returns
+        -------
+        np.array
+            array of shape (m, nc) with the distances of every sample to
+            the hyperplane of every class. nc = # of classes
         """
         return node._clf.decision_function(data[:, node._features])

-    def part(self, origin: np.array) -> list:
-        """Split an array in two based on indices (down) and its complement
-
-        :param origin: dataset to split
-        :type origin: np.array
-        :param down: indices to use to split array
-        :type down: np.array
-        :return: list with two splits of the array
-        :rtype: list
-        """
-        up = ~self._down
-        return [
-            origin[up] if any(up) else None,
-            origin[self._down] if any(self._down) else None,
-        ]
-

 class Stree(BaseEstimator, ClassifierMixin):
     """Estimator that is based on binary trees of svm nodes
@@ -311,14 +494,14 @@ class Stree(BaseEstimator, ClassifierMixin):
         self,
         C: float = 1.0,
         kernel: str = "linear",
-        max_iter: int = 1000,
+        max_iter: int = 1e5,
         random_state: int = None,
         max_depth: int = None,
         tol: float = 1e-4,
         degree: int = 3,
         gamma="scale",
-        split_criteria: str = "max_samples",
-        criterion: str = "gini",
+        split_criteria: str = "impurity",
+        criterion: str = "entropy",
         min_samples_split: int = 0,
         max_features=None,
         splitter: str = "random",
@@ -339,6 +522,7 @@ class Stree(BaseEstimator, ClassifierMixin):

     def _more_tags(self) -> dict:
         """Required by sklearn to supply features of the classifier
+        make mandatory the labels array

         :return: the tag required
         :rtype: dict
@@ -350,16 +534,19 @@ class Stree(BaseEstimator, ClassifierMixin):
     ) -> "Stree":
         """Build the tree based on the dataset of samples and its labels

-        :param X: dataset of samples to make predictions
-        :type X: np.array
-        :param y: samples labels
-        :type y: np.array
-        :param sample_weight: weights of the samples. Rescale C per sample.
-        Hi' weights force the classifier to put more emphasis on these points
-        :type sample_weight: np.array optional
-        :raises ValueError: if parameters C or max_depth are out of bounds
-        :return: itself to be able to chain actions: fit().predict() ...
-        :rtype: Stree
+        Returns
+        -------
+        Stree
+            itself to be able to chain actions: fit().predict() ...
+
+        Raises
+        ------
+        ValueError
+            if C < 0
+        ValueError
+            if max_depth < 1
+        ValueError
+            if all samples have 0 or negative weights
         """
         # Check parameters are Ok.
         if self.C < 0:
@@ -379,7 +566,13 @@ class Stree(BaseEstimator, ClassifierMixin):

         check_classification_targets(y)
         X, y = check_X_y(X, y)
-        sample_weight = _check_sample_weight(sample_weight, X)
+        sample_weight = _check_sample_weight(
+            sample_weight, X, dtype=np.float64
+        )
+        if not any(sample_weight):
+            raise ValueError(
+                "Invalid input - all samples have zero or negative weights."
+            )
         check_classification_targets(y)
         # Initialize computed parameters
         self.splitter_ = Splitter(
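fit now coerces the weights to float64 and fails fast when nothing would be trained. A sketch of the added validation in isolation, assuming sklearn's private _check_sample_weight helper that the diff itself imports:

    import numpy as np
    from sklearn.utils.validation import _check_sample_weight

    def validate_sample_weight(sample_weight, X) -> np.ndarray:
        sample_weight = _check_sample_weight(sample_weight, X, dtype=np.float64)
        # any() is False only when every weight is zero: no sample would
        # contribute to any node classifier, so training cannot proceed.
        if not any(sample_weight):
            raise ValueError(
                "Invalid input - all samples have zero or negative weights."
            )
        return sample_weight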
@@ -401,6 +594,8 @@ class Stree(BaseEstimator, ClassifierMixin):
         self.max_features_ = self._initialize_max_features()
         self.tree_ = self.train(X, y, sample_weight, 1, "root")
         self._build_predictor()
+        self.X_ = X
+        self.y_ = y
         return self

     def train(
@@ -410,26 +605,36 @@ class Stree(BaseEstimator, ClassifierMixin):
         sample_weight: np.ndarray,
         depth: int,
         title: str,
-    ) -> Snode:
+    ) -> Optional[Snode]:
         """Recursive function to split the original dataset into predictor
         nodes (leaves)

-        :param X: samples dataset
-        :type X: np.ndarray
-        :param y: samples labels
-        :type y: np.ndarray
-        :param sample_weight: weight of samples. Rescale C per sample.
-        Hi weights force the classifier to put more emphasis on these points.
-        :type sample_weight: np.ndarray
-        :param depth: actual depth in the tree
-        :type depth: int
-        :param title: description of the node
-        :type title: str
-        :return: binary tree
-        :rtype: Snode
+        Parameters
+        ----------
+        X : np.ndarray
+            samples dataset
+        y : np.ndarray
+            samples labels
+        sample_weight : np.ndarray
+            weight of samples. Rescale C per sample.
+        depth : int
+            actual depth in the tree
+        title : str
+            description of the node
+
+        Returns
+        -------
+        Optional[Snode]
+            binary tree
         """
         if depth > self.__max_depth:
             return None
+        # Mask samples with 0 weight
+        if any(sample_weight == 0):
+            indices_zero = sample_weight == 0
+            X = X[~indices_zero, :]
+            y = y[~indices_zero]
+            sample_weight = sample_weight[~indices_zero]
         if np.unique(y).shape[0] == 1:
            # only 1 class => pure dataset
            return Snode(
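Zero-weight samples are stripped before the node is fitted, so they can no longer distort the purity test or the node's SVC. The masking in isolation (function name ours):

    import numpy as np

    def drop_zero_weight(X, y, sample_weight):
        # Discard samples whose weight is exactly zero before fitting a node.
        if any(sample_weight == 0):
            keep = sample_weight != 0
            X, y, sample_weight = X[keep, :], y[keep], sample_weight[keep]
        return X, y, sample_weight

    X = np.arange(8).reshape(4, 2)
    y = np.array([0, 1, 0, 1])
    w = np.array([1.0, 0.0, 2.0, 0.0])
    print(drop_zero_weight(X, y, w)[1])  # [0 0] - only weighted samples stay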
@@ -439,15 +644,16 @@ class Stree(BaseEstimator, ClassifierMixin):
                 features=X.shape[1],
                 impurity=0.0,
                 title=title + ", <pure>",
+                weight=sample_weight,
             )
         # Train the model
         clf = self._build_clf()
         Xs, features = self.splitter_.get_subspace(X, y, self.max_features_)
         clf.fit(Xs, y, sample_weight=sample_weight)
-        impurity = self.splitter_.impurity(y)
-        node = Snode(clf, X, y, features, impurity, title)
+        impurity = self.splitter_.partition_impurity(y)
+        node = Snode(clf, X, y, features, impurity, title, sample_weight)
         self.depth_ = max(depth, self.depth_)
-        self.splitter_.partition(X, node)
+        self.splitter_.partition(X, node, True)
         X_U, X_D = self.splitter_.part(X)
         y_u, y_d = self.splitter_.part(y)
         sw_u, sw_d = self.splitter_.part(sample_weight)
@@ -460,14 +666,14 @@ class Stree(BaseEstimator, ClassifierMixin):
                 features=X.shape[1],
                 impurity=impurity,
                 title=title + ", <cgaf>",
+                weight=sample_weight,
             )
         node.set_up(self.train(X_U, y_u, sw_u, depth + 1, title + " - Up"))
         node.set_down(self.train(X_D, y_d, sw_d, depth + 1, title + " - Down"))
         return node

     def _build_predictor(self):
-        """Process the leaves to make them predictors
-        """
+        """Process the leaves to make them predictors"""

         def run_tree(node: Snode):
             if node.is_leaf():
@@ -479,8 +685,7 @@ class Stree(BaseEstimator, ClassifierMixin):
         run_tree(self.tree_)

     def _build_clf(self):
-        """ Build the correct classifier for the node
-        """
+        """Build the correct classifier for the node"""
         return (
             LinearSVC(
                 max_iter=self.max_iter,
@@ -503,12 +708,17 @@ class Stree(BaseEstimator, ClassifierMixin):
     def _reorder_results(y: np.array, indices: np.array) -> np.array:
         """Reorder an array based on the array of indices passed

-        :param y: data untidy
-        :type y: np.array
-        :param indices: indices used to set order
-        :type indices: np.array
-        :return: array y ordered
-        :rtype: np.array
+        Parameters
+        ----------
+        y : np.array
+            data untidy
+        indices : np.array
+            indices used to set order
+
+        Returns
+        -------
+        np.array
+            array y ordered
         """
         # return array of same type given in y
         y_ordered = y.copy()
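Predictions are computed per-leaf in partition order together with the original row indices, and _reorder_results scatters them back. The core is a single fancy-indexing assignment (sketch, names ours):

    import numpy as np

    def reorder_results(y: np.ndarray, indices: np.ndarray) -> np.ndarray:
        # y[k] is the prediction for the row originally at indices[k].
        y_ordered = y.copy()
        y_ordered[indices] = y
        return y_ordered

    print(reorder_results(np.array([10, 20, 30]), np.array([2, 0, 1])))
    # [20 30 10]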
@@ -520,10 +730,22 @@ class Stree(BaseEstimator, ClassifierMixin):
     def predict(self, X: np.array) -> np.array:
         """Predict labels for each sample in dataset passed

-        :param X: dataset of samples
-        :type X: np.array
-        :return: array of labels
-        :rtype: np.array
+        Parameters
+        ----------
+        X : np.array
+            dataset of samples
+
+        Returns
+        -------
+        np.array
+            array of labels
+
+        Raises
+        ------
+        ValueError
+            if dataset with inconsistent number of features
+        NotFittedError
+            if model is not fitted
         """

         def predict_class(
@@ -535,7 +757,7 @@ class Stree(BaseEstimator, ClassifierMixin):
                 # set a class for every sample in dataset
                 prediction = np.full((xp.shape[0], 1), node._class)
                 return prediction, indices
-            self.splitter_.partition(xp, node)
+            self.splitter_.partition(xp, node, train=False)
             x_u, x_d = self.splitter_.part(xp)
             i_u, i_d = self.splitter_.part(indices)
             prx_u, prin_u = predict_class(x_u, i_u, node.get_up())
@@ -565,15 +787,19 @@ class Stree(BaseEstimator, ClassifierMixin):
     ) -> float:
         """Compute accuracy of the prediction

-        :param X: dataset of samples to make predictions
-        :type X: np.array
-        :param y_true: samples labels
-        :type y_true: np.array
-        :param sample_weight: weights of the samples. Rescale C per sample.
-        Hi' weights force the classifier to put more emphasis on these points
-        :type sample_weight: np.array optional
-        :return: accuracy of the prediction
-        :rtype: float
+        Parameters
+        ----------
+        X : np.array
+            dataset of samples to make predictions
+        y : np.array
+            samples labels
+        sample_weight : np.array, optional
+            weights of the samples. Rescale C per sample, by default None
+
+        Returns
+        -------
+        float
+            accuracy of the prediction
         """
         # sklearn check
         check_is_fitted(self)
@@ -590,8 +816,10 @@ class Stree(BaseEstimator, ClassifierMixin):
         """Create an iterator to be able to visit the nodes of the tree in
         preorder, can make a list with all the nodes in preorder

-        :return: an iterator, can for i in... and list(...)
-        :rtype: Siterator
+        Returns
+        -------
+        Siterator
+            an iterator, can for i in... and list(...)
         """
         try:
             tree = self.tree_
@@ -602,8 +830,10 @@ class Stree(BaseEstimator, ClassifierMixin):
     def __str__(self) -> str:
         """String representation of the tree

-        :return: description of nodes in the tree in preorder
-        :rtype: str
+        Returns
+        -------
+        str
+            description of nodes in the tree in preorder
         """
         output = ""
         for i in self:
@@ -33,22 +33,20 @@ class Snode_test(unittest.TestCase):
             max_card = max(card)
             min_card = min(card)
             if len(classes) > 1:
-                try:
-                    belief = max_card / (max_card + min_card)
-                except ZeroDivisionError:
-                    belief = 0.0
+                belief = max_card / (max_card + min_card)
             else:
                 belief = 1
             self.assertEqual(belief, node._belief)
             # Check Class
             class_computed = classes[card == max_card]
             self.assertEqual(class_computed, node._class)
+            # Check Partition column
+            self.assertEqual(node._partition_column, -1)

         check_leave(self._clf.tree_)

     def test_nodes_coefs(self):
-        """Check if the nodes of the tree have the right attributes filled
-        """
+        """Check if the nodes of the tree have the right attributes filled"""

         def run_tree(node: Snode):
             if node._belief < 1:
@@ -57,16 +55,19 @@ class Snode_test(unittest.TestCase):
                 self.assertIsNotNone(node._clf.coef_)
             if node.is_leaf():
                 return
-            run_tree(node.get_down())
             run_tree(node.get_up())
+            run_tree(node.get_down())

-        run_tree(self._clf.tree_)
+        model = Stree(self._random_state)
+        model.fit(*load_dataset(self._random_state, 3, 4))
+        run_tree(model.tree_)

     def test_make_predictor_on_leaf(self):
         test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
         test.make_predictor()
         self.assertEqual(1, test._class)
         self.assertEqual(0.75, test._belief)
+        self.assertEqual(-1, test._partition_column)

     def test_make_predictor_on_not_leaf(self):
         test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test")
@@ -74,11 +75,14 @@ class Snode_test(unittest.TestCase):
         test.make_predictor()
         self.assertIsNone(test._class)
         self.assertEqual(0, test._belief)
+        self.assertEqual(-1, test._partition_column)
+        self.assertEqual(-1, test.get_up()._partition_column)

     def test_make_predictor_on_leaf_bogus_data(self):
         test = Snode(None, [1, 2, 3, 4], [], [], 0.0, "test")
         test.make_predictor()
         self.assertIsNone(test._class)
+        self.assertEqual(-1, test._partition_column)

     def test_copy_node(self):
         px = [1, 2, 3, 4]
@@ -89,3 +93,4 @@ class Snode_test(unittest.TestCase):
         self.assertListEqual(computed._y, py)
         self.assertEqual("test", computed._title)
         self.assertIsInstance(computed._clf, Stree)
+        self.assertEqual(test._partition_column, computed._partition_column)
@@ -1,11 +1,11 @@
 import os
 import unittest
+import random

 import numpy as np
-from sklearn.svm import LinearSVC
+from sklearn.svm import SVC
+from sklearn.datasets import load_wine, load_iris
 from stree import Splitter
-from .utils import load_dataset


 class Splitter_test(unittest.TestCase):
@@ -15,15 +15,15 @@ class Splitter_test(unittest.TestCase):

     @staticmethod
     def build(
-        clf=LinearSVC(),
+        clf=SVC,
         min_samples_split=0,
         splitter_type="random",
         criterion="gini",
-        criteria="min_distance",
+        criteria="max_samples",
         random_state=None,
     ):
         return Splitter(
-            clf=clf,
+            clf=clf(random_state=random_state, kernel="rbf"),
             min_samples_split=min_samples_split,
             splitter_type=splitter_type,
             criterion=criterion,
@@ -43,10 +43,10 @@ class Splitter_test(unittest.TestCase):
         with self.assertRaises(ValueError):
             self.build(criteria="duck")
         with self.assertRaises(ValueError):
-            self.build(clf=None)
+            _ = Splitter(clf=None)
         for splitter_type in ["best", "random"]:
             for criterion in ["gini", "entropy"]:
-                for criteria in ["min_distance", "max_samples"]:
+                for criteria in ["max_samples", "impurity"]:
                     tcl = self.build(
                         splitter_type=splitter_type,
                         criterion=criterion,
@@ -57,30 +57,74 @@ class Splitter_test(unittest.TestCase):
                     self.assertEqual(criteria, tcl._criteria)

     def test_gini(self):
-        y = [0, 1, 1, 1, 1, 1, 0, 0, 0, 1]
-        expected = 0.48
-        self.assertEqual(expected, Splitter._gini(y))
-        tcl = self.build(criterion="gini")
-        self.assertEqual(expected, tcl.criterion_function(y))
+        expected_values = [
+            ([0, 1, 1, 1, 1, 1, 0, 0, 0, 1], 0.48),
+            ([0, 1, 1, 2, 2, 3, 4, 5, 3, 2, 1, 1], 0.7777777777777778),
+            ([0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2], 0.520408163265306),
+            ([0, 0, 1, 1, 1, 1, 0, 0], 0.5),
+            ([0, 0, 1, 1, 2, 2, 3, 3], 0.75),
+            ([0, 0, 1, 1, 1, 1, 1, 1], 0.375),
+            ([0], 0),
+            ([1, 1, 1, 1], 0),
+        ]
+        for labels, expected in expected_values:
+            self.assertAlmostEqual(expected, Splitter._gini(labels))
+            tcl = self.build(criterion="gini")
+            self.assertAlmostEqual(expected, tcl.criterion_function(labels))

     def test_entropy(self):
-        y = [0, 1, 1, 1, 1, 1, 0, 0, 0, 1]
-        expected = 0.9709505944546686
-        self.assertAlmostEqual(expected, Splitter._entropy(y))
-        tcl = self.build(criterion="entropy")
-        self.assertEqual(expected, tcl.criterion_function(y))
+        expected_values = [
+            ([0, 1, 1, 1, 1, 1, 0, 0, 0, 1], 0.9709505944546686),
+            ([0, 1, 1, 2, 2, 3, 4, 5, 3, 2, 1, 1], 0.9111886696810589),
+            ([0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2], 0.8120406807940999),
+            ([0, 0, 1, 1, 1, 1, 0, 0], 1),
+            ([0, 0, 1, 1, 2, 2, 3, 3], 1),
+            ([0, 0, 1, 1, 1, 1, 1, 1], 0.8112781244591328),
+            ([1], 0),
+            ([0, 0, 0, 0], 0),
+        ]
+        for labels, expected in expected_values:
+            self.assertAlmostEqual(expected, Splitter._entropy(labels))
+            tcl = self.build(criterion="entropy")
+            self.assertAlmostEqual(expected, tcl.criterion_function(labels))

     def test_information_gain(self):
-        yu = np.array([0, 1, 1, 1, 1, 1])
-        yd = np.array([0, 0, 0, 1])
-        values_expected = [
-            ("gini", 0.31666666666666665),
-            ("entropy", 0.7145247027726656),
+        expected_values = [
+            (
+                [0, 1, 1, 1, 1, 1],
+                [0, 0, 0, 1],
+                0.16333333333333333,
+                0.25642589168200297,
+            ),
+            (
+                [0, 1, 1, 2, 2, 3, 4, 5, 3, 2, 1, 1],
+                [5, 3, 2, 1, 1],
+                0.007381776239907684,
+                -0.03328610916207225,
+            ),
+            ([], [], 0.0, 0.0),
+            ([1], [], 0.0, 0.0),
+            ([], [1], 0.0, 0.0),
+            ([0, 0, 0, 0], [0, 0], 0.0, 0.0),
+            ([], [1, 1, 1, 2], 0.0, 0.0),
+            (None, [1, 2, 3], 0.0, 0.0),
+            ([1, 2, 3], None, 0.0, 0.0),
         ]
-        for criterion, expected in values_expected:
-            tcl = self.build(criterion=criterion)
-            computed = tcl.information_gain(yu, yd)
-            self.assertAlmostEqual(expected, computed)
+        for yu, yd, expected_gini, expected_entropy in expected_values:
+            yu = np.array(yu, dtype=np.int32) if yu is not None else None
+            yd = np.array(yd, dtype=np.int32) if yd is not None else None
+            if yu is not None and yd is not None:
+                complete = np.append(yu, yd)
+            elif yd is not None:
+                complete = yd
+            else:
+                complete = yu
+            tcl = self.build(criterion="gini")
+            computed = tcl.information_gain(complete, yu, yd)
+            self.assertAlmostEqual(expected_gini, computed)
+            tcl = self.build(criterion="entropy")
+            computed = tcl.information_gain(complete, yu, yd)
+            self.assertAlmostEqual(expected_entropy, computed)

     def test_max_samples(self):
         tcl = self.build(criteria="max_samples")
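The table-driven tests pin down both criteria on the same label sets. Gini is quick to verify by hand as 1 minus the sum of squared class proportions; a sketch of that standard formula (the diff does not show _gini's body, so this is an assumption, though the values match the test table):

    import numpy as np

    def gini(y) -> float:
        # Gini impurity: 1 - sum of squared class proportions.
        _, counts = np.unique(y, return_counts=True)
        proportions = counts / counts.sum()
        return 1.0 - float(np.sum(proportions ** 2))

    print(gini([0, 1, 1, 1, 1, 1, 0, 0, 0, 1]))        # 0.48
    print(gini([0, 1, 1, 2, 2, 3, 4, 5, 3, 2, 1, 1]))  # 0.7777...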
@@ -90,52 +134,90 @@ class Splitter_test(unittest.TestCase):
                 [0.7, 0.01, -0.1],
                 [0.7, -0.9, 0.5],
                 [0.1, 0.2, 0.3],
+                [-0.1, 0.2, 0.3],
+                [-0.1, 0.2, 0.3],
             ]
         )
-        expected = np.array([0.2, 0.01, -0.9, 0.2])
-        y = [1, 2, 1, 0]
+        expected = data[:, 0]
+        y = [1, 2, 1, 0, 0, 0]
         computed = tcl._max_samples(data, y)
-        self.assertEqual((4,), computed.shape)
-        self.assertListEqual(expected.tolist(), computed.tolist())
+        self.assertEqual(0, computed)
+        computed_data = data[:, computed]
+        self.assertEqual((6,), computed_data.shape)
+        self.assertListEqual(expected.tolist(), computed_data.tolist())

-    def test_min_distance(self):
-        tcl = self.build()
+    def test_impurity(self):
+        tcl = self.build(criteria="impurity")
         data = np.array(
             [
                 [-0.1, 0.2, -0.3],
                 [0.7, 0.01, -0.1],
                 [0.7, -0.9, 0.5],
                 [0.1, 0.2, 0.3],
+                [-0.1, 0.2, 0.3],
+                [-0.1, 0.2, 0.3],
             ]
         )
-        expected = np.array([-0.1, 0.01, 0.5, 0.1])
-        computed = tcl._min_distance(data, None)
-        self.assertEqual((4,), computed.shape)
-        self.assertListEqual(expected.tolist(), computed.tolist())
+        expected = data[:, 2]
+        y = np.array([1, 2, 1, 0, 0, 0])
+        computed = tcl._impurity(data, y)
+        self.assertEqual(2, computed)
+        computed_data = data[:, computed]
+        self.assertEqual((6,), computed_data.shape)
+        self.assertListEqual(expected.tolist(), computed_data.tolist())
+
+    def test_generate_subspaces(self):
+        features = 250
+        for max_features in range(2, features):
+            num = len(Splitter._generate_spaces(features, max_features))
+            self.assertEqual(5, num)
+        self.assertEqual(3, len(Splitter._generate_spaces(3, 2)))
+        self.assertEqual(4, len(Splitter._generate_spaces(4, 3)))
+
+    def test_best_splitter_few_sets(self):
+        X, y = load_iris(return_X_y=True)
+        X = np.delete(X, 3, 1)
+        tcl = self.build(splitter_type="best", random_state=self._random_state)
+        dataset, computed = tcl.get_subspace(X, y, max_features=2)
+        self.assertListEqual([0, 2], list(computed))
+        self.assertListEqual(X[:, computed].tolist(), dataset.tolist())

     def test_splitter_parameter(self):
         expected_values = [
-            [1, 7, 9],
-            [1, 7, 9],
-            [1, 7, 9],
-            [1, 7, 9],
-            [0, 5, 6],
-            [0, 5, 6],
-            [0, 5, 6],
-            [0, 5, 6],
+            [1, 4, 9, 12],  # best   entropy max_samples
+            [1, 3, 6, 10],  # best   entropy impurity
+            [6, 8, 10, 12],  # best   gini    max_samples
+            [7, 8, 10, 11],  # best   gini    impurity
+            [0, 3, 8, 12],  # random entropy max_samples
+            [0, 3, 9, 11],  # random entropy impurity
+            [0, 4, 7, 12],  # random gini    max_samples
+            [0, 2, 5, 6],  # random gini    impurity
         ]
-        X, y = load_dataset(self._random_state, n_features=12)
+        X, y = load_wine(return_X_y=True)
+        rn = 0
         for splitter_type in ["best", "random"]:
-            for criterion in ["gini", "entropy"]:
-                for criteria in ["min_distance", "max_samples"]:
+            for criterion in ["entropy", "gini"]:
+                for criteria in [
+                    "max_samples",
+                    "impurity",
+                ]:
                     tcl = self.build(
                         splitter_type=splitter_type,
                         criterion=criterion,
                         criteria=criteria,
-                        random_state=self._random_state,
                     )
                     expected = expected_values.pop(0)
-                    dataset, computed = tcl.get_subspace(X, y, max_features=3)
+                    random.seed(rn)
+                    rn += 1
+                    dataset, computed = tcl.get_subspace(X, y, max_features=4)
+                    # print(
+                    #     "{}, # {:7s}{:8s}{:15s}".format(
+                    #         list(computed),
+                    #         splitter_type,
+                    #         criterion,
+                    #         criteria,
+                    #     )
+                    # )
                     self.assertListEqual(expected, list(computed))
                     self.assertListEqual(
                         X[:, computed].tolist(), dataset.tolist()
@@ -1,8 +1,11 @@
 import os
 import unittest
+import warnings

 import numpy as np
-from sklearn.datasets import load_iris
+from sklearn.datasets import load_iris, load_wine
+from sklearn.exceptions import ConvergenceWarning
+from sklearn.svm import LinearSVC

 from stree import Stree, Snode
 from .utils import load_dataset
@@ -23,8 +26,10 @@ class Stree_test(unittest.TestCase):
         correct number of labels and its sons have the right number of elements
         in their dataset

-        Arguments:
-            node {Snode} -- node to check
+        Parameters
+        ----------
+        node : Snode
+            node to check
         """
         if node.is_leaf():
             return
@@ -39,53 +44,28 @@ class Stree_test(unittest.TestCase):
         _, count_u = np.unique(y_up, return_counts=True)
         #
         for i in unique_y:
+            number_up = count_u[i]
             try:
                 number_down = count_d[i]
             except IndexError:
                 number_down = 0
-            try:
-                number_up = count_u[i]
-            except IndexError:
-                number_up = 0
             self.assertEqual(count_y[i], number_down + number_up)
         # Is the partition made the same as the prediction?
         # as the node is not a leaf...
         _, count_yp = np.unique(y_prediction, return_counts=True)
-        self.assertEqual(count_yp[0], y_up.shape[0])
-        self.assertEqual(count_yp[1], y_down.shape[0])
+        self.assertEqual(count_yp[1], y_up.shape[0])
+        self.assertEqual(count_yp[0], y_down.shape[0])
         self._check_tree(node.get_down())
         self._check_tree(node.get_up())

     def test_build_tree(self):
-        """Check if the tree is built the same way as predictions of models
-        """
-        import warnings
-
+        """Check if the tree is built the same way as predictions of models"""
         warnings.filterwarnings("ignore")
         for kernel in self._kernels:
             clf = Stree(kernel=kernel, random_state=self._random_state)
             clf.fit(*load_dataset(self._random_state))
             self._check_tree(clf.tree_)

-    @staticmethod
-    def _find_out(px: np.array, x_original: np.array, y_original) -> list:
-        """Find the original values of y for a given array of samples
-
-        Arguments:
-            px {np.array} -- array of samples to search for
-            x_original {np.array} -- original dataset
-            y_original {[type]} -- original classes
-
-        Returns:
-            np.array -- classes of the given samples
-        """
-        res = []
-        for needle in px:
-            for row in range(x_original.shape[0]):
-                if all(x_original[row, :] == needle):
-                    res.append(y_original[row])
-        return res
-
     def test_single_prediction(self):
         X, y = load_dataset(self._random_state)
         for kernel in self._kernels:
@@ -102,22 +82,6 @@ class Stree_test(unittest.TestCase):
             yp = clf.fit(X, y).predict(X[:num, :])
             self.assertListEqual(y[:num].tolist(), yp.tolist())

-    def test_score(self):
-        X, y = load_dataset(self._random_state)
-        accuracies = [
-            0.9506666666666667,
-            0.9606666666666667,
-            0.9433333333333334,
-        ]
-        for kernel, accuracy_expected in zip(self._kernels, accuracies):
-            clf = Stree(random_state=self._random_state, kernel=kernel,)
-            clf.fit(X, y)
-            accuracy_score = clf.score(X, y)
-            yp = clf.predict(X)
-            accuracy_computed = np.mean(yp == y)
-            self.assertEqual(accuracy_score, accuracy_computed)
-            self.assertAlmostEqual(accuracy_expected, accuracy_score)
-
     def test_single_vs_multiple_prediction(self):
         """Check if predicting sample by sample gives the same result as
         predicting all samples at once
@@ -137,20 +101,22 @@ class Stree_test(unittest.TestCase):
             self.assertListEqual(yp_line.tolist(), yp_once.tolist())

     def test_iterator_and_str(self):
-        """Check preorder iterator
-        """
+        """Check preorder iterator"""
         expected = [
-            "root feaures=(0, 1, 2) impurity=0.5000",
-            "root - Down feaures=(0, 1, 2) impurity=0.0671",
-            "root - Down - Down, <cgaf> - Leaf class=1 belief= 0.975989 "
-            "impurity=0.0469 counts=(array([0, 1]), array([ 17, 691]))",
-            "root - Down - Up feaures=(0, 1, 2) impurity=0.3967",
-            "root - Down - Up - Down, <cgaf> - Leaf class=1 belief= 0.750000 "
-            "impurity=0.3750 counts=(array([0, 1]), array([1, 3]))",
-            "root - Down - Up - Up, <pure> - Leaf class=0 belief= 1.000000 "
-            "impurity=0.0000 counts=(array([0]), array([7]))",
-            "root - Up, <cgaf> - Leaf class=0 belief= 0.928297 impurity=0.1331"
-            " counts=(array([0, 1]), array([725, 56]))",
+            "root feaures=(0, 1, 2) impurity=1.0000 counts=(array([0, 1]), arr"
+            "ay([750, 750]))",
+            "root - Down, <cgaf> - Leaf class=0 belief= 0.928297 impurity=0.37"
+            "22 counts=(array([0, 1]), array([725, 56]))",
+            "root - Up feaures=(0, 1, 2) impurity=0.2178 counts=(array([0, 1])"
+            ", array([ 25, 694]))",
+            "root - Up - Down feaures=(0, 1, 2) impurity=0.8454 counts=(array("
+            "[0, 1]), array([8, 3]))",
+            "root - Up - Down - Down, <pure> - Leaf class=0 belief= 1.000000 i"
+            "mpurity=0.0000 counts=(array([0]), array([7]))",
+            "root - Up - Down - Up, <cgaf> - Leaf class=1 belief= 0.750000 imp"
+            "urity=0.8113 counts=(array([0, 1]), array([1, 3]))",
+            "root - Up - Up, <cgaf> - Leaf class=1 belief= 0.975989 impurity=0"
+            ".1634 counts=(array([0, 1]), array([ 17, 691]))",
         ]
         computed = []
         expected_string = ""
@@ -164,9 +130,6 @@ class Stree_test(unittest.TestCase):

     @staticmethod
     def test_is_a_sklearn_classifier():
-        import warnings
-        from sklearn.exceptions import ConvergenceWarning
-
         warnings.filterwarnings("ignore", category=ConvergenceWarning)
         warnings.filterwarnings("ignore", category=RuntimeWarning)
         from sklearn.utils.estimator_checks import check_estimator
@@ -229,38 +192,43 @@ class Stree_test(unittest.TestCase):
     def test_muticlass_dataset(self):
         datasets = {
             "Synt": load_dataset(random_state=self._random_state, n_classes=3),
-            "Iris": load_iris(return_X_y=True),
+            "Iris": load_wine(return_X_y=True),
         }
         outcomes = {
             "Synt": {
-                "max_samples linear": 0.9533333333333334,
-                "max_samples rbf": 0.836,
-                "max_samples poly": 0.9473333333333334,
-                "min_distance linear": 0.9533333333333334,
-                "min_distance rbf": 0.836,
-                "min_distance poly": 0.9473333333333334,
+                "max_samples linear": 0.9606666666666667,
+                "max_samples rbf": 0.7133333333333334,
+                "max_samples poly": 0.49066666666666664,
+                "impurity linear": 0.9606666666666667,
+                "impurity rbf": 0.7133333333333334,
+                "impurity poly": 0.49066666666666664,
             },
             "Iris": {
-                "max_samples linear": 0.98,
-                "max_samples rbf": 1.0,
-                "max_samples poly": 1.0,
-                "min_distance linear": 0.98,
-                "min_distance rbf": 1.0,
-                "min_distance poly": 1.0,
+                "max_samples linear": 1.0,
+                "max_samples rbf": 0.6910112359550562,
+                "max_samples poly": 0.6966292134831461,
+                "impurity linear": 1,
+                "impurity rbf": 0.6910112359550562,
+                "impurity poly": 0.6966292134831461,
             },
         }

         for name, dataset in datasets.items():
             px, py = dataset
-            for criteria in ["max_samples", "min_distance"]:
+            for criteria in ["max_samples", "impurity"]:
                 for kernel in self._kernels:
                     clf = Stree(
-                        C=1e4,
-                        max_iter=1e4,
+                        C=55,
+                        max_iter=1e5,
                         kernel=kernel,
                         random_state=self._random_state,
                     )
                     clf.fit(px, py)
                     outcome = outcomes[name][f"{criteria} {kernel}"]
+                    # print(
+                    #     f"{name} {criteria} {kernel} {outcome} {clf.score(px"
+                    #     ", py)}"
+                    # )
                     self.assertAlmostEqual(outcome, clf.score(px, py))

     def test_max_features(self):
@@ -322,13 +290,152 @@ class Stree_test(unittest.TestCase):
|
|||||||
with self.assertRaises(ValueError):
|
with self.assertRaises(ValueError):
|
||||||
clf.predict(X[:, :3])
|
clf.predict(X[:, :3])
|
||||||
|
|
||||||
|
# Tests of score
|
||||||
|
|
||||||
|
def test_score_binary(self):
|
||||||
|
X, y = load_dataset(self._random_state)
|
||||||
|
accuracies = [
|
||||||
|
0.9506666666666667,
|
||||||
|
0.9606666666666667,
|
||||||
|
0.9433333333333334,
|
||||||
|
]
|
||||||
|
for kernel, accuracy_expected in zip(self._kernels, accuracies):
|
||||||
|
clf = Stree(
|
||||||
|
random_state=self._random_state,
|
||||||
|
kernel=kernel,
|
||||||
|
)
|
||||||
|
clf.fit(X, y)
|
||||||
|
accuracy_score = clf.score(X, y)
|
||||||
|
yp = clf.predict(X)
|
||||||
|
accuracy_computed = np.mean(yp == y)
|
||||||
|
self.assertEqual(accuracy_score, accuracy_computed)
|
||||||
|
self.assertAlmostEqual(accuracy_expected, accuracy_score)
|
||||||
|
|
||||||
     def test_score_max_features(self):
         X, y = load_dataset(self._random_state)
         clf = Stree(random_state=self._random_state, max_features=2)
         clf.fit(X, y)
-        self.assertAlmostEqual(0.9426666666666667, clf.score(X, y))
+        self.assertAlmostEqual(0.9246666666666666, clf.score(X, y))
 
     def test_bogus_splitter_parameter(self):
         clf = Stree(splitter="duck")
         with self.assertRaises(ValueError):
             clf.fit(*load_dataset())
 
+    def test_multiclass_classifier_integrity(self):
+        """Checks if the multiclass operation is done right"""
+        X, y = load_iris(return_X_y=True)
+        clf = Stree(random_state=0)
+        clf.fit(X, y)
+        score = clf.score(X, y)
+        # Check accuracy of the whole model
+        self.assertAlmostEqual(0.98, score, 5)
+        svm = LinearSVC(random_state=0)
+        svm.fit(X, y)
+        self.assertAlmostEqual(0.9666666666666667, svm.score(X, y), 5)
+        data = svm.decision_function(X)
+        expected = [
+            0.4444444444444444,
+            0.35777777777777775,
+            0.4569777777777778,
+        ]
+        ty = data.copy()
+        ty[data <= 0] = 0
+        ty[data > 0] = 1
+        ty = ty.astype(int)
+        for i in range(3):
+            self.assertAlmostEqual(
+                expected[i],
+                clf.splitter_._gini(ty[:, i]),
+            )
+        # 1st Branch
+        # up has to have 50 samples of class 0
+        # down should have 100 [50, 50]
+        up = data[:, 2] > 0
+        resup = np.unique(y[up], return_counts=True)
+        resdn = np.unique(y[~up], return_counts=True)
+        self.assertListEqual([1, 2], resup[0].tolist())
+        self.assertListEqual([3, 50], resup[1].tolist())
+        self.assertListEqual([0, 1], resdn[0].tolist())
+        self.assertListEqual([50, 47], resdn[1].tolist())
+        # 2nd Branch
+        # up should have 53 samples of classes [1, 2] [3, 50]
+        # down should have 47 samples of class 1
+        node_up = clf.tree_.get_down().get_up()
+        node_dn = clf.tree_.get_down().get_down()
+        resup = np.unique(node_up._y, return_counts=True)
+        resdn = np.unique(node_dn._y, return_counts=True)
+        self.assertListEqual([1, 2], resup[0].tolist())
+        self.assertListEqual([3, 50], resup[1].tolist())
+        self.assertListEqual([1], resdn[0].tolist())
+        self.assertListEqual([47], resdn[1].tolist())
+
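The expected list above is the standard Gini index of the binarized one-vs-rest labels: for the first iris column (class 0 vs rest, 50 positives against 100 negatives) it is 1 - ((50/150)^2 + (100/150)^2) = 4/9 ≈ 0.4444444444444444. A self-contained sketch of that computation (the gini helper below is illustrative, not STree's private Splitter._gini):

    import numpy as np

    def gini(labels):
        # Gini impurity: 1 minus the sum of squared class proportions
        _, counts = np.unique(labels, return_counts=True)
        proportions = counts / counts.sum()
        return 1.0 - np.sum(proportions ** 2)

    # class 0 vs rest on iris: 50 positives, 100 negatives
    labels = np.array([1] * 50 + [0] * 100)
    print(gini(labels))  # 0.4444444444444444 == 4 / 9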
+    def test_score_multiclass_rbf(self):
+        X, y = load_dataset(
+            random_state=self._random_state,
+            n_classes=3,
+            n_features=5,
+            n_samples=500,
+        )
+        clf = Stree(kernel="rbf", random_state=self._random_state)
+        self.assertEqual(0.824, clf.fit(X, y).score(X, y))
+        X, y = load_wine(return_X_y=True)
+        self.assertEqual(0.6741573033707865, clf.fit(X, y).score(X, y))
+
+    def test_score_multiclass_poly(self):
+        X, y = load_dataset(
+            random_state=self._random_state,
+            n_classes=3,
+            n_features=5,
+            n_samples=500,
+        )
+        clf = Stree(
+            kernel="poly", random_state=self._random_state, C=10, degree=5
+        )
+        self.assertEqual(0.786, clf.fit(X, y).score(X, y))
+        X, y = load_wine(return_X_y=True)
+        self.assertEqual(0.702247191011236, clf.fit(X, y).score(X, y))
+
+    def test_score_multiclass_linear(self):
+        X, y = load_dataset(
+            random_state=self._random_state,
+            n_classes=3,
+            n_features=5,
+            n_samples=1500,
+        )
+        clf = Stree(kernel="linear", random_state=self._random_state)
+        self.assertEqual(0.9533333333333334, clf.fit(X, y).score(X, y))
+        X, y = load_wine(return_X_y=True)
+        self.assertEqual(0.9550561797752809, clf.fit(X, y).score(X, y))
+
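Taken together, the three new tests record how each kernel fares on the same data, with linear clearly ahead (0.953 vs 0.824 and 0.786 on the synthetic set; 0.955 vs 0.674 and 0.702 on wine). A quick sketch for reproducing that comparison outside the test suite, using only calls the tests above already make (random_state=0 is an arbitrary illustrative choice):

    # Side-by-side kernel comparison on the synthetic dataset
    X, y = load_dataset(random_state=0, n_classes=3, n_features=5, n_samples=500)
    for kernel in ("linear", "rbf", "poly"):
        clf = Stree(kernel=kernel, random_state=0)
        print(kernel, clf.fit(X, y).score(X, y))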
+    def test_zero_all_sample_weights(self):
+        X, y = load_dataset(self._random_state)
+        with self.assertRaises(ValueError):
+            Stree().fit(X, y, np.zeros(len(y)))
+
+    def test_mask_samples_weighted_zero(self):
+        X = np.array(
+            [
+                [1, 1],
+                [1, 1],
+                [1, 1],
+                [2, 2],
+                [2, 2],
+                [2, 2],
+                [3, 3],
+                [3, 3],
+                [3, 3],
+            ]
+        )
+        y = np.array([1, 1, 1, 2, 2, 2, 5, 5, 5])
+        yw = np.array([1, 1, 1, 5, 5, 5, 5, 5, 5])
+        w = [1, 1, 1, 0, 0, 0, 1, 1, 1]
+        model1 = Stree().fit(X, y)
+        model2 = Stree().fit(X, y, w)
+        predict1 = model1.predict(X)
+        predict2 = model2.predict(X)
+        self.assertListEqual(y.tolist(), predict1.tolist())
+        self.assertListEqual(yw.tolist(), predict2.tolist())
+        self.assertEqual(model1.score(X, y), 1)
+        self.assertAlmostEqual(model2.score(X, y), 0.66666667)
+        self.assertEqual(model2.score(X, y, w), 1)
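The last three assertions hinge on weighted accuracy: zero-weight samples neither influence training nor count toward the weighted score, so model2 misses only the zero-weighted class-2 samples (unweighted accuracy 6/9 ≈ 0.667) yet scores 1 once those samples are weighted out. Assuming the usual scikit-learn definition of weighted accuracy, the arithmetic works out as:

    import numpy as np

    def weighted_accuracy(y_true, y_pred, sample_weight):
        # Correct predictions weighted by sample weight
        w = np.asarray(sample_weight, dtype=float)
        hits = (np.asarray(y_true) == np.asarray(y_pred)).astype(float)
        return np.sum(w * hits) / np.sum(w)

    y_true = [1, 1, 1, 2, 2, 2, 5, 5, 5]
    y_pred = [1, 1, 1, 5, 5, 5, 5, 5, 5]  # zero-weighted class 2 predicted as 5
    w = [1, 1, 1, 0, 0, 0, 1, 1, 1]
    print(weighted_accuracy(y_true, y_pred, w))           # 1.0
    print(weighted_accuracy(y_true, y_pred, np.ones(9)))  # 0.666...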
@@ -1,9 +1,9 @@
 from sklearn.datasets import make_classification
 
 
-def load_dataset(random_state=0, n_classes=2, n_features=3):
+def load_dataset(random_state=0, n_classes=2, n_features=3, n_samples=1500):
     X, y = make_classification(
-        n_samples=1500,
+        n_samples=n_samples,
         n_features=n_features,
         n_informative=3,
         n_redundant=0,
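This change surfaces the previously hard-coded sample count as an n_samples parameter with the old value as its default, which is what lets the new multiclass tests request smaller datasets. For example, mirroring the call in test_score_multiclass_rbf above:

    # Default keeps the old behavior: 1500 samples
    X, y = load_dataset(random_state=0)

    # New: a smaller dataset for the multiclass kernel tests
    X, y = load_dataset(random_state=0, n_classes=3, n_features=5, n_samples=500)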