Compare commits


8 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| | 5f8ca8f3bb | Reformat test with new black version | 2024-03-05 18:46:19 +01:00 |
| Ricardo Montañana Gómez | fb8b9b344f | Update README.md<br>update installation instructions | 2024-03-05 18:18:55 +01:00 |
| | 036d1ba2a7 | Add separate methods to return nodes/leaves/depth | 2023-11-27 10:02:14 +01:00 |
| | 4de74973b8 | Black format issue | 2023-07-12 14:16:08 +02:00 |
| Ricardo Montañana Gómez | 28dd04b95a | Update benchmark.ipynb | 2023-05-13 14:44:49 +02:00 |
| Ricardo Montañana Gómez | 542bbce7db | ci: ⬆️ Update ci files and badges | 2023-01-15 02:18:41 +01:00 |
| Ricardo Montañana Gómez | 5b791bc5bf | New_version_sklearn (#56)<br>* test: 🧪 Update max_iter as int in test_multiclass_dataset<br>* refactor: 📝 Rename base_estimator to estimator as the former is deprecated in notebook<br>* refactor: 📌 Convert max_iter to int as needed in sklearn 1.2<br>* chore: 🔖 Update version info to 1.3.1 | 2023-01-15 01:21:32 +01:00 |
| Ricardo Montañana Gómez | c37f044e3a | Update doc and version 1.30 (#55)<br>* Add complete classes counts to node and tests<br>* Implement optimized predict and new predict_proba<br>* Add predict_proba test<br>* Add python 3.10 to CI<br>* Update version number and documentation | 2022-10-21 13:31:59 +02:00 |
12 changed files with 391 additions and 309 deletions

View File

@@ -2,12 +2,12 @@ name: "CodeQL"
 on:
   push:
-    branches: [ master ]
+    branches: [master]
   pull_request:
     # The branches below must be a subset of the branches above
-    branches: [ master ]
+    branches: [master]
   schedule:
-    - cron: '16 17 * * 3'
+    - cron: "16 17 * * 3"
 jobs:
   analyze:
@@ -17,40 +17,40 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        language: [ 'python' ]
+        language: ["python"]
         # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
         # Learn more:
         # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
     steps:
-      - name: Checkout repository
-        uses: actions/checkout@v2
+      - name: Checkout repository
+        uses: actions/checkout@v2
-      # Initializes the CodeQL tools for scanning.
-      - name: Initialize CodeQL
-        uses: github/codeql-action/init@v1
-        with:
-          languages: ${{ matrix.language }}
-          # If you wish to specify custom queries, you can do so here or in a config file.
-          # By default, queries listed here will override any specified in a config file.
-          # Prefix the list here with "+" to use these queries and those in the config file.
-          # queries: ./path/to/local/query, your-org/your-repo/queries@main
+      # Initializes the CodeQL tools for scanning.
+      - name: Initialize CodeQL
+        uses: github/codeql-action/init@v2
+        with:
+          languages: ${{ matrix.language }}
+          # If you wish to specify custom queries, you can do so here or in a config file.
+          # By default, queries listed here will override any specified in a config file.
+          # Prefix the list here with "+" to use these queries and those in the config file.
+          # queries: ./path/to/local/query, your-org/your-repo/queries@main
-      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
-      # If this step fails, then you should remove it and run the build manually (see below)
-      - name: Autobuild
-        uses: github/codeql-action/autobuild@v1
+      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+      # If this step fails, then you should remove it and run the build manually (see below)
+      - name: Autobuild
+        uses: github/codeql-action/autobuild@v2
-      # Command-line programs to run using the OS shell.
-      # 📚 https://git.io/JvXDl
+      # Command-line programs to run using the OS shell.
+      # 📚 https://git.io/JvXDl
-      # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
-      #    and modify them (or add more) to build your code if your project
-      #    uses a compiled language
+      # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
+      #    and modify them (or add more) to build your code if your project
+      #    uses a compiled language
-      #- run: |
-      #   make bootstrap
-      #   make release
+      #- run: |
+      #   make bootstrap
+      #   make release
-      - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@v1
+      - name: Perform CodeQL Analysis
+        uses: github/codeql-action/analyze@v2

View File

@@ -16,9 +16,9 @@ jobs:
         python: [3.8, "3.10"]
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Set up Python ${{ matrix.python }}
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python }}
       - name: Install dependencies
@@ -35,7 +35,7 @@ jobs:
           coverage run -m unittest -v stree.tests
           coverage xml
       - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v1
+        uses: codecov/codecov-action@v3
         with:
           token: ${{ secrets.CODECOV_TOKEN }}
           files: ./coverage.xml

View File

@@ -1,7 +1,7 @@
![CI](https://github.com/Doctorado-ML/STree/workflows/CI/badge.svg)
[![CodeQL](https://github.com/Doctorado-ML/STree/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/Doctorado-ML/STree/actions/workflows/codeql-analysis.yml)
[![codecov](https://codecov.io/gh/doctorado-ml/stree/branch/master/graph/badge.svg)](https://codecov.io/gh/doctorado-ml/stree)
[![Codacy Badge](https://app.codacy.com/project/badge/Grade/35fa3dfd53a24a339344b33d9f9f2f3d)](https://www.codacy.com/gh/Doctorado-ML/STree?utm_source=github.com&utm_medium=referral&utm_content=Doctorado-ML/STree&utm_campaign=Badge_Grade)
[![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/Doctorado-ML/STree.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/Doctorado-ML/STree/context:python)
[![PyPI version](https://badge.fury.io/py/STree.svg)](https://badge.fury.io/py/STree)
![https://img.shields.io/badge/python-3.8%2B-blue](https://img.shields.io/badge/python-3.8%2B-brightgreen)
[![DOI](https://zenodo.org/badge/262658230.svg)](https://zenodo.org/badge/latestdoi/262658230)
@@ -15,7 +15,7 @@ Oblique Tree classifier based on SVM nodes. The nodes are built and splitted wit
## Installation
```bash
pip install git+https://github.com/doctorado-ml/stree
pip install Stree
```
## Documentation
@@ -50,7 +50,8 @@ Can be found in [stree.readthedocs.io](https://stree.readthedocs.io/en/stable/)
| | criterion | {“gini”, “entropy”} | entropy | The function to measure the quality of a split (only used if max_features != num_features). <br>Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain. |
| | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) for any |
| | max_features | \<int\>, \<float\> <br><br>or {“auto”, “sqrt”, “log2”} | None | The number of features to consider when looking for the split:<br>If int, then consider max_features features at each split.<br>If float, then max_features is a fraction and int(max_features \* n_features) features are considered at each split.<br>If “auto”, then max_features=sqrt(n_features).<br>If “sqrt”, then max_features=sqrt(n_features).<br>If “log2”, then max_features=log2(n_features).<br>If None, then max_features=n_features. |
| | splitter | {"best", "random", "trandom", "mutual", "cfs", "fcbf", "iwss"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features). Supported strategies are: **best”**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **random”**: The algorithm generates 5 candidates and choose the best (max. info. gain) of them. **trandom”**: The algorithm generates only one random combination. **"mutual"**: Chooses the best features w.r.t. their mutual info with the label. **"cfs"**: Apply Correlation-based Feature Selection. **"fcbf"**: Apply Fast Correlation-Based Filter. **"iwss"**: IWSS based algorithm |
| | splitter | {"best", "random", "trandom", "mutual", "cfs", "fcbf", "iwss"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features).
Supported strategies are: **best”**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **random”**: The algorithm generates 5 candidates and choose the best (max. info. gain) of them. **trandom”**: The algorithm generates only one random combination. **"mutual"**: Chooses the best features w.r.t. their mutual info with the label. **"cfs"**: Apply Correlation-based Feature Selection. **"fcbf"**: Apply Fast Correlation-Based Filter. **"iwss"**: IWSS based algorithm |
| | normalize | \<bool\> | False | If standardization of features should be applied on each node with the samples that reach it |
| \* | multiclass_strategy | {"ovo", "ovr"} | "ovo" | Strategy to use with multiclass datasets, **"ovo"**: one versus one. **"ovr"**: one versus rest |
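
With the package now on PyPI, a minimal end-to-end sketch (the wine dataset and the parameter values are illustrative, not taken from this diff):

```python
# Quick start after `pip install Stree`. Stree follows the scikit-learn
# estimator API, so fit/predict/score work as usual.
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
from stree import Stree

X, y = load_wine(return_X_y=True)
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, random_state=0)

clf = Stree(kernel="linear", C=1.0, random_state=0)
clf.fit(Xtrain, ytrain)
print("accuracy:", clf.score(Xtest, ytest))
```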

View File

@@ -12,19 +12,18 @@
 #
 import os
 import sys
-import stree
+from stree._version import __version__
 sys.path.insert(0, os.path.abspath("../../stree/"))
 # -- Project information -----------------------------------------------------
 project = "STree"
-copyright = "2020 - 2021, Ricardo Montañana Gómez"
+copyright = "2020 - 2022, Ricardo Montañana Gómez"
 author = "Ricardo Montañana Gómez"
 # The full version, including alpha/beta/rc tags
-version = stree.__version__
+version = __version__
 release = version
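
The net effect: the docs build no longer imports the full stree package just to read a version string. A sketch of the trade-off, assuming stree/__init__.py imports the package's runtime dependencies:

```python
# Heavy: executes stree/__init__.py, which pulls in the package's runtime
# dependencies, so the Sphinx environment needs the full stack installed.
# import stree
# version = stree.__version__

# Light: _version.py is a one-line module with no imports of its own.
from stree._version import __version__ as version

release = version
```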

View File

@@ -3,20 +3,20 @@
 | | **Hyperparameter** | **Type/Values** | **Default** | **Meaning** |
 | --- | ------------------- | -------------------------------------------------------------- | ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
 | \* | C | \<float\> | 1.0 | Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. |
-| \* | kernel | {"liblinear", "linear", "poly", "rbf", "sigmoid"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of liblinear, linear, poly or rbf. liblinear uses [liblinear](https://www.csie.ntu.edu.tw/~cjlin/liblinear/) library and the rest uses [libsvm](https://www.csie.ntu.edu.tw/~cjlin/libsvm/) library through scikit-learn library |
+| \* | kernel | {"liblinear", "linear", "poly", "rbf", "sigmoid"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of liblinear, linear, poly or rbf.<br>liblinear uses [liblinear](https://www.csie.ntu.edu.tw/~cjlin/liblinear/) library and the rest uses [libsvm](https://www.csie.ntu.edu.tw/~cjlin/libsvm/) library through scikit-learn library |
 | \* | max_iter | \<int\> | 1e5 | Hard limit on iterations within solver, or -1 for no limit. |
 | \* | random_state | \<int\> | None | Controls the pseudo random number generation for shuffling the data for probability estimates. Ignored when probability is False.<br>Pass an int for reproducible output across multiple function calls |
 | | max_depth | \<int\> | None | Specifies the maximum depth of the tree |
 | \* | tol | \<float\> | 1e-4 | Tolerance for stopping criterion. |
 | \* | degree | \<int\> | 3 | Degree of the polynomial kernel function (poly). Ignored by all other kernels. |
 | \* | gamma | {"scale", "auto"} or \<float\> | scale | Kernel coefficient for rbf, poly and sigmoid.<br>if gamma='scale' (default) is passed then it uses 1 / (n_features \* X.var()) as value of gamma,<br>if auto, uses 1 / n_features. |
-| | split_criteria | {"impurity", "max_samples"} | impurity | Decides (just in case of a multi class classification) which column (class) use to split the dataset in a node\*\*. max_samples is incompatible with 'ovo' multiclass_strategy |
-| | criterion | {“gini”, “entropy”} | entropy | The function to measure the quality of a split (only used if max_features != num_features). <br>Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain. |
+| | split_criteria | {"impurity", "max_samples"} | impurity | Decides (just in case of a multi class classification) which column (class) use to split the dataset in a node\*\*.<br>max_samples is incompatible with 'ovo' multiclass_strategy |
+| | criterion | {“gini”, “entropy”} | entropy | The function to measure the quality of a split (only used if max_features != num_features).<br>Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain. |
 | | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) for any |
 | | max_features | \<int\>, \<float\> <br><br>or {“auto”, “sqrt”, “log2”} | None | The number of features to consider when looking for the split:<br>If int, then consider max_features features at each split.<br>If float, then max_features is a fraction and int(max_features \* n_features) features are considered at each split.<br>If “auto”, then max_features=sqrt(n_features).<br>If “sqrt”, then max_features=sqrt(n_features).<br>If “log2”, then max_features=log2(n_features).<br>If None, then max_features=n_features. |
-| | splitter | {"best", "random", "trandom", "mutual", "cfs", "fcbf", "iwss"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features). Supported strategies are: **best”**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **random”**: The algorithm generates 5 candidates and choose the best (max. info. gain) of them. **trandom”**: The algorithm generates only one random combination. **"mutual"**: Chooses the best features w.r.t. their mutual info with the label. **"cfs"**: Apply Correlation-based Feature Selection. **"fcbf"**: Apply Fast Correlation-Based Filter. **"iwss"**: IWSS based algorithm |
+| | splitter | {"best", "random", "trandom", "mutual", "cfs", "fcbf", "iwss"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features).<br>Supported strategies are:<br>**“best”**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features.<br>**“random”**: The algorithm generates 5 candidates and choose the best (max. info. gain) of them.<br>**“trandom”**: The algorithm generates only one random combination.<br>**"mutual"**: Chooses the best features w.r.t. their mutual info with the label.<br>**"cfs"**: Apply Correlation-based Feature Selection.<br>**"fcbf"**: Apply Fast Correlation-Based Filter.<br>**"iwss"**: IWSS based algorithm |
 | | normalize | \<bool\> | False | If standardization of features should be applied on each node with the samples that reach it |
-| \* | multiclass_strategy | {"ovo", "ovr"} | "ovo" | Strategy to use with multiclass datasets, **"ovo"**: one versus one. **"ovr"**: one versus rest |
+| \* | multiclass_strategy | {"ovo", "ovr"} | "ovo" | Strategy to use with multiclass datasets:<br>**"ovo"**: one versus one.<br>**"ovr"**: one versus rest |
 \* Hyperparameter used by the support vector classifier of every node
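
As a reading aid for the table above, a sketch that exercises several of the documented hyperparameters (the values are illustrative, not recommendations):

```python
from stree import Stree

# Starred rows in the table are forwarded to the SVC trained at each node;
# the remaining hyperparameters control the tree itself.
clf = Stree(
    kernel="rbf",               # node classifier kernel
    C=7,
    gamma="scale",
    max_depth=5,                # tree-level limit
    max_features="sqrt",        # features considered at each split
    splitter="mutual",          # rank candidate features by mutual info
    criterion="entropy",
    multiclass_strategy="ovo",
    normalize=True,             # standardize samples reaching each node
    random_state=0,
)
```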

View File

@@ -178,7 +178,7 @@
    "outputs": [],
    "source": [
     "# Stree\n",
-    "stree = Stree(random_state=random_state, C=.01, max_iter=1e3, kernel=\"liblinear\", multiclass_strategy=\"ovr\")"
+    "stree = Stree(random_state=random_state, C=.01, max_iter=1000, kernel=\"liblinear\", multiclass_strategy=\"ovr\")"
    ]
   },
   {
@@ -198,7 +198,7 @@
    "outputs": [],
    "source": [
     "# SVC (linear)\n",
-    "svc = LinearSVC(random_state=random_state, C=.01, max_iter=1e3)"
+    "svc = LinearSVC(random_state=random_state, C=.01, max_iter=1000)"
    ]
   },
   {
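
Both cells replace max_iter=1e3 with max_iter=1000: 1e3 is a Python float, and scikit-learn 1.2 started validating integer hyperparameters strictly (the point of commit 5b791bc5bf). A minimal sketch of the failure mode, assuming sklearn >= 1.2:

```python
from sklearn.datasets import make_classification
from sklearn.svm import LinearSVC

X, y = make_classification(random_state=0)

# LinearSVC(max_iter=1e3).fit(X, y)  # raises InvalidParameterError under
#                                    # sklearn >= 1.2: max_iter must be int

LinearSVC(max_iter=1000).fit(X, y)       # int literal: fine
LinearSVC(max_iter=int(1e3)).fit(X, y)   # explicit conversion also works
```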

View File

@@ -1,253 +1,253 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Test Gridsearch\n",
"with different kernels and different configurations"
]
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Test Gridsearch\n",
"with different kernels and different configurations"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Setup\n",
"Uncomment the next cell if STree is not already installed"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#\n",
"# Google Colab setup\n",
"#\n",
"#!pip install git+https://github.com/doctorado-ml/stree\n",
"!pip install pandas"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "zIHKVxthDZEa"
},
"outputs": [],
"source": [
"import random\n",
"import os\n",
"import pandas as pd\n",
"import numpy as np\n",
"from sklearn.ensemble import AdaBoostClassifier\n",
"from sklearn.svm import LinearSVC\n",
"from sklearn.model_selection import GridSearchCV, train_test_split\n",
"from stree import Stree"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "IEmq50QgDZEi"
},
"outputs": [],
"source": [
"if not os.path.isfile('data/creditcard.csv'):\n",
" !wget --no-check-certificate --content-disposition http://nube.jccm.es/index.php/s/Zs7SYtZQJ3RQ2H2/download\n",
" !tar xzf creditcard.tgz"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "z9Q-YUfBDZEq",
"outputId": "afc822fb-f16a-4302-8a67-2b9e2880159b",
"tags": []
},
"outputs": [],
"source": [
"random_state=1\n",
"\n",
"def load_creditcard(n_examples=0):\n",
" df = pd.read_csv('data/creditcard.csv')\n",
" print(\"Fraud: {0:.3f}% {1}\".format(df.Class[df.Class == 1].count()*100/df.shape[0], df.Class[df.Class == 1].count()))\n",
" print(\"Valid: {0:.3f}% {1}\".format(df.Class[df.Class == 0].count()*100/df.shape[0], df.Class[df.Class == 0].count()))\n",
" y = df.Class\n",
" X = df.drop(['Class', 'Time', 'Amount'], axis=1).values\n",
" if n_examples > 0:\n",
" # Take first n_examples samples\n",
" X = X[:n_examples, :]\n",
" y = y[:n_examples, :]\n",
" else:\n",
" # Take all the positive samples with a number of random negatives\n",
" if n_examples < 0:\n",
" Xt = X[(y == 1).ravel()]\n",
" yt = y[(y == 1).ravel()]\n",
" indices = random.sample(range(X.shape[0]), -1 * n_examples)\n",
" X = np.append(Xt, X[indices], axis=0)\n",
" y = np.append(yt, y[indices], axis=0)\n",
" print(\"X.shape\", X.shape, \" y.shape\", y.shape)\n",
" print(\"Fraud: {0:.3f}% {1}\".format(len(y[y == 1])*100/X.shape[0], len(y[y == 1])))\n",
" print(\"Valid: {0:.3f}% {1}\".format(len(y[y == 0]) * 100 / X.shape[0], len(y[y == 0])))\n",
" Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, train_size=0.7, shuffle=True, random_state=random_state, stratify=y)\n",
" return Xtrain, Xtest, ytrain, ytest\n",
"\n",
"data = load_creditcard(-1000) # Take all true samples + 1000 of the others\n",
"# data = load_creditcard(5000) # Take the first 5000 samples\n",
"# data = load_creditcard(0) # Take all the samples\n",
"\n",
"Xtrain = data[0]\n",
"Xtest = data[1]\n",
"ytrain = data[2]\n",
"ytest = data[3]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Tests"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "HmX3kR4PDZEw"
},
"outputs": [],
"source": [
"parameters = [{\n",
" 'base_estimator': [Stree(random_state=random_state)],\n",
" 'n_estimators': [10, 25],\n",
" 'learning_rate': [.5, 1],\n",
" 'estimator__split_criteria': ['max_samples', 'impurity'],\n",
" 'estimator__tol': [.1, 1e-02],\n",
" 'estimator__max_depth': [3, 5, 7],\n",
" 'estimator__C': [1, 7, 55],\n",
" 'estimator__kernel': ['linear']\n",
"},\n",
"{\n",
" 'base_estimator': [Stree(random_state=random_state)],\n",
" 'n_estimators': [10, 25],\n",
" 'learning_rate': [.5, 1],\n",
" 'estimator__split_criteria': ['max_samples', 'impurity'],\n",
" 'estimator__tol': [.1, 1e-02],\n",
" 'estimator__max_depth': [3, 5, 7],\n",
" 'estimator__C': [1, 7, 55],\n",
" 'estimator__degree': [3, 5, 7],\n",
" 'estimator__kernel': ['poly']\n",
"},\n",
"{\n",
" 'base_estimator': [Stree(random_state=random_state)],\n",
" 'n_estimators': [10, 25],\n",
" 'learning_rate': [.5, 1],\n",
" 'estimator__split_criteria': ['max_samples', 'impurity'],\n",
" 'estimator__tol': [.1, 1e-02],\n",
" 'estimator__max_depth': [3, 5, 7],\n",
" 'estimator__C': [1, 7, 55],\n",
" 'estimator__gamma': [.1, 1, 10],\n",
" 'estimator__kernel': ['rbf']\n",
"}]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Stree().get_params()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "CrcB8o6EDZE5",
"outputId": "7703413a-d563-4289-a13b-532f38f82762",
"tags": []
},
"outputs": [],
"source": [
"clf = AdaBoostClassifier(random_state=random_state, algorithm=\"SAMME\")\n",
"grid = GridSearchCV(clf, parameters, verbose=5, n_jobs=-1, return_train_score=True)\n",
"grid.fit(Xtrain, ytrain)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "ZjX88NoYDZE8",
"outputId": "285163c8-fa33-4915-8ae7-61c4f7844344",
"tags": []
},
"outputs": [],
"source": [
"print(\"Best estimator: \", grid.best_estimator_)\n",
"print(\"Best hyperparameters: \", grid.best_params_)\n",
"print(\"Best accuracy: \", grid.best_score_)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Best estimator: AdaBoostClassifier(algorithm='SAMME',\n",
" base_estimator=Stree(C=55, max_depth=7, random_state=1,\n",
" split_criteria='max_samples', tol=0.1),\n",
" learning_rate=0.5, n_estimators=25, random_state=1)\n",
"Best hyperparameters: {'base_estimator': Stree(C=55, max_depth=7, random_state=1, split_criteria='max_samples', tol=0.1), 'estimator__C': 55, 'estimator__kernel': 'linear', 'estimator__max_depth': 7, 'estimator__split_criteria': 'max_samples', 'estimator__tol': 0.1, 'learning_rate': 0.5, 'n_estimators': 25}"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Best accuracy: 0.9511777695988222"
]
}
],
"metadata": {
"colab": {
"name": "gridsearch.ipynb",
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.2-final"
}
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Setup\n",
"Uncomment the next cell if STree is not already installed"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#\n",
"# Google Colab setup\n",
"#\n",
"#!pip install git+https://github.com/doctorado-ml/stree\n",
"!pip install pandas"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "zIHKVxthDZEa"
},
"outputs": [],
"source": [
"import random\n",
"import os\n",
"import pandas as pd\n",
"import numpy as np\n",
"from sklearn.ensemble import AdaBoostClassifier\n",
"from sklearn.svm import LinearSVC\n",
"from sklearn.model_selection import GridSearchCV, train_test_split\n",
"from stree import Stree"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "IEmq50QgDZEi"
},
"outputs": [],
"source": [
"if not os.path.isfile('data/creditcard.csv'):\n",
" !wget --no-check-certificate --content-disposition http://nube.jccm.es/index.php/s/Zs7SYtZQJ3RQ2H2/download\n",
" !tar xzf creditcard.tgz"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "z9Q-YUfBDZEq",
"outputId": "afc822fb-f16a-4302-8a67-2b9e2880159b",
"tags": []
},
"outputs": [],
"source": [
"random_state=1\n",
"\n",
"def load_creditcard(n_examples=0):\n",
" df = pd.read_csv('data/creditcard.csv')\n",
" print(\"Fraud: {0:.3f}% {1}\".format(df.Class[df.Class == 1].count()*100/df.shape[0], df.Class[df.Class == 1].count()))\n",
" print(\"Valid: {0:.3f}% {1}\".format(df.Class[df.Class == 0].count()*100/df.shape[0], df.Class[df.Class == 0].count()))\n",
" y = df.Class\n",
" X = df.drop(['Class', 'Time', 'Amount'], axis=1).values\n",
" if n_examples > 0:\n",
" # Take first n_examples samples\n",
" X = X[:n_examples, :]\n",
" y = y[:n_examples, :]\n",
" else:\n",
" # Take all the positive samples with a number of random negatives\n",
" if n_examples < 0:\n",
" Xt = X[(y == 1).ravel()]\n",
" yt = y[(y == 1).ravel()]\n",
" indices = random.sample(range(X.shape[0]), -1 * n_examples)\n",
" X = np.append(Xt, X[indices], axis=0)\n",
" y = np.append(yt, y[indices], axis=0)\n",
" print(\"X.shape\", X.shape, \" y.shape\", y.shape)\n",
" print(\"Fraud: {0:.3f}% {1}\".format(len(y[y == 1])*100/X.shape[0], len(y[y == 1])))\n",
" print(\"Valid: {0:.3f}% {1}\".format(len(y[y == 0]) * 100 / X.shape[0], len(y[y == 0])))\n",
" Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, train_size=0.7, shuffle=True, random_state=random_state, stratify=y)\n",
" return Xtrain, Xtest, ytrain, ytest\n",
"\n",
"data = load_creditcard(-1000) # Take all true samples + 1000 of the others\n",
"# data = load_creditcard(5000) # Take the first 5000 samples\n",
"# data = load_creditcard(0) # Take all the samples\n",
"\n",
"Xtrain = data[0]\n",
"Xtest = data[1]\n",
"ytrain = data[2]\n",
"ytest = data[3]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Tests"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "HmX3kR4PDZEw"
},
"outputs": [],
"source": [
"parameters = [{\n",
" 'base_estimator': [Stree(random_state=random_state)],\n",
" 'n_estimators': [10, 25],\n",
" 'learning_rate': [.5, 1],\n",
" 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
" 'base_estimator__tol': [.1, 1e-02],\n",
" 'base_estimator__max_depth': [3, 5, 7],\n",
" 'base_estimator__C': [1, 7, 55],\n",
" 'base_estimator__kernel': ['linear']\n",
"},\n",
"{\n",
" 'base_estimator': [Stree(random_state=random_state)],\n",
" 'n_estimators': [10, 25],\n",
" 'learning_rate': [.5, 1],\n",
" 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
" 'base_estimator__tol': [.1, 1e-02],\n",
" 'base_estimator__max_depth': [3, 5, 7],\n",
" 'base_estimator__C': [1, 7, 55],\n",
" 'base_estimator__degree': [3, 5, 7],\n",
" 'base_estimator__kernel': ['poly']\n",
"},\n",
"{\n",
" 'base_estimator': [Stree(random_state=random_state)],\n",
" 'n_estimators': [10, 25],\n",
" 'learning_rate': [.5, 1],\n",
" 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
" 'base_estimator__tol': [.1, 1e-02],\n",
" 'base_estimator__max_depth': [3, 5, 7],\n",
" 'base_estimator__C': [1, 7, 55],\n",
" 'base_estimator__gamma': [.1, 1, 10],\n",
" 'base_estimator__kernel': ['rbf']\n",
"}]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Stree().get_params()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "CrcB8o6EDZE5",
"outputId": "7703413a-d563-4289-a13b-532f38f82762",
"tags": []
},
"outputs": [],
"source": [
"clf = AdaBoostClassifier(random_state=random_state, algorithm=\"SAMME\")\n",
"grid = GridSearchCV(clf, parameters, verbose=5, n_jobs=-1, return_train_score=True)\n",
"grid.fit(Xtrain, ytrain)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "ZjX88NoYDZE8",
"outputId": "285163c8-fa33-4915-8ae7-61c4f7844344",
"tags": []
},
"outputs": [],
"source": [
"print(\"Best estimator: \", grid.best_estimator_)\n",
"print(\"Best hyperparameters: \", grid.best_params_)\n",
"print(\"Best accuracy: \", grid.best_score_)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Best estimator: AdaBoostClassifier(algorithm='SAMME',\n",
" base_estimator=Stree(C=55, max_depth=7, random_state=1,\n",
" split_criteria='max_samples', tol=0.1),\n",
" learning_rate=0.5, n_estimators=25, random_state=1)\n",
"Best hyperparameters: {'base_estimator': Stree(C=55, max_depth=7, random_state=1, split_criteria='max_samples', tol=0.1), 'base_estimator__C': 55, 'base_estimator__kernel': 'linear', 'base_estimator__max_depth': 7, 'base_estimator__split_criteria': 'max_samples', 'base_estimator__tol': 0.1, 'learning_rate': 0.5, 'n_estimators': 25}"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Best accuracy: 0.9511777695988222"
]
}
],
"metadata": {
"colab": {
"name": "gridsearch.ipynb",
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.2-final"
}
},
"nbformat": 4,
"nbformat_minor": 4
"nbformat": 4,
"nbformat_minor": 4
}
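
This notebook is rewritten wholesale for scikit-learn 1.2: besides reindentation, the nested grid keys change from base_estimator__* to estimator__*, following the rename of AdaBoostClassifier's base_estimator argument. A condensed sketch of the naming rule (the tiny grid is illustrative, not the notebook's full search space):

```python
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import GridSearchCV
from stree import Stree

# Nested hyperparameters are addressed as "<param name>__<inner param>",
# so the prefix must match the name the inner model is passed under:
#   sklearn <  1.2: base_estimator=...  ->  "base_estimator__C"
#   sklearn >= 1.2: estimator=...       ->  "estimator__C"
clf = AdaBoostClassifier(estimator=Stree(random_state=1), algorithm="SAMME")
parameters = {
    "n_estimators": [10, 25],
    "learning_rate": [0.5, 1],
    "estimator__C": [1, 7, 55],       # routed to the inner Stree
    "estimator__kernel": ["linear"],
}
grid = GridSearchCV(clf, parameters, n_jobs=-1, return_train_score=True)
# grid.fit(Xtrain, ytrain) would then search the combined space.
```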

View File

@@ -7,9 +7,8 @@ def readme():
         return f.read()
-def get_data(field):
+def get_data(field, file_name="__init__.py"):
     item = ""
-    file_name = "_version.py" if field == "version" else "__init__.py"
     with open(os.path.join("stree", file_name)) as f:
         for line in f.readlines():
             if line.startswith(f"__{field}__"):
@@ -21,9 +20,14 @@ def get_data(field):
     return item
+def get_requirements():
+    with open("requirements.txt") as f:
+        return f.read().splitlines()
 setuptools.setup(
     name="STree",
-    version=get_data("version"),
+    version=get_data("version", "_version.py"),
     license=get_data("license"),
     description="Oblique decision tree with svm nodes",
     long_description=readme(),
@@ -46,7 +50,7 @@ setuptools.setup(
         "Topic :: Scientific/Engineering :: Artificial Intelligence",
         "Intended Audience :: Science/Research",
     ],
-    install_requires=["scikit-learn", "mufs"],
+    install_requires=get_requirements(),
     test_suite="stree.tests",
     zip_safe=False,
 )
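
The revised setup.py single-sources its metadata: get_data() scans a module for a dunder assignment, now taking the file name as a parameter so the version can come from _version.py, and install_requires is read from requirements.txt. A self-contained sketch of the pattern; the value parsing shown here is an assumption, since the body of get_data() is elided from the diff:

```python
import os

def get_data(field, file_name="__init__.py"):
    # Scan stree/<file_name> for a line like __version__ = "1.3.2" and
    # return the quoted value (this parsing is an assumption, see above).
    with open(os.path.join("stree", file_name)) as f:
        for line in f:
            if line.startswith(f"__{field}__"):
                return line.split("=", 1)[1].strip().strip('"')
    return ""

def get_requirements():
    # One requirement per line, exactly as pip would read them.
    with open("requirements.txt") as f:
        return f.read().splitlines()

print(get_data("version", "_version.py"))  # e.g. "1.3.2"
print(get_requirements())                  # e.g. ["scikit-learn", "mufs"]
```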

View File

@@ -267,7 +267,6 @@ class Splitter:
random_state=None,
normalize=False,
):
self._clf = clf
self._random_state = random_state
if random_state is not None:

View File

@@ -139,7 +139,7 @@ class Stree(BaseEstimator, ClassifierMixin):
         self,
         C: float = 1.0,
         kernel: str = "linear",
-        max_iter: int = 1e5,
+        max_iter: int = int(1e5),
         random_state: int = None,
         max_depth: int = None,
         tol: float = 1e-4,
@@ -153,7 +153,6 @@ class Stree(BaseEstimator, ClassifierMixin):
multiclass_strategy: str = "ovo",
normalize: bool = False,
):
self.max_iter = max_iter
self.C = C
self.kernel = kernel
@@ -368,6 +367,21 @@ class Stree(BaseEstimator, ClassifierMixin):
         )
     def __predict_class(self, X: np.array) -> np.array:
+        """Compute the predicted class for the samples in X. Returns the number
+        of samples of each class in the corresponding leaf node.
+
+        Parameters
+        ----------
+        X : np.array
+            Array of samples
+
+        Returns
+        -------
+        np.array
+            Array of shape (n_samples, n_classes) with the number of samples
+            of each class in the corresponding leaf node
+        """
         def compute_prediction(xp, indices, node):
             if xp is None:
                 return
@@ -388,6 +402,25 @@
         return result
     def check_predict(self, X) -> np.array:
+        """Checks predict and predict_proba preconditions. If input X is not an
+        np.array convert it to one.
+
+        Parameters
+        ----------
+        X : np.ndarray
+            Array of samples
+
+        Returns
+        -------
+        np.array
+            Array of samples
+
+        Raises
+        ------
+        ValueError
+            If number of features of X is different of the number of features
+            in training data
+        """
         check_is_fitted(self, ["tree_"])
         # Input validation
         X = check_array(X)
@@ -451,6 +484,43 @@
         X = self.check_predict(X)
         return self.classes_[np.argmax(self.__predict_class(X), axis=1)]
+
+    def get_nodes(self) -> int:
+        """Return the number of nodes in the tree
+
+        Returns
+        -------
+        int
+            number of nodes
+        """
+        nodes = 0
+        for _ in self:
+            nodes += 1
+        return nodes
+
+    def get_leaves(self) -> int:
+        """Return the number of leaves in the tree
+
+        Returns
+        -------
+        int
+            number of leaves
+        """
+        leaves = 0
+        for node in self:
+            if node.is_leaf():
+                leaves += 1
+        return leaves
+
+    def get_depth(self) -> int:
+        """Return the depth of the tree
+
+        Returns
+        -------
+        int
+            depth of the tree
+        """
+        return self.depth_
     def nodes_leaves(self) -> tuple:
         """Compute the number of nodes and leaves in the built tree

View File

@@ -1 +1 @@
-__version__ = "1.2.4"
+__version__ = "1.3.2"

View File

@@ -239,6 +239,7 @@ class Stree_test(unittest.TestCase):
         )
         tcl.fit(*load_dataset(self._random_state))
         self.assertEqual(depth, tcl.depth_)
+        self.assertEqual(depth, tcl.get_depth())
     def test_unfitted_tree_is_iterable(self):
         tcl = Stree()
@@ -306,10 +307,10 @@ class Stree_test(unittest.TestCase):
         for criteria in ["max_samples", "impurity"]:
             for kernel in self._kernels:
                 clf = Stree(
-                    max_iter=1e4,
-                    multiclass_strategy="ovr"
-                    if kernel == "liblinear"
-                    else "ovo",
+                    max_iter=int(1e4),
+                    multiclass_strategy=(
+                        "ovr" if kernel == "liblinear" else "ovo"
+                    ),
                     kernel=kernel,
                     random_state=self._random_state,
                 )
@@ -640,10 +641,12 @@ class Stree_test(unittest.TestCase):
         clf = Stree(random_state=self._random_state)
         clf.fit(X, y)
         self.assertEqual(6, clf.depth_)
+        self.assertEqual(6, clf.get_depth())
         X, y = load_wine(return_X_y=True)
         clf = Stree(random_state=self._random_state)
         clf.fit(X, y)
         self.assertEqual(4, clf.depth_)
+        self.assertEqual(4, clf.get_depth())
     def test_nodes_leaves(self):
         """Check number of nodes and leaves."""
@@ -657,13 +660,17 @@ class Stree_test(unittest.TestCase):
         clf.fit(X, y)
         nodes, leaves = clf.nodes_leaves()
         self.assertEqual(31, nodes)
+        self.assertEqual(31, clf.get_nodes())
         self.assertEqual(16, leaves)
+        self.assertEqual(16, clf.get_leaves())
         X, y = load_wine(return_X_y=True)
         clf = Stree(random_state=self._random_state)
         clf.fit(X, y)
         nodes, leaves = clf.nodes_leaves()
         self.assertEqual(11, nodes)
+        self.assertEqual(11, clf.get_nodes())
         self.assertEqual(6, leaves)
+        self.assertEqual(6, clf.get_leaves())
     def test_nodes_leaves_artificial(self):
         """Check leaves of artificial dataset."""
@@ -682,7 +689,9 @@ class Stree_test(unittest.TestCase):
         clf.tree_ = n1
         nodes, leaves = clf.nodes_leaves()
         self.assertEqual(6, nodes)
+        self.assertEqual(6, clf.get_nodes())
         self.assertEqual(2, leaves)
+        self.assertEqual(2, clf.get_leaves())
     def test_bogus_multiclass_strategy(self):
         """Check invalid multiclass strategy."""