Mirror of https://github.com/Doctorado-ML/STree.git
Synced 2025-08-18 00:46:02 +00:00

Compare commits (7 commits):

5f8ca8f3bb
fb8b9b344f
036d1ba2a7
4de74973b8
28dd04b95a
542bbce7db
5b791bc5bf

58  .github/workflows/codeql-analysis.yml (vendored)

@@ -2,12 +2,12 @@ name: "CodeQL"

 on:
   push:
-    branches: [ master ]
+    branches: [master]
   pull_request:
     # The branches below must be a subset of the branches above
-    branches: [ master ]
+    branches: [master]
   schedule:
-    - cron: '16 17 * * 3'
+    - cron: "16 17 * * 3"

 jobs:
   analyze:
@@ -17,40 +17,40 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        language: [ 'python' ]
+        language: ["python"]
         # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
         # Learn more:
         # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed

     steps:
       - name: Checkout repository
         uses: actions/checkout@v2

       # Initializes the CodeQL tools for scanning.
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@v1
+        uses: github/codeql-action/init@v2
         with:
           languages: ${{ matrix.language }}
           # If you wish to specify custom queries, you can do so here or in a config file.
           # By default, queries listed here will override any specified in a config file.
           # Prefix the list here with "+" to use these queries and those in the config file.
           # queries: ./path/to/local/query, your-org/your-repo/queries@main

       # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
       # If this step fails, then you should remove it and run the build manually (see below)
       - name: Autobuild
-        uses: github/codeql-action/autobuild@v1
+        uses: github/codeql-action/autobuild@v2

       # ℹ️ Command-line programs to run using the OS shell.
       # 📚 https://git.io/JvXDl

       # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
       #    and modify them (or add more) to build your code if your project
       #    uses a compiled language

       #- run: |
       #   make bootstrap
       #   make release

       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@v1
+        uses: github/codeql-action/analyze@v2

6  .github/workflows/main.yml (vendored)

@@ -16,9 +16,9 @@ jobs:
         python: [3.8, "3.10"]

     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Set up Python ${{ matrix.python }}
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python }}
       - name: Install dependencies
@@ -35,7 +35,7 @@ jobs:
           coverage run -m unittest -v stree.tests
           coverage xml
       - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v1
+        uses: codecov/codecov-action@v3
         with:
           token: ${{ secrets.CODECOV_TOKEN }}
           files: ./coverage.xml

README.md

@@ -1,7 +1,7 @@
 
+[](https://github.com/Doctorado-ML/STree/actions/workflows/codeql-analysis.yml)
 [](https://codecov.io/gh/doctorado-ml/stree)
 [](https://www.codacy.com/gh/Doctorado-ML/STree?utm_source=github.com&utm_medium=referral&utm_content=Doctorado-ML/STree&utm_campaign=Badge_Grade)
-[](https://lgtm.com/projects/g/Doctorado-ML/STree/context:python)
 [](https://badge.fury.io/py/STree)
 
 [](https://zenodo.org/badge/latestdoi/262658230)
@@ -15,7 +15,7 @@ Oblique Tree classifier based on SVM nodes. The nodes are built and splitted wit
 ## Installation
 
 ```bash
-pip install git+https://github.com/doctorado-ml/stree
+pip install Stree
 ```
 
 ## Documentation
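
Since the README now installs from PyPI, a short orientation example may help. STree exposes the standard scikit-learn estimator interface (the source changes later in this comparison show `class Stree(BaseEstimator, ClassifierMixin)`), so the usual fit/score workflow applies. This is a minimal sketch under that assumption; the wine dataset is only an illustrative choice borrowed from the project's test suite, not something the README mandates.

```python
# Minimal sketch: train an STree classifier on a toy dataset.
# Assumes `pip install Stree` and scikit-learn are available.
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
from stree import Stree

X, y = load_wine(return_X_y=True)
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, random_state=0)

clf = Stree(random_state=0)   # oblique tree whose splits come from SVM nodes
clf.fit(Xtrain, ytrain)
print("accuracy:", clf.score(Xtest, ytest))
```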
@@ -178,7 +178,7 @@
    "outputs": [],
    "source": [
     "# Stree\n",
-    "stree = Stree(random_state=random_state, C=.01, max_iter=1e3, kernel=\"liblinear\", multiclass_strategy=\"ovr\")"
+    "stree = Stree(random_state=random_state, C=.01, max_iter=1000, kernel=\"liblinear\", multiclass_strategy=\"ovr\")"
    ]
   },
   {
@@ -198,7 +198,7 @@
    "outputs": [],
    "source": [
     "# SVC (linear)\n",
-    "svc = LinearSVC(random_state=random_state, C=.01, max_iter=1e3)"
+    "svc = LinearSVC(random_state=random_state, C=.01, max_iter=1000)"
    ]
   },
   {
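
The switch from `1e3` to `1000` in these notebook cells, and the matching `max_iter: int = int(1e5)` default further down in Stree itself, most likely tracks scikit-learn's stricter parameter validation: `max_iter` is annotated as an integer, and recent scikit-learn releases can reject a float literal such as `1e3`. A minimal sketch of the corrected call, assuming a recent scikit-learn:

```python
from sklearn.svm import LinearSVC

# 1e3 evaluates to the float 1000.0; recent scikit-learn releases validate
# max_iter as an integral value, so the integer literal 1000 is used instead.
svc = LinearSVC(random_state=0, C=0.01, max_iter=1000)
print(svc.get_params()["max_iter"])  # 1000
```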
gridsearch.ipynb

@@ -1,253 +1,253 @@
 {
  "cells": [
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
     "# Test Gridsearch\n",
     "with different kernels and different configurations"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
     "# Setup\n",
     "Uncomment the next cell if STree is not already installed"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "#\n",
     "# Google Colab setup\n",
     "#\n",
     "#!pip install git+https://github.com/doctorado-ml/stree\n",
     "!pip install pandas"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
     "colab": {},
     "colab_type": "code",
     "id": "zIHKVxthDZEa"
    },
    "outputs": [],
    "source": [
     "import random\n",
     "import os\n",
     "import pandas as pd\n",
     "import numpy as np\n",
     "from sklearn.ensemble import AdaBoostClassifier\n",
     "from sklearn.svm import LinearSVC\n",
     "from sklearn.model_selection import GridSearchCV, train_test_split\n",
     "from stree import Stree"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
     "colab": {},
     "colab_type": "code",
     "id": "IEmq50QgDZEi"
    },
    "outputs": [],
    "source": [
     "if not os.path.isfile('data/creditcard.csv'):\n",
     "    !wget --no-check-certificate --content-disposition http://nube.jccm.es/index.php/s/Zs7SYtZQJ3RQ2H2/download\n",
     "    !tar xzf creditcard.tgz"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
     "colab": {},
     "colab_type": "code",
     "id": "z9Q-YUfBDZEq",
     "outputId": "afc822fb-f16a-4302-8a67-2b9e2880159b",
     "tags": []
    },
    "outputs": [],
    "source": [
     "random_state=1\n",
     "\n",
     "def load_creditcard(n_examples=0):\n",
     "    df = pd.read_csv('data/creditcard.csv')\n",
     "    print(\"Fraud: {0:.3f}% {1}\".format(df.Class[df.Class == 1].count()*100/df.shape[0], df.Class[df.Class == 1].count()))\n",
     "    print(\"Valid: {0:.3f}% {1}\".format(df.Class[df.Class == 0].count()*100/df.shape[0], df.Class[df.Class == 0].count()))\n",
     "    y = df.Class\n",
     "    X = df.drop(['Class', 'Time', 'Amount'], axis=1).values\n",
     "    if n_examples > 0:\n",
     "        # Take first n_examples samples\n",
     "        X = X[:n_examples, :]\n",
     "        y = y[:n_examples, :]\n",
     "    else:\n",
     "        # Take all the positive samples with a number of random negatives\n",
     "        if n_examples < 0:\n",
     "            Xt = X[(y == 1).ravel()]\n",
     "            yt = y[(y == 1).ravel()]\n",
     "            indices = random.sample(range(X.shape[0]), -1 * n_examples)\n",
     "            X = np.append(Xt, X[indices], axis=0)\n",
     "            y = np.append(yt, y[indices], axis=0)\n",
     "    print(\"X.shape\", X.shape, \" y.shape\", y.shape)\n",
     "    print(\"Fraud: {0:.3f}% {1}\".format(len(y[y == 1])*100/X.shape[0], len(y[y == 1])))\n",
     "    print(\"Valid: {0:.3f}% {1}\".format(len(y[y == 0]) * 100 / X.shape[0], len(y[y == 0])))\n",
     "    Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, train_size=0.7, shuffle=True, random_state=random_state, stratify=y)\n",
     "    return Xtrain, Xtest, ytrain, ytest\n",
     "\n",
     "data = load_creditcard(-1000) # Take all true samples + 1000 of the others\n",
     "# data = load_creditcard(5000) # Take the first 5000 samples\n",
     "# data = load_creditcard(0) # Take all the samples\n",
     "\n",
     "Xtrain = data[0]\n",
     "Xtest = data[1]\n",
     "ytrain = data[2]\n",
     "ytest = data[3]"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
     "# Tests"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
     "colab": {},
     "colab_type": "code",
     "id": "HmX3kR4PDZEw"
    },
    "outputs": [],
    "source": [
     "parameters = [{\n",
     "    'base_estimator': [Stree(random_state=random_state)],\n",
     "    'n_estimators': [10, 25],\n",
     "    'learning_rate': [.5, 1],\n",
-     "    'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
-     "    'base_estimator__tol': [.1, 1e-02],\n",
-     "    'base_estimator__max_depth': [3, 5, 7],\n",
-     "    'base_estimator__C': [1, 7, 55],\n",
-     "    'base_estimator__kernel': ['linear']\n",
+     "    'estimator__split_criteria': ['max_samples', 'impurity'],\n",
+     "    'estimator__tol': [.1, 1e-02],\n",
+     "    'estimator__max_depth': [3, 5, 7],\n",
+     "    'estimator__C': [1, 7, 55],\n",
+     "    'estimator__kernel': ['linear']\n",
     "},\n",
     "{\n",
     "    'base_estimator': [Stree(random_state=random_state)],\n",
     "    'n_estimators': [10, 25],\n",
     "    'learning_rate': [.5, 1],\n",
-     "    'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
-     "    'base_estimator__tol': [.1, 1e-02],\n",
-     "    'base_estimator__max_depth': [3, 5, 7],\n",
-     "    'base_estimator__C': [1, 7, 55],\n",
-     "    'base_estimator__degree': [3, 5, 7],\n",
-     "    'base_estimator__kernel': ['poly']\n",
+     "    'estimator__split_criteria': ['max_samples', 'impurity'],\n",
+     "    'estimator__tol': [.1, 1e-02],\n",
+     "    'estimator__max_depth': [3, 5, 7],\n",
+     "    'estimator__C': [1, 7, 55],\n",
+     "    'estimator__degree': [3, 5, 7],\n",
+     "    'estimator__kernel': ['poly']\n",
     "},\n",
     "{\n",
     "    'base_estimator': [Stree(random_state=random_state)],\n",
     "    'n_estimators': [10, 25],\n",
     "    'learning_rate': [.5, 1],\n",
-     "    'base_estimator__split_criteria': ['max_samples', 'impurity'],\n",
-     "    'base_estimator__tol': [.1, 1e-02],\n",
-     "    'base_estimator__max_depth': [3, 5, 7],\n",
-     "    'base_estimator__C': [1, 7, 55],\n",
-     "    'base_estimator__gamma': [.1, 1, 10],\n",
-     "    'base_estimator__kernel': ['rbf']\n",
+     "    'estimator__split_criteria': ['max_samples', 'impurity'],\n",
+     "    'estimator__tol': [.1, 1e-02],\n",
+     "    'estimator__max_depth': [3, 5, 7],\n",
+     "    'estimator__C': [1, 7, 55],\n",
+     "    'estimator__gamma': [.1, 1, 10],\n",
+     "    'estimator__kernel': ['rbf']\n",
     "}]"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "Stree().get_params()"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
     "colab": {},
     "colab_type": "code",
     "id": "CrcB8o6EDZE5",
     "outputId": "7703413a-d563-4289-a13b-532f38f82762",
     "tags": []
    },
    "outputs": [],
    "source": [
     "clf = AdaBoostClassifier(random_state=random_state, algorithm=\"SAMME\")\n",
     "grid = GridSearchCV(clf, parameters, verbose=5, n_jobs=-1, return_train_score=True)\n",
     "grid.fit(Xtrain, ytrain)"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
     "colab": {},
     "colab_type": "code",
     "id": "ZjX88NoYDZE8",
     "outputId": "285163c8-fa33-4915-8ae7-61c4f7844344",
     "tags": []
    },
    "outputs": [],
    "source": [
     "print(\"Best estimator: \", grid.best_estimator_)\n",
     "print(\"Best hyperparameters: \", grid.best_params_)\n",
     "print(\"Best accuracy: \", grid.best_score_)"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
     "Best estimator: AdaBoostClassifier(algorithm='SAMME',\n",
     "                   base_estimator=Stree(C=55, max_depth=7, random_state=1,\n",
     "                                        split_criteria='max_samples', tol=0.1),\n",
     "                   learning_rate=0.5, n_estimators=25, random_state=1)\n",
-     "Best hyperparameters: {'base_estimator': Stree(C=55, max_depth=7, random_state=1, split_criteria='max_samples', tol=0.1), 'base_estimator__C': 55, 'base_estimator__kernel': 'linear', 'base_estimator__max_depth': 7, 'base_estimator__split_criteria': 'max_samples', 'base_estimator__tol': 0.1, 'learning_rate': 0.5, 'n_estimators': 25}"
+     "Best hyperparameters: {'base_estimator': Stree(C=55, max_depth=7, random_state=1, split_criteria='max_samples', tol=0.1), 'estimator__C': 55, 'estimator__kernel': 'linear', 'estimator__max_depth': 7, 'estimator__split_criteria': 'max_samples', 'estimator__tol': 0.1, 'learning_rate': 0.5, 'n_estimators': 25}"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
     "Best accuracy: 0.9511777695988222"
    ]
   }
  ],
  "metadata": {
   "colab": {
    "name": "gridsearch.ipynb",
    "provenance": []
   },
   "kernelspec": {
    "display_name": "Python 3",
    "language": "python",
    "name": "python3"
   },
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
     "version": 3
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
    "version": "3.8.2-final"
   }
  },
  "nbformat": 4,
  "nbformat_minor": 4
 }
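
The visible change in gridsearch.ipynb is the prefix of the nested AdaBoost parameters, `base_estimator__*` versus `estimator__*`, which has to match the constructor argument name that AdaBoostClassifier uses in the installed scikit-learn (`base_estimator` in older releases, `estimator` from scikit-learn 1.2 on). The sketch below condenses the notebook's grid search under stated assumptions: a synthetic imbalanced dataset stands in for the `data/creditcard.csv` download, the grid is trimmed to the linear kernel, and the `estimator`/`estimator__` spelling presumes scikit-learn 1.2 or newer.

```python
# Condensed sketch of the notebook's grid search (assumptions: synthetic data
# instead of data/creditcard.csv, a reduced parameter grid, scikit-learn >= 1.2).
from sklearn.datasets import make_classification
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import GridSearchCV, train_test_split
from stree import Stree

# Imbalanced two-class problem standing in for the credit-card data.
X, y = make_classification(n_samples=1500, weights=[0.95], flip_y=0.01, random_state=1)
Xtrain, Xtest, ytrain, ytest = train_test_split(
    X, y, train_size=0.7, shuffle=True, random_state=1, stratify=y
)

# Both the top-level key and the nested prefix must match the AdaBoost argument
# name; on older scikit-learn they would read "base_estimator"/"base_estimator__".
parameters = [{
    "estimator": [Stree(random_state=1)],
    "n_estimators": [10, 25],
    "learning_rate": [0.5, 1],
    "estimator__split_criteria": ["max_samples", "impurity"],
    "estimator__max_depth": [3, 5, 7],
    "estimator__C": [1, 7, 55],
    "estimator__kernel": ["linear"],
}]

clf = AdaBoostClassifier(random_state=1, algorithm="SAMME")
grid = GridSearchCV(clf, parameters, n_jobs=-1, return_train_score=True)
grid.fit(Xtrain, ytrain)
print("Best hyperparameters: ", grid.best_params_)
print("Best accuracy: ", grid.best_score_)
```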

@@ -267,7 +267,6 @@ class Splitter:
         random_state=None,
         normalize=False,
     ):
-
         self._clf = clf
         self._random_state = random_state
         if random_state is not None:

@@ -139,7 +139,7 @@ class Stree(BaseEstimator, ClassifierMixin):
         self,
         C: float = 1.0,
         kernel: str = "linear",
-        max_iter: int = 1e5,
+        max_iter: int = int(1e5),
         random_state: int = None,
         max_depth: int = None,
         tol: float = 1e-4,
@@ -153,7 +153,6 @@ class Stree(BaseEstimator, ClassifierMixin):
         multiclass_strategy: str = "ovo",
         normalize: bool = False,
     ):
-
         self.max_iter = max_iter
         self.C = C
         self.kernel = kernel
@@ -485,6 +484,43 @@ class Stree(BaseEstimator, ClassifierMixin):
         X = self.check_predict(X)
         return self.classes_[np.argmax(self.__predict_class(X), axis=1)]

+    def get_nodes(self) -> int:
+        """Return the number of nodes in the tree
+
+        Returns
+        -------
+        int
+            number of nodes
+        """
+        nodes = 0
+        for _ in self:
+            nodes += 1
+        return nodes
+
+    def get_leaves(self) -> int:
+        """Return the number of leaves in the tree
+
+        Returns
+        -------
+        int
+            number of leaves
+        """
+        leaves = 0
+        for node in self:
+            if node.is_leaf():
+                leaves += 1
+        return leaves
+
+    def get_depth(self) -> int:
+        """Return the depth of the tree
+
+        Returns
+        -------
+        int
+            depth of the tree
+        """
+        return self.depth_
+
     def nodes_leaves(self) -> tuple:
         """Compute the number of nodes and leaves in the built tree

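The three new accessors simply walk the fitted tree through the estimator's iterator, the same way the existing `nodes_leaves()` does, and `get_depth()` returns the already available `depth_` attribute. A minimal usage sketch, assuming a fitted classifier (the wine dataset is borrowed from the test changes below):

```python
from sklearn.datasets import load_wine
from stree import Stree

X, y = load_wine(return_X_y=True)
clf = Stree(random_state=0)
clf.fit(X, y)

# New in this comparison: per-quantity accessors alongside nodes_leaves().
print("nodes :", clf.get_nodes())   # counts every node reached while iterating the tree
print("leaves:", clf.get_leaves())  # counts only nodes whose is_leaf() is True
print("depth :", clf.get_depth())   # same value as clf.depth_
```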

@@ -1 +1 @@
-__version__ = "1.3.0"
+__version__ = "1.3.2"

@@ -239,6 +239,7 @@ class Stree_test(unittest.TestCase):
         )
         tcl.fit(*load_dataset(self._random_state))
         self.assertEqual(depth, tcl.depth_)
+        self.assertEqual(depth, tcl.get_depth())

     def test_unfitted_tree_is_iterable(self):
         tcl = Stree()
@@ -306,10 +307,10 @@ class Stree_test(unittest.TestCase):
         for criteria in ["max_samples", "impurity"]:
             for kernel in self._kernels:
                 clf = Stree(
-                    max_iter=1e4,
-                    multiclass_strategy="ovr"
-                    if kernel == "liblinear"
-                    else "ovo",
+                    max_iter=int(1e4),
+                    multiclass_strategy=(
+                        "ovr" if kernel == "liblinear" else "ovo"
+                    ),
                     kernel=kernel,
                     random_state=self._random_state,
                 )
@@ -640,10 +641,12 @@ class Stree_test(unittest.TestCase):
         clf = Stree(random_state=self._random_state)
         clf.fit(X, y)
         self.assertEqual(6, clf.depth_)
+        self.assertEqual(6, clf.get_depth())
         X, y = load_wine(return_X_y=True)
         clf = Stree(random_state=self._random_state)
         clf.fit(X, y)
         self.assertEqual(4, clf.depth_)
+        self.assertEqual(4, clf.get_depth())

     def test_nodes_leaves(self):
         """Check number of nodes and leaves."""
@@ -657,13 +660,17 @@ class Stree_test(unittest.TestCase):
         clf.fit(X, y)
         nodes, leaves = clf.nodes_leaves()
         self.assertEqual(31, nodes)
+        self.assertEqual(31, clf.get_nodes())
         self.assertEqual(16, leaves)
+        self.assertEqual(16, clf.get_leaves())
         X, y = load_wine(return_X_y=True)
         clf = Stree(random_state=self._random_state)
         clf.fit(X, y)
         nodes, leaves = clf.nodes_leaves()
         self.assertEqual(11, nodes)
+        self.assertEqual(11, clf.get_nodes())
         self.assertEqual(6, leaves)
+        self.assertEqual(6, clf.get_leaves())

     def test_nodes_leaves_artificial(self):
         """Check leaves of artificial dataset."""
@@ -682,7 +689,9 @@ class Stree_test(unittest.TestCase):
         clf.tree_ = n1
         nodes, leaves = clf.nodes_leaves()
         self.assertEqual(6, nodes)
+        self.assertEqual(6, clf.get_nodes())
         self.assertEqual(2, leaves)
+        self.assertEqual(2, clf.get_leaves())

     def test_bogus_multiclass_strategy(self):
         """Check invalid multiclass strategy."""