Compare commits


1 Commit

Author SHA1 Message Date
1b08cb9bdf Add select KBest features #17 2021-04-26 01:15:30 +02:00
26 changed files with 187 additions and 673 deletions

View File

@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2020-2021, Ricardo Montañana Gómez
Copyright (c) 2020 Doctorado-ML
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

View File

@@ -1,49 +0,0 @@
SHELL := /bin/bash
.DEFAULT_GOAL := help
.PHONY: coverage deps help lint push test doc build
coverage: ## Run tests with coverage
coverage erase
coverage run -m unittest -v stree.tests
coverage report -m
deps: ## Install dependencies
pip install -r requirements.txt
lint: ## Lint and static-check
black stree
flake8 stree
mypy stree
push: ## Push code with tags
git push && git push --tags
test: ## Run tests
python -m unittest -v stree.tests
doc: ## Update documentation
make -C docs --makefile=Makefile html
build: ## Build package
rm -fr dist/*
python setup.py sdist bdist_wheel
doc-clean: ## Update documentation
make -C docs --makefile=Makefile clean
help: ## Show help message
@IFS=$$'\n' ; \
help_lines=(`fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/##/:/'`); \
printf "%s\n\n" "Usage: make [task]"; \
printf "%-20s %s\n" "task" "help" ; \
printf "%-20s %s\n" "------" "----" ; \
for help_line in $${help_lines[@]}; do \
IFS=$$':' ; \
help_split=($$help_line) ; \
help_command=`echo $${help_split[0]} | sed -e 's/^ *//' -e 's/ *$$//'` ; \
help_info=`echo $${help_split[2]} | sed -e 's/^ *//' -e 's/ *$$//'` ; \
printf '\033[36m'; \
printf "%-20s %s" $$help_command ; \
printf '\033[0m'; \
printf "%s\n" $$help_info; \
done

View File

@@ -1,9 +1,8 @@
![CI](https://github.com/Doctorado-ML/STree/workflows/CI/badge.svg)
[![codecov](https://codecov.io/gh/doctorado-ml/stree/branch/master/graph/badge.svg)](https://codecov.io/gh/doctorado-ml/stree)
[![Codacy Badge](https://app.codacy.com/project/badge/Grade/35fa3dfd53a24a339344b33d9f9f2f3d)](https://www.codacy.com/gh/Doctorado-ML/STree?utm_source=github.com&utm_medium=referral&utm_content=Doctorado-ML/STree&utm_campaign=Badge_Grade)
[![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/Doctorado-ML/STree.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/Doctorado-ML/STree/context:python)
# STree
# Stree
Oblique Tree classifier based on SVM nodes. The nodes are built and split using sklearn SVC models. Stree is a sklearn estimator and can be integrated into pipelines, grid searches, etc.
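For instance, a minimal sketch of both uses (illustrative parameter grid only; assumes the package is installed):

```python
from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from stree import Stree

X, y = load_iris(return_X_y=True)
# Stree behaves like any sklearn estimator, so it can be a pipeline step
pipe = Pipeline([("scaler", StandardScaler()), ("stree", Stree(random_state=0))])
# Parameter names follow the hyperparameter table below; grid values are illustrative
param_grid = {"stree__C": [0.1, 1.0], "stree__kernel": ["linear", "rbf"]}
search = GridSearchCV(pipe, param_grid, cv=3)
search.fit(X, y)
print(search.best_params_, search.best_score_)
```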
@@ -15,10 +14,6 @@ Oblique Tree classifier based on SVM nodes. The nodes are built and splitted wit
```bash
pip install git+https://github.com/doctorado-ml/stree
```
## Documentation
Can be found in [stree.readthedocs.io](https://stree.readthedocs.io/en/latest/index.html)
## Examples
### Jupyter notebooks
@@ -36,22 +31,20 @@ Can be found in
## Hyperparameters
| | **Hyperparameter** | **Type/Values** | **Default** | **Meaning** |
| --- | ------------------- | ------------------------------------------------------ | ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| --- | ------------------ | ------------------------------------------------------ | ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| \* | C | \<float\> | 1.0 | Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. |
| \* | kernel | {"liblinear", "linear", "poly", "rbf", "sigmoid"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of liblinear, linear, poly or rbf. liblinear uses [liblinear](https://www.csie.ntu.edu.tw/~cjlin/liblinear/) library and the rest uses [libsvm](https://www.csie.ntu.edu.tw/~cjlin/libsvm/) library through scikit-learn library |
| \* | kernel | {"linear", "poly", "rbf"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of linear, poly or rbf. |
| \* | max_iter | \<int\> | 1e5 | Hard limit on iterations within solver, or -1 for no limit. |
| \* | random_state | \<int\> | None | Controls the pseudo random number generation for shuffling the data for probability estimates. Ignored when probability is False.<br>Pass an int for reproducible output across multiple function calls |
| | max_depth | \<int\> | None | Specifies the maximum depth of the tree |
| \* | tol | \<float\> | 1e-4 | Tolerance for stopping criterion. |
| \* | degree | \<int\> | 3 | Degree of the polynomial kernel function (poly). Ignored by all other kernels. |
| \* | gamma | {"scale", "auto"} or \<float\> | scale | Kernel coefficient for rbf and poly.<br>if gamma='scale' (default) is passed then it uses 1 / (n_features \* X.var()) as value of gamma,<br>if auto, uses 1 / n_features. |
| | split_criteria | {"impurity", "max_samples"} | impurity | Decides (only in multiclass classification) which column (class) to use to split the dataset in a node\*\*. max_samples is incompatible with the 'ovo' multiclass_strategy |
| | split_criteria | {"impurity", "max_samples"} | impurity | Decides (only in multiclass classification) which column (class) to use to split the dataset in a node\*\* |
| | criterion | {“gini”, “entropy”} | entropy | The function to measure the quality of a split (only used if max_features != num_features). <br>Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain. |
| | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) for any |
| | max_features | \<int\>, \<float\> <br><br>or {“auto”, “sqrt”, “log2”} | None | The number of features to consider when looking for the split:<br>If int, then consider max_features features at each split.<br>If float, then max_features is a fraction and int(max_features \* n_features) features are considered at each split.<br>If “auto”, then max_features=sqrt(n_features).<br>If “sqrt”, then max_features=sqrt(n_features).<br>If “log2”, then max_features=log2(n_features).<br>If None, then max_features=n_features. |
| | splitter | {"best", "random", "mutual"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features). Supported strategies are: **"best"**: the sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **"random"**: the algorithm generates 5 candidates and chooses one randomly. **"mutual"**: chooses the best features w.r.t. their mutual information with the label (a sketch of these strategies follows this table) |
| | normalize | \<bool\> | False | Whether standardization of features should be applied in each node with the samples that reach it |
| \* | multiclass_strategy | {"ovo", "ovr"} | "ovo" | Strategy to use with multiclass datasets, **"ovo"**: one versus one. **"ovr"**: one versus rest |
| | splitter | {"best", "random"} | random | The strategy used to choose the feature set at each node (only used if max_features != num_features). <br>Supported strategies are “best” to choose the best feature set and “random” to choose a random combination. <br>The algorithm generates 5 candidates at most to choose from in both strategies. |
\* Hyperparameter used by the support vector classifier of every node
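The three `splitter` strategies can be approximated with scikit-learn primitives. The sketch below is illustrative only; the `Splitter` class in `stree/Strees.py` is the authoritative implementation, and the information-gain scoring of the random candidates is simplified away:

```python
import numpy as np
from sklearn.datasets import load_wine
from sklearn.feature_selection import SelectKBest, mutual_info_classif

X, y = load_wine(return_X_y=True)
k = 4  # stands in for max_features

# "best": SelectKBest keeps the k highest-scoring features (ANOVA F-value by default)
best = SelectKBest(k=k).fit(X, y).get_support(indices=True)

# "mutual": rank features by mutual information with the label and keep the top k
mi = mutual_info_classif(X, y)
mutual = tuple(sorted(range(len(mi)), key=lambda i: mi[i])[-k:])

# "random": draw a candidate subset of k features; Stree draws 5 such candidates
# and keeps the one with the best information gain (only the sampling is shown)
rng = np.random.default_rng(0)
random_subset = tuple(rng.choice(X.shape[1], size=k, replace=False))

print(best, mutual, random_subset)
```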
@@ -68,7 +61,3 @@ Once we have the column to take into account for the split, the algorithm splits
```bash
python -m unittest -v stree.tests
```
## License
STree is [MIT](https://github.com/doctorado-ml/stree/blob/master/LICENSE) licensed

View File

@@ -1,8 +1,8 @@
coverage:
overage:
status:
project:
default:
target: 100%
target: 90%
comment:
layout: "reach, diff, flags, files"
behavior: default

View File

@@ -1,20 +0,0 @@
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = source
BUILDDIR = build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

View File

@@ -1,4 +0,0 @@
sphinx
sphinx-rtd-theme
myst-parser
git+https://github.com/doctorado-ml/stree

View File

@@ -1,9 +0,0 @@
Siterator
=========
.. automodule:: stree
.. autoclass:: Siterator
:members:
:undoc-members:
:private-members:
:show-inheritance:

View File

@@ -1,9 +0,0 @@
Snode
=====
.. automodule:: stree
.. autoclass:: Snode
:members:
:undoc-members:
:private-members:
:show-inheritance:

View File

@@ -1,9 +0,0 @@
Splitter
========
.. automodule:: stree
.. autoclass:: Splitter
:members:
:undoc-members:
:private-members:
:show-inheritance:

View File

@@ -1,9 +0,0 @@
Stree
=====
.. automodule:: stree
.. autoclass:: Stree
:members:
:undoc-members:
:private-members:
:show-inheritance:

View File

@@ -1,11 +0,0 @@
API index
=========
.. toctree::
:maxdepth: 2
:caption: Contents:
Stree
Splitter
Snode
Siterator

View File

@@ -1,57 +0,0 @@
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import stree
sys.path.insert(0, os.path.abspath("../../stree/"))
# -- Project information -----------------------------------------------------
project = "STree"
copyright = "2020 - 2021, Ricardo Montañana Gómez"
author = "Ricardo Montañana Gómez"
# The full version, including alpha/beta/rc tags
version = stree.__version__
release = version
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["myst_parser", "sphinx.ext.autodoc", "sphinx.ext.viewcode"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

View File

@@ -1,44 +0,0 @@
# Examples
## Notebooks
- [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/Doctorado-ML/STree/master?urlpath=lab/tree/notebooks/benchmark.ipynb) Benchmark
- [![benchmark](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/benchmark.ipynb) Benchmark
- [![features](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/features.ipynb) Some features
- [![Gridsearch](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/gridsearch.ipynb) Gridsearch
- [![Ensemble](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Doctorado-ML/STree/blob/master/notebooks/ensemble.ipynb) Ensembles
## Sample Code
```python
import time
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from stree import Stree
random_state = 1
X, y = load_iris(return_X_y=True)
Xtrain, Xtest, ytrain, ytest = train_test_split(
X, y, test_size=0.2, random_state=random_state
)
now = time.time()
print("Predicting with max_features=sqrt(n_features)")
clf = Stree(random_state=random_state, max_features="auto")
clf.fit(Xtrain, ytrain)
print(f"Took {time.time() - now:.2f} seconds to train")
print(clf)
print(f"Classifier's accuracy (train): {clf.score(Xtrain, ytrain):.4f}")
print(f"Classifier's accuracy (test) : {clf.score(Xtest, ytest):.4f}")
print("=" * 40)
print("Predicting with max_features=n_features")
now = time.time()  # reset the timer so the second training is measured on its own
clf = Stree(random_state=random_state)
clf.fit(Xtrain, ytrain)
print(f"Took {time.time() - now:.2f} seconds to train")
print(clf)
print(f"Classifier's accuracy (train): {clf.score(Xtrain, ytrain):.4f}")
print(f"Classifier's accuracy (test) : {clf.score(Xtest, ytest):.4f}")
```

Binary file not shown.


View File

@@ -1,29 +0,0 @@
## Hyperparameters
| | **Hyperparameter** | **Type/Values** | **Default** | **Meaning** |
| --- | ------------------- | ------------------------------------------------------ | ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| \* | C | \<float\> | 1.0 | Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. |
| \* | kernel | {"liblinear", "linear", "poly", "rbf", "sigmoid"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of liblinear, linear, poly or rbf. liblinear uses [liblinear](https://www.csie.ntu.edu.tw/~cjlin/liblinear/) library and the rest uses [libsvm](https://www.csie.ntu.edu.tw/~cjlin/libsvm/) library through scikit-learn library |
| \* | max_iter | \<int\> | 1e5 | Hard limit on iterations within solver, or -1 for no limit. |
| \* | random_state | \<int\> | None | Controls the pseudo random number generation for shuffling the data for probability estimates. Ignored when probability is False.<br>Pass an int for reproducible output across multiple function calls |
| | max_depth | \<int\> | None | Specifies the maximum depth of the tree |
| \* | tol | \<float\> | 1e-4 | Tolerance for stopping criterion. |
| \* | degree | \<int\> | 3 | Degree of the polynomial kernel function (poly). Ignored by all other kernels. |
| \* | gamma | {"scale", "auto"} or \<float\> | scale | Kernel coefficient for rbf and poly.<br>if gamma='scale' (default) is passed then it uses 1 / (n_features \* X.var()) as value of gamma,<br>if auto, uses 1 / n_features. |
| | split_criteria | {"impurity", "max_samples"} | impurity | Decides (only in multiclass classification) which column (class) to use to split the dataset in a node\*\*. max_samples is incompatible with the 'ovo' multiclass_strategy |
| | criterion | {“gini”, “entropy”} | entropy | The function to measure the quality of a split (only used if max_features != num_features). <br>Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain. |
| | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) for any |
| | max_features | \<int\>, \<float\> <br><br>or {“auto”, “sqrt”, “log2”} | None | The number of features to consider when looking for the split:<br>If int, then consider max_features features at each split.<br>If float, then max_features is a fraction and int(max_features \* n_features) features are considered at each split.<br>If “auto”, then max_features=sqrt(n_features).<br>If “sqrt”, then max_features=sqrt(n_features).<br>If “log2”, then max_features=log2(n_features).<br>If None, then max_features=n_features. |
| | splitter | {"best", "random", "mutual"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features). Supported strategies are: **"best"**: the sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **"random"**: the algorithm generates 5 candidates and chooses one randomly. **"mutual"**: chooses the best features w.r.t. their mutual information with the label |
| | normalize | \<bool\> | False | Whether standardization of features should be applied in each node with the samples that reach it |
| \* | multiclass_strategy | {"ovo", "ovr"} | "ovo" | Strategy to use with multiclass datasets, **"ovo"**: one versus one. **"ovr"**: one versus rest |
\* Hyperparameter used by the support vector classifier of every node
\*\* **Splitting in a STree node**
The decision function is applied to the dataset and the distances from the samples to the hyperplanes are computed in a matrix. This matrix has as many columns as there are classes in the dataset (when there are more than two, i.e. multiclass classification) or a single column for a binary dataset. In binary classification only one hyperplane is computed, so only one column is needed to store the distances of the samples to it. If three or more classes are present, as many hyperplanes as classes are needed, and therefore one column per hyperplane.
In multiclass classification we have to decide which column to take into account to make the split, and that depends on the hyperparameter _split_criteria_: if "impurity" is chosen, STree computes the information gain of every split candidate using each column and chooses the one that maximizes it; otherwise STree chooses the column with the most samples assigned a predicted class (the column with the most positive values in it).
Once we have the column to take into account for the split, the algorithm separates the samples with positive distances to the hyperplane from the rest, as sketched below.
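A hedged sketch of that partition, using a plain SVC with `decision_function_shape="ovr"` so the distance matrix has one column per class; the "impurity" branch, which scores every column by information gain, is omitted, and `Splitter.partition` remains the real implementation:

```python
import numpy as np
from sklearn.datasets import load_wine
from sklearn.svm import SVC

X, y = load_wine(return_X_y=True)  # three classes -> three hyperplanes
clf = SVC(kernel="linear", decision_function_shape="ovr").fit(X, y)
distances = clf.decision_function(X)  # shape (n_samples, n_classes)

# "max_samples": choose the column with the most positive distances
column = int(np.argmax((distances > 0).sum(axis=0)))
up = distances[:, column] > 0  # positive side of the chosen hyperplane goes up
print(distances.shape, column, up.sum(), (~up).sum())
```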

View File

@@ -1,15 +0,0 @@
Welcome to STree's documentation!
=================================
.. toctree::
:caption: Contents:
:titlesonly:
stree
install
hyperparameters
example
api/index
* :ref:`genindex`

View File

@@ -1,16 +0,0 @@
Install
=======
The main stable release
``pip install stree``
or the last development branch
``pip install git+https://github.com/doctorado-ml/stree``
Tests
*****
``python -m unittest -v stree.tests``

View File

@@ -1,14 +0,0 @@
# STree
[![Codeship Status for Doctorado-ML/STree](https://app.codeship.com/projects/8b2bd350-8a1b-0138-5f2c-3ad36f3eb318/status?branch=master)](https://app.codeship.com/projects/399170)
[![codecov](https://codecov.io/gh/doctorado-ml/stree/branch/master/graph/badge.svg)](https://codecov.io/gh/doctorado-ml/stree)
[![Codacy Badge](https://app.codacy.com/project/badge/Grade/35fa3dfd53a24a339344b33d9f9f2f3d)](https://www.codacy.com/gh/Doctorado-ML/STree?utm_source=github.com&utm_medium=referral&utm_content=Doctorado-ML/STree&utm_campaign=Badge_Grade)
[![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/Doctorado-ML/STree.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/Doctorado-ML/STree/context:python)
Oblique Tree classifier based on SVM nodes. The nodes are built and split using sklearn SVC models. Stree is a sklearn estimator and can be integrated into pipelines, grid searches, etc.
![Stree](./example.png)
## License
STree is [MIT](https://github.com/doctorado-ml/stree/blob/master/LICENSE) licensed

29
main.py Normal file
View File

@@ -0,0 +1,29 @@
import time
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from stree import Stree
random_state = 1
X, y = load_iris(return_X_y=True)
Xtrain, Xtest, ytrain, ytest = train_test_split(
X, y, test_size=0.3, random_state=random_state
)
now = time.time()
print("Predicting with max_features=sqrt(n_features)")
clf = Stree(C=0.01, random_state=random_state, max_features="auto")
clf.fit(Xtrain, ytrain)
print(f"Took {time.time() - now:.2f} seconds to train")
print(clf)
print(f"Classifier's accuracy (train): {clf.score(Xtrain, ytrain):.4f}")
print(f"Classifier's accuracy (test) : {clf.score(Xtest, ytest):.4f}")
print("=" * 40)
print("Predicting with max_features=n_features")
now = time.time()  # reset the timer so the second training is measured on its own
clf = Stree(C=0.01, random_state=random_state)
clf.fit(Xtrain, ytrain)
print(f"Took {time.time() - now:.2f} seconds to train")
print(clf)
print(f"Classifier's accuracy (train): {clf.score(Xtrain, ytrain):.4f}")
print(f"Classifier's accuracy (test) : {clf.score(Xtest, ytest):.4f}")

View File

@@ -1,50 +1,36 @@
import setuptools
__version__ = "1.0rc1"
__author__ = "Ricardo Montañana Gómez"
def readme():
with open("README.md") as f:
return f.read()
def get_data(field):
item = ""
with open("stree/__init__.py") as f:
for line in f.readlines():
if line.startswith(f"__{field}__"):
delim = '"' if '"' in line else "'"
item = line.split(delim)[1]
break
else:
raise RuntimeError(f"Unable to find {field} string.")
return item
setuptools.setup(
name="STree",
version=get_data("version"),
license=get_data("license"),
version=__version__,
license="MIT License",
description="Oblique decision tree with svm nodes",
long_description=readme(),
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
url="https://github.com/Doctorado-ML/STree#stree",
project_urls={
"Code": "https://github.com/Doctorado-ML/STree",
"Documentation": "https://stree.readthedocs.io/en/latest/index.html",
},
author=get_data("author"),
author_email=get_data("author_email"),
url="https://github.com/doctorado-ml/stree",
author=__author__,
author_email="ricardo.montanana@alu.uclm.es",
keywords="scikit-learn oblique-classifier oblique-decision-tree decision-\
tree svm svc",
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: " + get_data("license"),
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.8",
"Natural Language :: English",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Intended Audience :: Science/Research",
],
install_requires=["scikit-learn", "numpy"],
install_requires=["scikit-learn", "numpy", "ipympl"],
test_suite="stree.tests",
zip_safe=False,
)

View File

@@ -1,10 +0,0 @@
version: 2
sphinx:
configuration: docs/source/conf.py
python:
version: 3.8
install:
- requirements: requirements.txt
- requirements: docs/requirements.txt

View File

@@ -1,5 +1,9 @@
"""
Oblique decision tree classifier based on SVM nodes
__author__ = "Ricardo Montañana Gómez"
__copyright__ = "Copyright 2020, Ricardo Montañana Gómez"
__license__ = "MIT"
__version__ = "0.9"
Build an oblique tree classifier based on SVM nodes
"""
import os
@@ -11,8 +15,9 @@ from typing import Optional
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.svm import SVC, LinearSVC
from sklearn.feature_selection import SelectKBest, mutual_info_classif
from sklearn.feature_selection import SelectKBest
from sklearn.preprocessing import StandardScaler
from sklearn.utils import check_consistent_length
from sklearn.utils.multiclass import check_classification_targets
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.validation import (
@@ -21,6 +26,7 @@ from sklearn.utils.validation import (
check_is_fitted,
_check_sample_weight,
)
from sklearn.metrics._classification import _weighted_sum, _check_targets
class Snode:
@@ -141,6 +147,7 @@ class Snode:
f"{self._belief: .6f} impurity={self._impurity:.4f} "
f"counts={count_values}"
)
else:
return (
f"{self._title} feaures={self._features} impurity="
f"{self._impurity:.4f} "
@@ -155,10 +162,6 @@ class Siterator:
self._stack = []
self._push(tree)
def __iter__(self):
# To complete the iterator interface
return self
def _push(self, node: Snode):
if node is not None:
self._stack.append(node)
@@ -209,9 +212,9 @@ class Splitter:
f"criteria has to be max_samples or impurity; got ({criteria})"
)
if feature_select not in ["random", "best", "mutual"]:
if feature_select not in ["random", "best"]:
raise ValueError(
"splitter must be in {random, best, mutual} got "
"splitter must be either random or best, got "
f"({feature_select})"
)
self.criterion_function = getattr(self, f"_{self._criterion}")
@@ -295,23 +298,6 @@ class Splitter:
def _select_best_set(
self, dataset: np.array, labels: np.array, features_sets: list
) -> list:
"""Return the best set of features among feature_sets, the criterion is
the information gain
Parameters
----------
dataset : np.array
array of samples (# samples, # features)
labels : np.array
array of labels
features_sets : list
list of features sets to check
Returns
-------
list
best feature set
"""
max_gain = 0
selected = None
warnings.filterwarnings("ignore", category=ConvergenceWarning)
@@ -377,29 +363,20 @@ class Splitter:
tuple
indices of the features selected
"""
# No feature reduction
if dataset.shape[1] == max_features:
# No feature reduction applies
return tuple(range(dataset.shape[1]))
# Random feature reduction
if self._feature_select == "random":
features_sets = self._generate_spaces(
dataset.shape[1], max_features
)
return self._select_best_set(dataset, labels, features_sets)
# return the KBest features
if self._feature_select == "best":
# Take KBest features
return (
SelectKBest(k=max_features)
.fit(dataset, labels)
.get_support(indices=True)
)
# return best features with mutual info with the label
feature_list = mutual_info_classif(dataset, labels)
return tuple(
sorted(
range(len(feature_list)), key=lambda sub: feature_list[sub]
)[-max_features:]
)
def get_subspace(
self, dataset: np.array, labels: np.array, max_features: int
@@ -474,15 +451,6 @@ class Splitter:
def partition(self, samples: np.array, node: Snode, train: bool):
"""Set the criteria to split arrays. Compute the indices of the samples
that should go to one side of the tree (up)
Parameters
----------
samples : np.array
array of samples (# samples, # features)
node : Snode
Node of the tree where partition is going to be made
train : bool
Train time - True / Test time - False
"""
# data contains the distances of every sample to every class hyperplane
# array of (m, nc) nc = # classes
@@ -574,7 +542,6 @@ class Stree(BaseEstimator, ClassifierMixin):
min_samples_split: int = 0,
max_features=None,
splitter: str = "random",
multiclass_strategy: str = "ovo",
normalize: bool = False,
):
self.max_iter = max_iter
@@ -591,7 +558,6 @@ class Stree(BaseEstimator, ClassifierMixin):
self.criterion = criterion
self.splitter = splitter
self.normalize = normalize
self.multiclass_strategy = multiclass_strategy
def _more_tags(self) -> dict:
"""Required by sklearn to supply features of the classifier
@@ -636,25 +602,7 @@ class Stree(BaseEstimator, ClassifierMixin):
f"Maximum depth has to be greater than 1... got (max_depth=\
{self.max_depth})"
)
if self.multiclass_strategy not in ["ovr", "ovo"]:
raise ValueError(
"mutliclass_strategy has to be either ovr or ovo"
f" but got {self.multiclass_strategy}"
)
if self.multiclass_strategy == "ovo":
if self.kernel == "liblinear":
raise ValueError(
"The kernel liblinear is incompatible with ovo "
"multiclass_strategy"
)
if self.split_criteria == "max_samples":
raise ValueError(
"The multiclass_strategy 'ovo' is incompatible with "
"split_criteria 'max_samples'"
)
kernels = ["liblinear", "linear", "rbf", "poly", "sigmoid"]
if self.kernel not in kernels:
raise ValueError(f"Kernel {self.kernel} not in {kernels}")
check_classification_targets(y)
X, y = check_X_y(X, y)
sample_weight = _check_sample_weight(
@@ -684,12 +632,13 @@ class Stree(BaseEstimator, ClassifierMixin):
self.n_features_ = X.shape[1]
self.n_features_in_ = X.shape[1]
self.max_features_ = self._initialize_max_features()
self.tree_ = self._train(X, y, sample_weight, 1, "root")
self.tree_ = self.train(X, y, sample_weight, 1, "root")
self._build_predictor()
self.X_ = X
self.y_ = y
return self
def _train(
def train(
self,
X: np.ndarray,
y: np.ndarray,
@@ -732,7 +681,6 @@ class Stree(BaseEstimator, ClassifierMixin):
if np.unique(y).shape[0] == 1:
# only 1 class => pure dataset
node.set_title(title + ", <pure>")
node.make_predictor()
return node
# Train the model
clf = self._build_clf()
@@ -751,20 +699,31 @@ class Stree(BaseEstimator, ClassifierMixin):
if X_U is None or X_D is None:
# didn't part anything
node.set_title(title + ", <cgaf>")
node.make_predictor()
return node
node.set_up(
self._train(X_U, y_u, sw_u, depth + 1, title + f" - Up({depth+1})")
self.train(X_U, y_u, sw_u, depth + 1, title + f" - Up({depth+1})")
)
node.set_down(
self._train(
self.train(
X_D, y_d, sw_d, depth + 1, title + f" - Down({depth+1})"
)
)
return node
def _build_predictor(self):
"""Process the leaves to make them predictors"""
def run_tree(node: Snode):
if node.is_leaf():
node.make_predictor()
return
run_tree(node.get_down())
run_tree(node.get_up())
run_tree(self.tree_)
def _build_clf(self):
"""Build the right classifier for the node"""
"""Build the correct classifier for the node"""
return (
LinearSVC(
max_iter=self.max_iter,
@@ -772,7 +731,7 @@ class Stree(BaseEstimator, ClassifierMixin):
C=self.C,
tol=self.tol,
)
if self.kernel == "liblinear"
if self.kernel == "linear"
else SVC(
kernel=self.kernel,
max_iter=self.max_iter,
@@ -780,8 +739,6 @@ class Stree(BaseEstimator, ClassifierMixin):
C=self.C,
gamma=self.gamma,
degree=self.degree,
random_state=self.random_state,
decision_function_shape=self.multiclass_strategy,
)
)
@@ -863,6 +820,36 @@ class Stree(BaseEstimator, ClassifierMixin):
)
return self.classes_[result]
def score(
self, X: np.array, y: np.array, sample_weight: np.array = None
) -> float:
"""Compute accuracy of the prediction
Parameters
----------
X : np.array
dataset of samples to make predictions
y : np.array
samples labels
sample_weight : np.array, optional
weights of the samples. Rescale C per sample, by default None
Returns
-------
float
accuracy of the prediction
"""
# sklearn check
check_is_fitted(self)
check_classification_targets(y)
X, y = check_X_y(X, y)
y_pred = self.predict(X).reshape(y.shape)
# Compute accuracy for each possible representation
_, y_true, y_pred = _check_targets(y, y_pred)
check_consistent_length(y_true, y_pred, sample_weight)
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize=True)
def nodes_leaves(self) -> tuple:
"""Compute the number of nodes and leaves in the built tree
@@ -924,12 +911,6 @@ class Stree(BaseEstimator, ClassifierMixin):
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, numbers.Integral):
if self.max_features > self.n_features_:
raise ValueError(
"Invalid value for max_features. "
"It can not be greater than number of features "
f"({self.n_features_})"
)
max_features = self.max_features
else: # float
if self.max_features > 0.0:

View File

@@ -1,10 +1,3 @@
from .Strees import Stree, Snode, Siterator, Splitter
__version__ = "1.1"
__author__ = "Ricardo Montañana Gómez"
__copyright__ = "Copyright 2020-2021, Ricardo Montañana Gómez"
__license__ = "MIT License"
__author_email__ = "ricardo.montanana@alu.uclm.es"
__all__ = ["Stree", "Snode", "Siterator", "Splitter"]

View File

@@ -8,11 +8,7 @@ from .utils import load_dataset
class Snode_test(unittest.TestCase):
def __init__(self, *args, **kwargs):
self._random_state = 1
self._clf = Stree(
random_state=self._random_state,
kernel="liblinear",
multiclass_strategy="ovr",
)
self._clf = Stree(random_state=self._random_state)
self._clf.fit(*load_dataset(self._random_state))
super().__init__(*args, **kwargs)

View File

@@ -195,14 +195,10 @@ class Splitter_test(unittest.TestCase):
[0, 3, 7, 12], # random entropy impurity
[1, 7, 9, 12], # random gini max_samples
[1, 5, 8, 12], # random gini impurity
[6, 9, 11, 12], # mutual entropy max_samples
[6, 9, 11, 12], # mutual entropy impurity
[6, 9, 11, 12], # mutual gini max_samples
[6, 9, 11, 12], # mutual gini impurity
]
X, y = load_wine(return_X_y=True)
rn = 0
for feature_select in ["best", "random", "mutual"]:
for feature_select in ["best", "random"]:
for criterion in ["entropy", "gini"]:
for criteria in [
"max_samples",
@@ -225,7 +221,7 @@ class Splitter_test(unittest.TestCase):
# criteria,
# )
# )
self.assertListEqual(expected, sorted(list(computed)))
self.assertListEqual(expected, list(computed))
self.assertListEqual(
X[:, computed].tolist(), dataset.tolist()
)

View File

@@ -14,27 +14,13 @@ from .utils import load_dataset
class Stree_test(unittest.TestCase):
def __init__(self, *args, **kwargs):
self._random_state = 1
self._kernels = ["liblinear", "linear", "rbf", "poly", "sigmoid"]
self._kernels = ["linear", "rbf", "poly"]
super().__init__(*args, **kwargs)
@classmethod
def setUp(cls):
os.environ["TESTING"] = "1"
def test_valid_kernels(self):
X, y = load_dataset()
for kernel in self._kernels:
clf = Stree(kernel=kernel, multiclass_strategy="ovr")
clf.fit(X, y)
self.assertIsNotNone(clf.tree_)
def test_bogus_kernel(self):
kernel = "other"
X, y = load_dataset()
clf = Stree(kernel=kernel)
with self.assertRaises(ValueError):
clf.fit(X, y)
def _check_tree(self, node: Snode):
"""Check recursively that the nodes that are not leaves have the
correct number of labels and its sons have the right number of elements
@@ -54,19 +40,14 @@ class Stree_test(unittest.TestCase):
# i.e. The partition algorithm didn't forget any sample
self.assertEqual(node._y.shape[0], y_down.shape[0] + y_up.shape[0])
unique_y, count_y = np.unique(node._y, return_counts=True)
labels_d, count_d = np.unique(y_down, return_counts=True)
labels_u, count_u = np.unique(y_up, return_counts=True)
dict_d = {label: count_d[i] for i, label in enumerate(labels_d)}
dict_u = {label: count_u[i] for i, label in enumerate(labels_u)}
_, count_d = np.unique(y_down, return_counts=True)
_, count_u = np.unique(y_up, return_counts=True)
#
for i in unique_y:
number_up = count_u[i]
try:
number_up = dict_u[i]
except KeyError:
number_up = 0
try:
number_down = dict_d[i]
except KeyError:
number_down = count_d[i]
except IndexError:
number_down = 0
self.assertEqual(count_y[i], number_down + number_up)
# Is the partition made the same as the prediction?
@@ -81,22 +62,14 @@ class Stree_test(unittest.TestCase):
"""Check if the tree is built the same way as predictions of models"""
warnings.filterwarnings("ignore")
for kernel in self._kernels:
clf = Stree(
kernel="sigmoid",
multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
random_state=self._random_state,
)
clf = Stree(kernel=kernel, random_state=self._random_state)
clf.fit(*load_dataset(self._random_state))
self._check_tree(clf.tree_)
def test_single_prediction(self):
X, y = load_dataset(self._random_state)
for kernel in self._kernels:
clf = Stree(
kernel=kernel,
multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
random_state=self._random_state,
)
clf = Stree(kernel=kernel, random_state=self._random_state)
yp = clf.fit(X, y).predict((X[0, :].reshape(-1, X.shape[1])))
self.assertEqual(yp[0], y[0])
@@ -104,12 +77,8 @@ class Stree_test(unittest.TestCase):
# For the first 27 elements the predictions are the same as the truth
num = 27
X, y = load_dataset(self._random_state)
for kernel in ["liblinear", "linear", "rbf", "poly"]:
clf = Stree(
kernel=kernel,
multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
random_state=self._random_state,
)
for kernel in self._kernels:
clf = Stree(kernel=kernel, random_state=self._random_state)
yp = clf.fit(X, y).predict(X[:num, :])
self.assertListEqual(y[:num].tolist(), yp.tolist())
@@ -119,11 +88,7 @@ class Stree_test(unittest.TestCase):
"""
X, y = load_dataset(self._random_state)
for kernel in self._kernels:
clf = Stree(
kernel=kernel,
multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
random_state=self._random_state,
)
clf = Stree(kernel=kernel, random_state=self._random_state)
clf.fit(X, y)
# Compute prediction line by line
yp_line = np.array([], dtype=int)
@@ -155,13 +120,9 @@ class Stree_test(unittest.TestCase):
]
computed = []
expected_string = ""
clf = Stree(
kernel="liblinear",
multiclass_strategy="ovr",
random_state=self._random_state,
)
clf = Stree(kernel="linear", random_state=self._random_state)
clf.fit(*load_dataset(self._random_state))
for node in iter(clf):
for node in clf:
computed.append(str(node))
expected_string += str(node) + "\n"
self.assertListEqual(expected, computed)
@@ -197,12 +158,7 @@ class Stree_test(unittest.TestCase):
def test_check_max_depth(self):
depths = (3, 4)
for depth in depths:
tcl = Stree(
kernel="liblinear",
multiclass_strategy="ovr",
random_state=self._random_state,
max_depth=depth,
)
tcl = Stree(random_state=self._random_state, max_depth=depth)
tcl.fit(*load_dataset(self._random_state))
self.assertEqual(depth, tcl.depth_)
@@ -223,7 +179,7 @@ class Stree_test(unittest.TestCase):
for kernel in self._kernels:
clf = Stree(
kernel=kernel,
multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
split_criteria="max_samples",
random_state=self._random_state,
)
px = [[1, 2], [5, 6], [9, 10]]
@@ -234,36 +190,26 @@ class Stree_test(unittest.TestCase):
self.assertListEqual(py, clf.classes_.tolist())
def test_multiclass_dataset(self):
warnings.filterwarnings("ignore", category=ConvergenceWarning)
warnings.filterwarnings("ignore", category=RuntimeWarning)
datasets = {
"Synt": load_dataset(random_state=self._random_state, n_classes=3),
"Iris": load_wine(return_X_y=True),
}
outcomes = {
"Synt": {
"max_samples liblinear": 0.9493333333333334,
"max_samples linear": 0.9426666666666667,
"max_samples rbf": 0.9606666666666667,
"max_samples poly": 0.9373333333333334,
"max_samples sigmoid": 0.824,
"impurity liblinear": 0.9493333333333334,
"impurity linear": 0.9426666666666667,
"impurity rbf": 0.9606666666666667,
"impurity poly": 0.9373333333333334,
"impurity sigmoid": 0.824,
"max_samples linear": 0.9606666666666667,
"max_samples rbf": 0.7133333333333334,
"max_samples poly": 0.618,
"impurity linear": 0.9606666666666667,
"impurity rbf": 0.7133333333333334,
"impurity poly": 0.618,
},
"Iris": {
"max_samples liblinear": 0.9550561797752809,
"max_samples linear": 1.0,
"max_samples rbf": 0.6685393258426966,
"max_samples poly": 0.6853932584269663,
"max_samples sigmoid": 0.6404494382022472,
"impurity liblinear": 0.9550561797752809,
"impurity linear": 1.0,
"impurity rbf": 0.6685393258426966,
"impurity poly": 0.6853932584269663,
"impurity sigmoid": 0.6404494382022472,
"max_samples rbf": 0.6910112359550562,
"max_samples poly": 0.6966292134831461,
"impurity linear": 1,
"impurity rbf": 0.6910112359550562,
"impurity poly": 0.6966292134831461,
},
}
@@ -272,22 +218,18 @@ class Stree_test(unittest.TestCase):
for criteria in ["max_samples", "impurity"]:
for kernel in self._kernels:
clf = Stree(
max_iter=1e4,
multiclass_strategy="ovr"
if kernel == "liblinear"
else "ovo",
C=55,
max_iter=1e5,
kernel=kernel,
random_state=self._random_state,
)
clf.fit(px, py)
outcome = outcomes[name][f"{criteria} {kernel}"]
# print(f'"{criteria} {kernel}": {clf.score(px, py)},')
self.assertAlmostEqual(
outcome,
clf.score(px, py),
5,
f"{name} - {criteria} - {kernel}",
)
# print(
# f"{name} {criteria} {kernel} {outcome} {clf.score(px"
# ", py)}"
# )
self.assertAlmostEqual(outcome, clf.score(px, py))
def test_max_features(self):
n_features = 16
@@ -312,12 +254,6 @@ class Stree_test(unittest.TestCase):
with self.assertRaises(ValueError):
_ = clf._initialize_max_features()
def test_wrong_max_features(self):
X, y = load_dataset(n_features=15)
clf = Stree(max_features=16)
with self.assertRaises(ValueError):
clf.fit(X, y)
def test_get_subspaces(self):
dataset = np.random.random((10, 16))
y = np.random.randint(0, 2, 10)
@@ -355,19 +291,17 @@ class Stree_test(unittest.TestCase):
clf.predict(X[:, :3])
# Tests of score
def test_score_binary(self):
X, y = load_dataset(self._random_state)
accuracies = [
0.9506666666666667,
0.9493333333333334,
0.9606666666666667,
0.9433333333333334,
0.9153333333333333,
]
for kernel, accuracy_expected in zip(self._kernels, accuracies):
clf = Stree(
random_state=self._random_state,
multiclass_strategy="ovr" if kernel == "liblinear" else "ovo",
kernel=kernel,
)
clf.fit(X, y)
@@ -379,12 +313,7 @@ class Stree_test(unittest.TestCase):
def test_score_max_features(self):
X, y = load_dataset(self._random_state)
clf = Stree(
kernel="liblinear",
multiclass_strategy="ovr",
random_state=self._random_state,
max_features=2,
)
clf = Stree(random_state=self._random_state, max_features=2)
clf.fit(X, y)
self.assertAlmostEqual(0.9453333333333334, clf.score(X, y))
@@ -396,9 +325,7 @@ class Stree_test(unittest.TestCase):
def test_multiclass_classifier_integrity(self):
"""Checks if the multiclass operation is done right"""
X, y = load_iris(return_X_y=True)
clf = Stree(
kernel="liblinear", multiclass_strategy="ovr", random_state=0
)
clf = Stree(random_state=0)
clf.fit(X, y)
score = clf.score(X, y)
# Check accuracy of the whole model
@@ -454,10 +381,10 @@ class Stree_test(unittest.TestCase):
clf2 = Stree(
kernel="rbf", random_state=self._random_state, normalize=True
)
self.assertEqual(0.966, clf.fit(X, y).score(X, y))
self.assertEqual(0.964, clf2.fit(X, y).score(X, y))
self.assertEqual(0.768, clf.fit(X, y).score(X, y))
self.assertEqual(0.814, clf2.fit(X, y).score(X, y))
X, y = load_wine(return_X_y=True)
self.assertEqual(0.6685393258426966, clf.fit(X, y).score(X, y))
self.assertEqual(0.6741573033707865, clf.fit(X, y).score(X, y))
self.assertEqual(1.0, clf2.fit(X, y).score(X, y))
def test_score_multiclass_poly(self):
@@ -475,78 +402,24 @@ class Stree_test(unittest.TestCase):
random_state=self._random_state,
normalize=True,
)
self.assertEqual(0.946, clf.fit(X, y).score(X, y))
self.assertEqual(0.972, clf2.fit(X, y).score(X, y))
self.assertEqual(0.786, clf.fit(X, y).score(X, y))
self.assertEqual(0.818, clf2.fit(X, y).score(X, y))
X, y = load_wine(return_X_y=True)
self.assertEqual(0.7808988764044944, clf.fit(X, y).score(X, y))
self.assertEqual(1.0, clf2.fit(X, y).score(X, y))
def test_score_multiclass_liblinear(self):
X, y = load_dataset(
random_state=self._random_state,
n_classes=3,
n_features=5,
n_samples=500,
)
clf = Stree(
kernel="liblinear",
multiclass_strategy="ovr",
random_state=self._random_state,
C=10,
)
clf2 = Stree(
kernel="liblinear",
multiclass_strategy="ovr",
random_state=self._random_state,
normalize=True,
)
self.assertEqual(0.968, clf.fit(X, y).score(X, y))
self.assertEqual(0.97, clf2.fit(X, y).score(X, y))
X, y = load_wine(return_X_y=True)
self.assertEqual(1.0, clf.fit(X, y).score(X, y))
self.assertEqual(1.0, clf2.fit(X, y).score(X, y))
def test_score_multiclass_sigmoid(self):
X, y = load_dataset(
random_state=self._random_state,
n_classes=3,
n_features=5,
n_samples=500,
)
clf = Stree(kernel="sigmoid", random_state=self._random_state, C=10)
clf2 = Stree(
kernel="sigmoid",
random_state=self._random_state,
normalize=True,
C=10,
)
self.assertEqual(0.796, clf.fit(X, y).score(X, y))
self.assertEqual(0.952, clf2.fit(X, y).score(X, y))
X, y = load_wine(return_X_y=True)
self.assertEqual(0.6910112359550562, clf.fit(X, y).score(X, y))
self.assertEqual(0.9662921348314607, clf2.fit(X, y).score(X, y))
self.assertEqual(0.702247191011236, clf.fit(X, y).score(X, y))
self.assertEqual(0.6067415730337079, clf2.fit(X, y).score(X, y))
def test_score_multiclass_linear(self):
warnings.filterwarnings("ignore", category=ConvergenceWarning)
warnings.filterwarnings("ignore", category=RuntimeWarning)
X, y = load_dataset(
random_state=self._random_state,
n_classes=3,
n_features=5,
n_samples=1500,
)
clf = Stree(
kernel="liblinear",
multiclass_strategy="ovr",
random_state=self._random_state,
)
clf = Stree(kernel="linear", random_state=self._random_state)
self.assertEqual(0.9533333333333334, clf.fit(X, y).score(X, y))
# Check with context based standardization
clf2 = Stree(
kernel="liblinear",
multiclass_strategy="ovr",
random_state=self._random_state,
normalize=True,
kernel="linear", random_state=self._random_state, normalize=True
)
self.assertEqual(0.9526666666666667, clf2.fit(X, y).score(X, y))
X, y = load_wine(return_X_y=True)
@@ -573,7 +446,7 @@ class Stree_test(unittest.TestCase):
]
)
y = np.array([1, 1, 1, 2, 2, 2, 5, 5, 5])
yw = np.array([1, 1, 1, 1, 1, 1, 5, 5, 5])
yw = np.array([1, 1, 1, 5, 5, 5, 5, 5, 5])
w = [1, 1, 1, 0, 0, 0, 1, 1, 1]
model1 = Stree().fit(X, y)
model2 = Stree().fit(X, y, w)
@@ -610,14 +483,14 @@ class Stree_test(unittest.TestCase):
clf = Stree(random_state=self._random_state)
clf.fit(X, y)
nodes, leaves = clf.nodes_leaves()
self.assertEqual(31, nodes)
self.assertEqual(16, leaves)
self.assertEqual(25, nodes)
self.assertEqual(13, leaves)
X, y = load_wine(return_X_y=True)
clf = Stree(random_state=self._random_state)
clf.fit(X, y)
nodes, leaves = clf.nodes_leaves()
self.assertEqual(11, nodes)
self.assertEqual(6, leaves)
self.assertEqual(9, nodes)
self.assertEqual(5, leaves)
def test_nodes_leaves_artificial(self):
n1 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test1")
@@ -636,27 +509,3 @@ class Stree_test(unittest.TestCase):
nodes, leaves = clf.nodes_leaves()
self.assertEqual(6, nodes)
self.assertEqual(2, leaves)
def test_bogus_multiclass_strategy(self):
clf = Stree(multiclass_strategy="other")
X, y = load_wine(return_X_y=True)
with self.assertRaises(ValueError):
clf.fit(X, y)
def test_multiclass_strategy(self):
X, y = load_wine(return_X_y=True)
clf_o = Stree(multiclass_strategy="ovo")
clf_r = Stree(multiclass_strategy="ovr")
score_o = clf_o.fit(X, y).score(X, y)
score_r = clf_r.fit(X, y).score(X, y)
self.assertEqual(1.0, score_o)
self.assertEqual(0.9269662921348315, score_r)
def test_incompatible_hyperparameters(self):
X, y = load_wine(return_X_y=True)
clf = Stree(kernel="liblinear", multiclass_strategy="ovo")
with self.assertRaises(ValueError):
clf.fit(X, y)
clf = Stree(multiclass_strategy="ovo", split_criteria="max_samples")
with self.assertRaises(ValueError):
clf.fit(X, y)