Compare commits


1 Commit

SHA1        Message                                     Date
7a625eee09  Change entropy function with scipy (#38)   2021-11-01 18:41:15 +01:00
12 changed files with 49 additions and 224 deletions

View File: GitHub Actions CI workflow

@@ -12,7 +12,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [macos-latest, ubuntu-latest, windows-latest]
+        os: [macos-latest, ubuntu-latest]
         python: [3.8]
     steps:

View File: CITATION.cff

@@ -1,37 +0,0 @@
-cff-version: 1.2.0
-message: "If you use this software, please cite it as below."
-authors:
-  - family-names: "Montañana"
-    given-names: "Ricardo"
-    orcid: "https://orcid.org/0000-0003-3242-5452"
-  - family-names: "Gámez"
-    given-names: "José A."
-    orcid: "https://orcid.org/0000-0003-1188-1117"
-  - family-names: "Puerta"
-    given-names: "José M."
-    orcid: "https://orcid.org/0000-0002-9164-5191"
-title: "STree"
-version: 1.2.3
-doi: 10.5281/zenodo.5504083
-date-released: 2021-11-02
-url: "https://github.com/Doctorado-ML/STree"
-preferred-citation:
-  type: article
-  authors:
-    - family-names: "Montañana"
-      given-names: "Ricardo"
-      orcid: "https://orcid.org/0000-0003-3242-5452"
-    - family-names: "Gámez"
-      given-names: "José A."
-      orcid: "https://orcid.org/0000-0003-1188-1117"
-    - family-names: "Puerta"
-      given-names: "José M."
-      orcid: "https://orcid.org/0000-0002-9164-5191"
-  doi: "10.1007/978-3-030-85713-4_6"
-  journal: "Lecture Notes in Computer Science"
-  month: 9
-  start: 54
-  end: 64
-  title: "STree: A Single Multi-class Oblique Decision Tree Based on Support Vector Machines"
-  volume: 12882
-  year: 2021

View File: Makefile

@@ -10,9 +10,6 @@ coverage: ## Run tests with coverage
 deps: ## Install dependencies
 	pip install -r requirements.txt
 
-devdeps: ## Install development dependencies
-	pip install black pip-audit flake8 mypy coverage
-
 lint: ## Lint and static-check
 	black stree
 	flake8 stree
@@ -35,9 +32,6 @@ build: ## Build package
 doc-clean: ## Update documentation
 	make -C docs --makefile=Makefile clean
 
-audit: ## Audit pip
-	pip-audit
-
 help: ## Show help message
 	@IFS=$$'\n' ; \
 	help_lines=(`fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/##/:/'`); \

View File: README.md

@@ -37,7 +37,7 @@ Can be found in [stree.readthedocs.io](https://stree.readthedocs.io/en/stable/)
 ## Hyperparameters
 
 | | **Hyperparameter** | **Type/Values** | **Default** | **Meaning** |
-| --- | ------------------- | ---------------------------------------------------------------- | ----------- | ---------------------------------------- |
+| --- | ------------------- | ------------------------------------------------------ | ----------- | ---------------------------------------- |
 | \* | C | \<float\> | 1.0 | Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. |
 | \* | kernel | {"liblinear", "linear", "poly", "rbf", "sigmoid"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of liblinear, linear, poly, rbf or sigmoid. liblinear uses the [liblinear](https://www.csie.ntu.edu.tw/~cjlin/liblinear/) library and the rest use the [libsvm](https://www.csie.ntu.edu.tw/~cjlin/libsvm/) library through scikit-learn |
 | \* | max_iter | \<int\> | 1e5 | Hard limit on iterations within solver, or -1 for no limit. |
@@ -50,7 +50,7 @@ Can be found in [stree.readthedocs.io](https://stree.readthedocs.io/en/stable/)
 | | criterion | {“gini”, “entropy”} | entropy | The function to measure the quality of a split (only used if max_features != num_features). <br>Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain. |
 | | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) for any |
 | | max_features | \<int\>, \<float\> <br><br>or {“auto”, “sqrt”, “log2”} | None | The number of features to consider when looking for the split:<br>If int, then consider max_features features at each split.<br>If float, then max_features is a fraction and int(max_features \* n_features) features are considered at each split.<br>If “auto”, then max_features=sqrt(n_features).<br>If “sqrt”, then max_features=sqrt(n_features).<br>If “log2”, then max_features=log2(n_features).<br>If None, then max_features=n_features. |
-| | splitter | {"best", "random", "trandom", "mutual", "cfs", "fcbf", "iwss"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features). Supported strategies are: **"best"**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **"random"**: The algorithm generates 5 candidates and chooses the best (max. info. gain) of them. **"trandom"**: The algorithm generates only one random combination. **"mutual"**: Chooses the best features w.r.t. their mutual info with the label. **"cfs"**: Apply Correlation-based Feature Selection. **"fcbf"**: Apply Fast Correlation-Based Filter. **"iwss"**: IWSS based algorithm |
+| | splitter | {"best", "random", "mutual", "cfs", "fcbf", "iwss"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features). Supported strategies are: **"best"**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **"random"**: The algorithm generates 5 candidates and chooses the best (max. info. gain) of them. **"trandom"**: The algorithm generates a true random combination. **"mutual"**: Chooses the best features w.r.t. their mutual info with the label. **"cfs"**: Apply Correlation-based Feature Selection. **"fcbf"**: Apply Fast Correlation-Based Filter. **"iwss"**: IWSS based algorithm |
 | | normalize | \<bool\> | False | If standardization of features should be applied on each node with the samples that reach it |
 | \* | multiclass_strategy | {"ovo", "ovr"} | "ovo" | Strategy to use with multiclass datasets, **"ovo"**: one versus one. **"ovr"**: one versus rest |
@@ -73,7 +73,3 @@ python -m unittest -v stree.tests
 
 ## License
 STree is [MIT](https://github.com/doctorado-ml/stree/blob/master/LICENSE) licensed
-
-## Reference
-
-R. Montañana, J. A. Gámez, J. M. Puerta, "STree: a single multi-class oblique decision tree based on support vector machines.", 2021 LNAI 12882, pg. 54-64
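
A quick-start sketch assembled only from the hyperparameters documented in the table above; the wine dataset and the specific values are illustrative, not taken from the repository:

```python
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
from stree import Stree

X, y = load_wine(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

clf = Stree(
    kernel="linear",            # {"liblinear", "linear", "poly", "rbf", "sigmoid"}
    C=1.0,                      # regularization strength is inversely proportional to C
    splitter="random",          # feature-set strategy evaluated at each node
    max_features="sqrt",        # consider sqrt(n_features) candidates per split
    multiclass_strategy="ovo",  # one-versus-one for multiclass targets
)
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))
```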

View File: docs/source/conf.py

@@ -54,4 +54,4 @@ html_theme = "sphinx_rtd_theme"
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = []
+html_static_path = ["_static"]

View File: docs/source/hyperparameters.md

@@ -1,7 +1,7 @@
 # Hyperparameters
 
 | | **Hyperparameter** | **Type/Values** | **Default** | **Meaning** |
-| --- | ------------------- | ---------------------------------------------------------------- | ----------- | ---------------------------------------- |
+| --- | ------------------- | ------------------------------------------------------ | ----------- | ---------------------------------------- |
 | \* | C | \<float\> | 1.0 | Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. |
 | \* | kernel | {"liblinear", "linear", "poly", "rbf", "sigmoid"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of liblinear, linear, poly, rbf or sigmoid. liblinear uses the [liblinear](https://www.csie.ntu.edu.tw/~cjlin/liblinear/) library and the rest use the [libsvm](https://www.csie.ntu.edu.tw/~cjlin/libsvm/) library through scikit-learn |
 | \* | max_iter | \<int\> | 1e5 | Hard limit on iterations within solver, or -1 for no limit. |
@@ -14,7 +14,7 @@
 | | criterion | {“gini”, “entropy”} | entropy | The function to measure the quality of a split (only used if max_features != num_features). <br>Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain. |
 | | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) for any |
 | | max_features | \<int\>, \<float\> <br><br>or {“auto”, “sqrt”, “log2”} | None | The number of features to consider when looking for the split:<br>If int, then consider max_features features at each split.<br>If float, then max_features is a fraction and int(max_features \* n_features) features are considered at each split.<br>If “auto”, then max_features=sqrt(n_features).<br>If “sqrt”, then max_features=sqrt(n_features).<br>If “log2”, then max_features=log2(n_features).<br>If None, then max_features=n_features. |
-| | splitter | {"best", "random", "trandom", "mutual", "cfs", "fcbf", "iwss"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features). Supported strategies are: **"best"**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **"random"**: The algorithm generates 5 candidates and chooses the best (max. info. gain) of them. **"trandom"**: The algorithm generates only one random combination. **"mutual"**: Chooses the best features w.r.t. their mutual info with the label. **"cfs"**: Apply Correlation-based Feature Selection. **"fcbf"**: Apply Fast Correlation-Based Filter. **"iwss"**: IWSS based algorithm |
+| | splitter | {"best", "random", "mutual", "cfs", "fcbf", "iwss"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features). Supported strategies are: **"best"**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **"random"**: The algorithm generates 5 candidates and chooses the best (max. info. gain) of them. **"trandom"**: The algorithm generates a true random combination. **"mutual"**: Chooses the best features w.r.t. their mutual info with the label. **"cfs"**: Apply Correlation-based Feature Selection. **"fcbf"**: Apply Fast Correlation-Based Filter. **"iwss"**: IWSS based algorithm |
 | | normalize | \<bool\> | False | If standardization of features should be applied on each node with the samples that reach it |
 | \* | multiclass_strategy | {"ovo", "ovr"} | "ovo" | Strategy to use with multiclass datasets, **"ovo"**: one versus one. **"ovr"**: one versus rest |
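
The max_features row above maps each option to a feature count; a tiny worked example, assuming n_features = 13 as in the wine dataset:

```python
import math

n_features = 13
print(int(0.5 * n_features))       # float: a fraction, int(0.5 * 13) = 6 features
print(int(math.sqrt(n_features)))  # "auto" and "sqrt": int(sqrt(13)) = 3 features
print(int(math.log2(n_features)))  # "log2": int(log2(13)) = 3 features
print(n_features)                  # None: all 13 features
```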

View File: setup.py

@@ -1,5 +1,4 @@
 import setuptools
-import os
 
 
 def readme():
@@ -9,8 +8,7 @@ def readme():
 
 def get_data(field):
     item = ""
-    file_name = "_version.py" if field == "version" else "__init__.py"
-    with open(os.path.join("stree", file_name)) as f:
+    with open("stree/__init__.py") as f:
         for line in f.readlines():
             if line.startswith(f"__{field}__"):
                 delim = '"' if '"' in line else "'"

View File: stree/Splitter.py

@@ -145,28 +145,6 @@ class Snode:
         except IndexError:
             self._class = None
 
-    def graph(self):
-        """
-        Return a string representing the node in graphviz format
-        """
-        output = ""
-        count_values = np.unique(self._y, return_counts=True)
-        if self.is_leaf():
-            output += (
-                f'N{id(self)} [shape=box style=filled label="'
-                f"class={self._class} impurity={self._impurity:.3f} "
-                f'classes={count_values[0]} samples={count_values[1]}"];\n'
-            )
-        else:
-            output += (
-                f'N{id(self)} [label="#features={len(self._features)} '
-                f"classes={count_values[0]} samples={count_values[1]} "
-                f'({sum(count_values[1])})" fontcolor=black];\n'
-            )
-            output += f"N{id(self)} -> N{id(self.get_up())} [color=black];\n"
-            output += f"N{id(self)} -> N{id(self.get_down())} [color=black];\n"
-        return output
-
     def __str__(self) -> str:
         count_values = np.unique(self._y, return_counts=True)
         if self.is_leaf():
@@ -224,8 +202,7 @@ class Splitter:
         max_features < num_features). Supported strategies are: “best”: sklearn
         SelectKBest algorithm is used in every node to choose the max_features
         best features. “random”: The algorithm generates 5 candidates and
-        choose the best (max. info. gain) of them. “trandom”: The algorithm
-        generates only one random combination. "mutual": Chooses the best
+        choose the best (max. info. gain) of them. "mutual": Chooses the best
         features w.r.t. their mutual info with the label. "cfs": Apply
         Correlation-based Feature Selection. "fcbf": Apply Fast Correlation-
         Based, by default None
@@ -389,8 +366,9 @@ class Splitter:
             .get_support(indices=True)
         )
 
+    @staticmethod
     def _fs_mutual(
-        self, dataset: np.array, labels: np.array, max_features: int
+        dataset: np.array, labels: np.array, max_features: int
     ) -> tuple:
         """Return the best features with mutual information with labels
@@ -410,9 +388,7 @@ class Splitter:
             indices of the features selected
         """
         # return best features with mutual info with the label
-        feature_list = mutual_info_classif(
-            dataset, labels, random_state=self._random_state
-        )
+        feature_list = mutual_info_classif(dataset, labels)
         return tuple(
             sorted(
                 range(len(feature_list)), key=lambda sub: feature_list[sub]
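
The call above boils down to ranking features by mutual information with the labels (the hunk truncates the sorted(...) expression, so the reverse/slice details below are assumptions); a standalone sketch:

```python
import numpy as np
from sklearn.datasets import load_wine
from sklearn.feature_selection import mutual_info_classif

X, y = load_wine(return_X_y=True)
max_features = 3

# After this change the scores are computed without a fixed random_state,
# so repeated runs may rank near-tied features differently.
scores = mutual_info_classif(X, y)
best = tuple(
    sorted(range(len(scores)), key=lambda i: scores[i], reverse=True)[:max_features]
)
print(best)  # indices of the three most informative features
```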
@@ -502,18 +478,6 @@ class Splitter:
 
     @staticmethod
     def _entropy(y: np.array) -> float:
-        """Compute entropy of a labels set
-
-        Parameters
-        ----------
-        y : np.array
-            set of labels
-
-        Returns
-        -------
-        float
-            entropy
-        """
         n_labels = len(y)
         if n_labels <= 1:
             return 0
@@ -521,13 +485,10 @@ class Splitter:
         proportions = counts / n_labels
         n_classes = np.count_nonzero(proportions)
         if n_classes <= 1:
-            return 0
-        entropy = 0.0
-        # Compute standard entropy.
-        for prop in proportions:
-            if prop != 0.0:
-                entropy -= prop * log(prop, n_classes)
-        return entropy
+            return 0.0
+        from scipy.stats import entropy
+
+        return entropy(y, base=n_classes)
 
     def information_gain(
         self, labels: np.array, labels_up: np.array, labels_dn: np.array
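
The removed loop and scipy.stats.entropy agree when scipy is handed the class distribution, since scipy normalizes its pk argument; a check of that equivalence (note the new code above passes the raw label array y, which scipy will likewise normalize as if it were a distribution):

```python
import numpy as np
from scipy.stats import entropy

y = np.array([0, 0, 1, 1, 1, 2])
_, counts = np.unique(y, return_counts=True)
proportions = counts / len(y)
n_classes = np.count_nonzero(proportions)

# The removed loop: base-n_classes Shannon entropy over class proportions.
manual = -sum(p * np.log(p) / np.log(n_classes) for p in proportions if p != 0.0)

# Equivalent scipy call: counts are normalized to proportions internally.
print(manual, entropy(counts, base=n_classes))  # both ≈ 0.9206
```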

View File: stree/Strees.py

@@ -17,7 +17,6 @@ from sklearn.utils.validation import (
     _check_sample_weight,
 )
 from .Splitter import Splitter, Snode, Siterator
-from ._version import __version__
 
 
 class Stree(BaseEstimator, ClassifierMixin):
@@ -83,8 +82,7 @@ class Stree(BaseEstimator, ClassifierMixin):
         max_features < num_features). Supported strategies are: “best”: sklearn
         SelectKBest algorithm is used in every node to choose the max_features
         best features. “random”: The algorithm generates 5 candidates and
-        choose the best (max. info. gain) of them. “trandom”: The algorithm
-        generates only one random combination. "mutual": Chooses the best
+        choose the best (max. info. gain) of them. "mutual": Chooses the best
         features w.r.t. their mutual info with the label. "cfs": Apply
         Correlation-based Feature Selection. "fcbf": Apply Fast Correlation-
         Based , by default "random"
@@ -130,7 +128,7 @@ class Stree(BaseEstimator, ClassifierMixin):
 
     References
     ----------
     R. Montañana, J. A. Gámez, J. M. Puerta, "STree: a single multi-class
-    oblique decision tree based on support vector machines.", 2021 LNAI 12882
+    oblique decision tree based on support vector machines.", 2021 LNAI...
 
     """
@@ -170,11 +168,6 @@ class Stree(BaseEstimator, ClassifierMixin):
         self.normalize = normalize
         self.multiclass_strategy = multiclass_strategy
 
-    @staticmethod
-    def version() -> str:
-        """Return the version of the package."""
-        return __version__
-
     def _more_tags(self) -> dict:
         """Required by sklearn to supply features of the classifier
         make mandatory the labels array
@@ -476,23 +469,6 @@ class Stree(BaseEstimator, ClassifierMixin):
             tree = None
         return Siterator(tree)
 
-    def graph(self, title="") -> str:
-        """Graphviz code representing the tree
-
-        Returns
-        -------
-        str
-            graphviz code
-        """
-        output = (
-            "digraph STree {\nlabel=<STree "
-            f"{title}>\nfontsize=30\nfontcolor=blue\nlabelloc=t\n"
-        )
-        for node in self:
-            output += node.graph()
-        output += "}\n"
-        return output
-
     def __str__(self) -> str:
         """String representation of the tree

View File: stree/__init__.py

@@ -1,5 +1,7 @@
 from .Strees import Stree, Siterator
 
+__version__ = "1.2.1"
+
 __author__ = "Ricardo Montañana Gómez"
 __copyright__ = "Copyright 2020-2021, Ricardo Montañana Gómez"
 __license__ = "MIT License"
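
With _version.py gone (next file), the version string is read straight from the package:

```python
import stree

print(stree.__version__)  # "1.2.1" on the right-hand side of this diff
```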

View File: stree/_version.py

@@ -1 +0,0 @@
-__version__ = "1.2.4"

View File: stree/tests/Stree_test.py

@@ -10,7 +10,6 @@ from sklearn.svm import LinearSVC
 from stree import Stree
 from stree.Splitter import Snode
 from .utils import load_dataset
-from .._version import __version__
 
 
 class Stree_test(unittest.TestCase):
@@ -358,7 +357,6 @@ class Stree_test(unittest.TestCase):
 
     # Tests of score
     def test_score_binary(self):
-        """Check score for binary classification."""
         X, y = load_dataset(self._random_state)
         accuracies = [
             0.9506666666666667,
@@ -381,7 +379,6 @@ class Stree_test(unittest.TestCase):
         self.assertAlmostEqual(accuracy_expected, accuracy_score)
 
     def test_score_max_features(self):
-        """Check score using max_features."""
         X, y = load_dataset(self._random_state)
         clf = Stree(
             kernel="liblinear",
@@ -393,7 +390,6 @@ class Stree_test(unittest.TestCase):
         self.assertAlmostEqual(0.9453333333333334, clf.score(X, y))
 
     def test_bogus_splitter_parameter(self):
-        """Check that bogus splitter parameter raises exception."""
         clf = Stree(splitter="duck")
         with self.assertRaises(ValueError):
             clf.fit(*load_dataset())
@@ -449,7 +445,6 @@ class Stree_test(unittest.TestCase):
         self.assertListEqual([47], resdn[1].tolist())
 
     def test_score_multiclass_rbf(self):
-        """Test score for multiclass classification with rbf kernel."""
         X, y = load_dataset(
             random_state=self._random_state,
             n_classes=3,
@@ -467,7 +462,6 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(1.0, clf2.fit(X, y).score(X, y))
 
     def test_score_multiclass_poly(self):
-        """Test score for multiclass classification with poly kernel."""
         X, y = load_dataset(
             random_state=self._random_state,
             n_classes=3,
@@ -489,7 +483,6 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(1.0, clf2.fit(X, y).score(X, y))
 
     def test_score_multiclass_liblinear(self):
-        """Test score for multiclass classification with liblinear kernel."""
         X, y = load_dataset(
             random_state=self._random_state,
             n_classes=3,
@@ -515,7 +508,6 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(1.0, clf2.fit(X, y).score(X, y))
 
     def test_score_multiclass_sigmoid(self):
-        """Test score for multiclass classification with sigmoid kernel."""
         X, y = load_dataset(
             random_state=self._random_state,
             n_classes=3,
@@ -536,7 +528,6 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(0.9662921348314607, clf2.fit(X, y).score(X, y))
 
     def test_score_multiclass_linear(self):
-        """Test score for multiclass classification with linear kernel."""
         warnings.filterwarnings("ignore", category=ConvergenceWarning)
         warnings.filterwarnings("ignore", category=RuntimeWarning)
         X, y = load_dataset(
@@ -564,13 +555,11 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(1.0, clf2.fit(X, y).score(X, y))
 
     def test_zero_all_sample_weights(self):
-        """Test exception raises when all sample weights are zero."""
         X, y = load_dataset(self._random_state)
         with self.assertRaises(ValueError):
             Stree().fit(X, y, np.zeros(len(y)))
 
     def test_mask_samples_weighted_zero(self):
-        """Check that the weighted zero samples are masked."""
         X = np.array(
             [
                 [1, 1],
@@ -598,7 +587,6 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(model2.score(X, y, w), 1)
 
     def test_depth(self):
-        """Check depth of the tree."""
         X, y = load_dataset(
             random_state=self._random_state,
             n_classes=3,
@@ -614,7 +602,6 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(4, clf.depth_)
 
     def test_nodes_leaves(self):
-        """Check number of nodes and leaves."""
         X, y = load_dataset(
             random_state=self._random_state,
             n_classes=3,
@@ -634,7 +621,6 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(6, leaves)
 
     def test_nodes_leaves_artificial(self):
-        """Check leaves of artificial dataset."""
         n1 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test1")
         n2 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test2")
         n3 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test3")
@@ -653,14 +639,12 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(2, leaves)
 
     def test_bogus_multiclass_strategy(self):
-        """Check invalid multiclass strategy."""
         clf = Stree(multiclass_strategy="other")
         X, y = load_wine(return_X_y=True)
         with self.assertRaises(ValueError):
             clf.fit(X, y)
 
     def test_multiclass_strategy(self):
-        """Check multiclass strategy."""
         X, y = load_wine(return_X_y=True)
         clf_o = Stree(multiclass_strategy="ovo")
         clf_r = Stree(multiclass_strategy="ovr")
@@ -670,7 +654,6 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(0.9269662921348315, score_r)
 
     def test_incompatible_hyperparameters(self):
-        """Check incompatible hyperparameters."""
        X, y = load_wine(return_X_y=True)
        clf = Stree(kernel="liblinear", multiclass_strategy="ovo")
        with self.assertRaises(ValueError):
@@ -678,50 +661,3 @@ class Stree_test(unittest.TestCase):
         clf = Stree(multiclass_strategy="ovo", split_criteria="max_samples")
         with self.assertRaises(ValueError):
             clf.fit(X, y)
-
-    def test_version(self):
-        """Check STree version."""
-        clf = Stree()
-        self.assertEqual(__version__, clf.version())
-
-    def test_graph(self):
-        """Check graphviz representation of the tree."""
-        X, y = load_wine(return_X_y=True)
-        clf = Stree(random_state=self._random_state)
-        expected_head = (
-            "digraph STree {\nlabel=<STree >\nfontsize=30\n"
-            "fontcolor=blue\nlabelloc=t\n"
-        )
-        expected_tail = (
-            ' [shape=box style=filled label="class=1 impurity=0.000 '
-            'classes=[1] samples=[1]"];\n}\n'
-        )
-        self.assertEqual(clf.graph(), expected_head + "}\n")
-        clf.fit(X, y)
-        computed = clf.graph()
-        computed_head = computed[: len(expected_head)]
-        num = -len(expected_tail)
-        computed_tail = computed[num:]
-        self.assertEqual(computed_head, expected_head)
-        self.assertEqual(computed_tail, expected_tail)
-
-    def test_graph_title(self):
-        X, y = load_wine(return_X_y=True)
-        clf = Stree(random_state=self._random_state)
-        expected_head = (
-            "digraph STree {\nlabel=<STree Sample title>\nfontsize=30\n"
-            "fontcolor=blue\nlabelloc=t\n"
-        )
-        expected_tail = (
-            ' [shape=box style=filled label="class=1 impurity=0.000 '
-            'classes=[1] samples=[1]"];\n}\n'
-        )
-        self.assertEqual(clf.graph("Sample title"), expected_head + "}\n")
-        clf.fit(X, y)
-        computed = clf.graph("Sample title")
-        computed_head = computed[: len(expected_head)]
-        num = -len(expected_tail)
-        computed_tail = computed[num:]
-        self.assertEqual(computed_head, expected_head)
-        self.assertEqual(computed_tail, expected_tail)