Mirror of https://github.com/Doctorado-ML/STree.git (synced 2025-08-17 16:36:01 +00:00)

Compare commits: entropy_fu ... v1.2.4 (11 commits)

Commits (SHA1):
93be8a89a8
82838fa3e0
f0b2ce3c7b
00ed57c015
08222f109e
cc931d8547
b044a057df
fc48bc8ba4
8251f07674
0b15a5af11
28d905368b
.github/workflows/main.yml (vendored), 2 changed lines

@@ -12,7 +12,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [macos-latest, ubuntu-latest]
+        os: [macos-latest, ubuntu-latest, windows-latest]
         python: [3.8]

     steps:
CITATION.cff, 37 lines (new file)

@@ -0,0 +1,37 @@
+cff-version: 1.2.0
+message: "If you use this software, please cite it as below."
+authors:
+  - family-names: "Montañana"
+    given-names: "Ricardo"
+    orcid: "https://orcid.org/0000-0003-3242-5452"
+  - family-names: "Gámez"
+    given-names: "José A."
+    orcid: "https://orcid.org/0000-0003-1188-1117"
+  - family-names: "Puerta"
+    given-names: "José M."
+    orcid: "https://orcid.org/0000-0002-9164-5191"
+title: "STree"
+version: 1.2.3
+doi: 10.5281/zenodo.5504083
+date-released: 2021-11-02
+url: "https://github.com/Doctorado-ML/STree"
+preferred-citation:
+  type: article
+  authors:
+    - family-names: "Montañana"
+      given-names: "Ricardo"
+      orcid: "https://orcid.org/0000-0003-3242-5452"
+    - family-names: "Gámez"
+      given-names: "José A."
+      orcid: "https://orcid.org/0000-0003-1188-1117"
+    - family-names: "Puerta"
+      given-names: "José M."
+      orcid: "https://orcid.org/0000-0002-9164-5191"
+  doi: "10.1007/978-3-030-85713-4_6"
+  journal: "Lecture Notes in Computer Science"
+  month: 9
+  start: 54
+  end: 64
+  title: "STree: A Single Multi-class Oblique Decision Tree Based on Support Vector Machines"
+  volume: 12882
+  year: 2021
Makefile, 6 changed lines

@@ -10,6 +10,9 @@ coverage: ## Run tests with coverage
 deps: ## Install dependencies
 	pip install -r requirements.txt

+devdeps: ## Install development dependencies
+	pip install black pip-audit flake8 mypy coverage
+
 lint: ## Lint and static-check
 	black stree
 	flake8 stree

@@ -32,6 +35,9 @@ build: ## Build package
 doc-clean: ## Update documentation
 	make -C docs --makefile=Makefile clean

+audit: ## Audit pip
+	pip-audit
+
 help: ## Show help message
 	@IFS=$$'\n' ; \
 	help_lines=(`fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/##/:/'`); \
README.md

@@ -37,7 +37,7 @@ Can be found in [stree.readthedocs.io](https://stree.readthedocs.io/en/stable/)
 ## Hyperparameters

 | | **Hyperparameter** | **Type/Values** | **Default** | **Meaning** |
-| --- | ------------------- | ------------------------------------------------------ | ----------- | ----------- |
+| --- | ------------------- | --------------------------------------------------------------- | ----------- | ----------- |
 | \* | C | \<float\> | 1.0 | Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. |
 | \* | kernel | {"liblinear", "linear", "poly", "rbf", "sigmoid"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of ‘liblinear’, ‘linear’, ‘poly’ or ‘rbf’. liblinear uses [liblinear](https://www.csie.ntu.edu.tw/~cjlin/liblinear/) library and the rest uses [libsvm](https://www.csie.ntu.edu.tw/~cjlin/libsvm/) library through scikit-learn library |
 | \* | max_iter | \<int\> | 1e5 | Hard limit on iterations within solver, or -1 for no limit. |

@@ -50,7 +50,7 @@ Can be found in [stree.readthedocs.io](https://stree.readthedocs.io/en/stable/)
 | | criterion | {“gini”, “entropy”} | entropy | The function to measure the quality of a split (only used if max_features != num_features). <br>Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain. |
 | | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) for any |
 | | max_features | \<int\>, \<float\> <br><br>or {“auto”, “sqrt”, “log2”} | None | The number of features to consider when looking for the split:<br>If int, then consider max_features features at each split.<br>If float, then max_features is a fraction and int(max_features \* n_features) features are considered at each split.<br>If “auto”, then max_features=sqrt(n_features).<br>If “sqrt”, then max_features=sqrt(n_features).<br>If “log2”, then max_features=log2(n_features).<br>If None, then max_features=n_features. |
-| | splitter | {"best", "random", "mutual", "cfs", "fcbf", "iwss"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features). Supported strategies are: **“best”**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **“random”**: The algorithm generates 5 candidates and choose the best (max. info. gain) of them. **“trandom”**: The algorithm generates a true random combination. **"mutual"**: Chooses the best features w.r.t. their mutual info with the label. **"cfs"**: Apply Correlation-based Feature Selection. **"fcbf"**: Apply Fast Correlation-Based Filter. **"iwss"**: IWSS based algorithm |
+| | splitter | {"best", "random", "trandom", "mutual", "cfs", "fcbf", "iwss"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features). Supported strategies are: **“best”**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **“random”**: The algorithm generates 5 candidates and choose the best (max. info. gain) of them. **“trandom”**: The algorithm generates only one random combination. **"mutual"**: Chooses the best features w.r.t. their mutual info with the label. **"cfs"**: Apply Correlation-based Feature Selection. **"fcbf"**: Apply Fast Correlation-Based Filter. **"iwss"**: IWSS based algorithm |
 | | normalize | \<bool\> | False | If standardization of features should be applied on each node with the samples that reach it |
 | \* | multiclass_strategy | {"ovo", "ovr"} | "ovo" | Strategy to use with multiclass datasets, **"ovo"**: one versus one. **"ovr"**: one versus rest |

@@ -73,3 +73,7 @@ python -m unittest -v stree.tests
 ## License

 STree is [MIT](https://github.com/doctorado-ml/stree/blob/master/LICENSE) licensed
+
+## Reference
+
+R. Montañana, J. A. Gámez, J. M. Puerta, "STree: a single multi-class oblique decision tree based on support vector machines.", 2021 LNAI 12882, pg. 54-64
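Not part of the diff: a minimal usage sketch of the hyperparameters documented above, with illustrative values only (it assumes STree is installed and borrows scikit-learn's wine dataset).

from sklearn.datasets import load_wine
from stree import Stree

X, y = load_wine(return_X_y=True)
clf = Stree(
    kernel="rbf",               # one of liblinear, linear, poly, rbf, sigmoid
    C=1.0,                      # regularization strength is inversely proportional to C
    splitter="trandom",         # new value: a single random feature combination per node
    max_features="sqrt",        # consider sqrt(n_features) candidate features per split
    normalize=True,             # standardize the samples that reach each node
    multiclass_strategy="ovr",  # one versus rest
    random_state=0,
)
print(clf.fit(X, y).score(X, y))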
Sphinx conf.py (docs)

@@ -54,4 +54,4 @@ html_theme = "sphinx_rtd_theme"
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ["_static"]
+html_static_path = []
docs hyperparameters page (same table as the README)

@@ -1,7 +1,7 @@
 # Hyperparameters

 | | **Hyperparameter** | **Type/Values** | **Default** | **Meaning** |
-| --- | ------------------- | ------------------------------------------------------ | ----------- | ----------- |
+| --- | ------------------- | --------------------------------------------------------------- | ----------- | ----------- |
 | \* | C | \<float\> | 1.0 | Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. |
 | \* | kernel | {"liblinear", "linear", "poly", "rbf", "sigmoid"} | linear | Specifies the kernel type to be used in the algorithm. It must be one of ‘liblinear’, ‘linear’, ‘poly’ or ‘rbf’. liblinear uses [liblinear](https://www.csie.ntu.edu.tw/~cjlin/liblinear/) library and the rest uses [libsvm](https://www.csie.ntu.edu.tw/~cjlin/libsvm/) library through scikit-learn library |
 | \* | max_iter | \<int\> | 1e5 | Hard limit on iterations within solver, or -1 for no limit. |

@@ -14,7 +14,7 @@
 | | criterion | {“gini”, “entropy”} | entropy | The function to measure the quality of a split (only used if max_features != num_features). <br>Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain. |
 | | min_samples_split | \<int\> | 0 | The minimum number of samples required to split an internal node. 0 (default) for any |
 | | max_features | \<int\>, \<float\> <br><br>or {“auto”, “sqrt”, “log2”} | None | The number of features to consider when looking for the split:<br>If int, then consider max_features features at each split.<br>If float, then max_features is a fraction and int(max_features \* n_features) features are considered at each split.<br>If “auto”, then max_features=sqrt(n_features).<br>If “sqrt”, then max_features=sqrt(n_features).<br>If “log2”, then max_features=log2(n_features).<br>If None, then max_features=n_features. |
-| | splitter | {"best", "random", "mutual", "cfs", "fcbf", "iwss"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features). Supported strategies are: **“best”**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **“random”**: The algorithm generates 5 candidates and choose the best (max. info. gain) of them. **“trandom”**: The algorithm generates a true random combination. **"mutual"**: Chooses the best features w.r.t. their mutual info with the label. **"cfs"**: Apply Correlation-based Feature Selection. **"fcbf"**: Apply Fast Correlation-Based Filter. **"iwss"**: IWSS based algorithm |
+| | splitter | {"best", "random", "trandom", "mutual", "cfs", "fcbf", "iwss"} | "random" | The strategy used to choose the feature set at each node (only used if max_features < num_features). Supported strategies are: **“best”**: sklearn SelectKBest algorithm is used in every node to choose the max_features best features. **“random”**: The algorithm generates 5 candidates and choose the best (max. info. gain) of them. **“trandom”**: The algorithm generates only one random combination. **"mutual"**: Chooses the best features w.r.t. their mutual info with the label. **"cfs"**: Apply Correlation-based Feature Selection. **"fcbf"**: Apply Fast Correlation-Based Filter. **"iwss"**: IWSS based algorithm |
 | | normalize | \<bool\> | False | If standardization of features should be applied on each node with the samples that reach it |
 | \* | multiclass_strategy | {"ovo", "ovr"} | "ovo" | Strategy to use with multiclass datasets, **"ovo"**: one versus one. **"ovr"**: one versus rest |
setup.py, 4 changed lines

@@ -1,4 +1,5 @@
 import setuptools
+import os


 def readme():

@@ -8,7 +9,8 @@ def readme():

 def get_data(field):
     item = ""
-    with open("stree/__init__.py") as f:
+    file_name = "_version.py" if field == "version" else "__init__.py"
+    with open(os.path.join("stree", file_name)) as f:
         for line in f.readlines():
             if line.startswith(f"__{field}__"):
                 delim = '"' if '"' in line else "'"
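Not part of the diff: a hedged sketch of the whole helper after this change; the tail of get_data() is outside the hunk, so the value extraction and return shown here are assumptions.

import os


def get_data(field):
    item = ""
    # "version" is now read from stree/_version.py, everything else from stree/__init__.py
    file_name = "_version.py" if field == "version" else "__init__.py"
    with open(os.path.join("stree", file_name)) as f:
        for line in f.readlines():
            if line.startswith(f"__{field}__"):
                delim = '"' if '"' in line else "'"
                item = line.split(delim)[1]  # assumed: text between the first pair of quotes
                break
    return item


# e.g. get_data("version") would return "1.2.4", get_data("author") still reads __init__.py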
stree/Splitter.py

@@ -145,6 +145,28 @@ class Snode:
         except IndexError:
             self._class = None

+    def graph(self):
+        """
+        Return a string representing the node in graphviz format
+        """
+        output = ""
+        count_values = np.unique(self._y, return_counts=True)
+        if self.is_leaf():
+            output += (
+                f'N{id(self)} [shape=box style=filled label="'
+                f"class={self._class} impurity={self._impurity:.3f} "
+                f'classes={count_values[0]} samples={count_values[1]}"];\n'
+            )
+        else:
+            output += (
+                f'N{id(self)} [label="#features={len(self._features)} '
+                f"classes={count_values[0]} samples={count_values[1]} "
+                f'({sum(count_values[1])})" fontcolor=black];\n'
+            )
+            output += f"N{id(self)} -> N{id(self.get_up())} [color=black];\n"
+            output += f"N{id(self)} -> N{id(self.get_down())} [color=black];\n"
+        return output
+
     def __str__(self) -> str:
         count_values = np.unique(self._y, return_counts=True)
         if self.is_leaf():

@@ -202,7 +224,8 @@ class Splitter:
         max_features < num_features). Supported strategies are: “best”: sklearn
         SelectKBest algorithm is used in every node to choose the max_features
         best features. “random”: The algorithm generates 5 candidates and
-        choose the best (max. info. gain) of them. "mutual": Chooses the best
+        choose the best (max. info. gain) of them. “trandom”: The algorithm
+        generates only one random combination. "mutual": Chooses the best
         features w.r.t. their mutual info with the label. "cfs": Apply
         Correlation-based Feature Selection. "fcbf": Apply Fast Correlation-
         Based, by default None

@@ -366,9 +389,8 @@
             .get_support(indices=True)
         )

-    @staticmethod
     def _fs_mutual(
-        dataset: np.array, labels: np.array, max_features: int
+        self, dataset: np.array, labels: np.array, max_features: int
     ) -> tuple:
         """Return the best features with mutual information with labels

@@ -388,7 +410,9 @@
             indices of the features selected
         """
         # return best features with mutual info with the label
-        feature_list = mutual_info_classif(dataset, labels)
+        feature_list = mutual_info_classif(
+            dataset, labels, random_state=self._random_state
+        )
         return tuple(
             sorted(
                 range(len(feature_list)), key=lambda sub: feature_list[sub]
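Not part of the diff: an illustration of the reseeded ranking that _fs_mutual relies on, calling scikit-learn's mutual_info_classif directly; the dataset, the max_features value, and the keep-the-top-scores selection are assumptions, since the rest of the method lies outside the hunk.

import numpy as np
from sklearn.datasets import load_wine
from sklearn.feature_selection import mutual_info_classif

X, y = load_wine(return_X_y=True)
scores = mutual_info_classif(X, y, random_state=0)  # fixed seed makes the scores repeatable
max_features = 3
# indices of the highest-scoring features, best first
best = sorted(range(len(scores)), key=lambda i: scores[i], reverse=True)[:max_features]
print(best, np.round(scores[best], 3))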
stree/Strees.py

@@ -17,6 +17,7 @@ from sklearn.utils.validation import (
     _check_sample_weight,
 )
 from .Splitter import Splitter, Snode, Siterator
+from ._version import __version__


 class Stree(BaseEstimator, ClassifierMixin):

@@ -82,7 +83,8 @@ class Stree(BaseEstimator, ClassifierMixin):
         max_features < num_features). Supported strategies are: “best”: sklearn
         SelectKBest algorithm is used in every node to choose the max_features
         best features. “random”: The algorithm generates 5 candidates and
-        choose the best (max. info. gain) of them. "mutual": Chooses the best
+        choose the best (max. info. gain) of them. “trandom”: The algorithm
+        generates only one random combination. "mutual": Chooses the best
         features w.r.t. their mutual info with the label. "cfs": Apply
         Correlation-based Feature Selection. "fcbf": Apply Fast Correlation-
         Based , by default "random"

@@ -128,7 +130,7 @@ class Stree(BaseEstimator, ClassifierMixin):
     References
     ----------
     R. Montañana, J. A. Gámez, J. M. Puerta, "STree: a single multi-class
-    oblique decision tree based on support vector machines.", 2021 LNAI...
+    oblique decision tree based on support vector machines.", 2021 LNAI 12882


     """

@@ -168,6 +170,11 @@ class Stree(BaseEstimator, ClassifierMixin):
         self.normalize = normalize
         self.multiclass_strategy = multiclass_strategy

+    @staticmethod
+    def version() -> str:
+        """Return the version of the package."""
+        return __version__
+
     def _more_tags(self) -> dict:
         """Required by sklearn to supply features of the classifier
         make mandatory the labels array

@@ -469,6 +476,23 @@ class Stree(BaseEstimator, ClassifierMixin):
             tree = None
         return Siterator(tree)

+    def graph(self, title="") -> str:
+        """Graphviz code representing the tree
+
+        Returns
+        -------
+        str
+            graphviz code
+        """
+        output = (
+            "digraph STree {\nlabel=<STree "
+            f"{title}>\nfontsize=30\nfontcolor=blue\nlabelloc=t\n"
+        )
+        for node in self:
+            output += node.graph()
+        output += "}\n"
+        return output
+
     def __str__(self) -> str:
         """String representation of the tree

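Not part of the diff: a usage sketch for the two new public methods; it assumes the optional graphviz Python package is installed for rendering the returned source.

import graphviz
from sklearn.datasets import load_wine

from stree import Stree

print(Stree.version())          # version string read from stree/_version.py, e.g. "1.2.4"

X, y = load_wine(return_X_y=True)
clf = Stree(random_state=0).fit(X, y)
dot_source = clf.graph("Wine")  # graphviz source returned as a plain string
graphviz.Source(dot_source).render("stree_wine", format="png", cleanup=True)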
stree/__init__.py

@@ -1,7 +1,5 @@
 from .Strees import Stree, Siterator

-__version__ = "1.2.1"
-
 __author__ = "Ricardo Montañana Gómez"
 __copyright__ = "Copyright 2020-2021, Ricardo Montañana Gómez"
 __license__ = "MIT License"
stree/_version.py, 1 line (new file)

@@ -0,0 +1 @@
+__version__ = "1.2.4"
stree tests (class Stree_test)

@@ -10,6 +10,7 @@ from sklearn.svm import LinearSVC
 from stree import Stree
 from stree.Splitter import Snode
 from .utils import load_dataset
+from .._version import __version__


 class Stree_test(unittest.TestCase):

@@ -357,6 +358,7 @@ class Stree_test(unittest.TestCase):

     # Tests of score
     def test_score_binary(self):
+        """Check score for binary classification."""
         X, y = load_dataset(self._random_state)
         accuracies = [
             0.9506666666666667,

@@ -379,6 +381,7 @@ class Stree_test(unittest.TestCase):
             self.assertAlmostEqual(accuracy_expected, accuracy_score)

     def test_score_max_features(self):
+        """Check score using max_features."""
         X, y = load_dataset(self._random_state)
         clf = Stree(
             kernel="liblinear",

@@ -390,6 +393,7 @@ class Stree_test(unittest.TestCase):
         self.assertAlmostEqual(0.9453333333333334, clf.score(X, y))

     def test_bogus_splitter_parameter(self):
+        """Check that bogus splitter parameter raises exception."""
         clf = Stree(splitter="duck")
         with self.assertRaises(ValueError):
             clf.fit(*load_dataset())

@@ -445,6 +449,7 @@ class Stree_test(unittest.TestCase):
         self.assertListEqual([47], resdn[1].tolist())

     def test_score_multiclass_rbf(self):
+        """Test score for multiclass classification with rbf kernel."""
         X, y = load_dataset(
             random_state=self._random_state,
             n_classes=3,

@@ -462,6 +467,7 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(1.0, clf2.fit(X, y).score(X, y))

     def test_score_multiclass_poly(self):
+        """Test score for multiclass classification with poly kernel."""
         X, y = load_dataset(
             random_state=self._random_state,
             n_classes=3,

@@ -483,6 +489,7 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(1.0, clf2.fit(X, y).score(X, y))

     def test_score_multiclass_liblinear(self):
+        """Test score for multiclass classification with liblinear kernel."""
         X, y = load_dataset(
             random_state=self._random_state,
             n_classes=3,

@@ -508,6 +515,7 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(1.0, clf2.fit(X, y).score(X, y))

     def test_score_multiclass_sigmoid(self):
+        """Test score for multiclass classification with sigmoid kernel."""
         X, y = load_dataset(
             random_state=self._random_state,
             n_classes=3,

@@ -528,6 +536,7 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(0.9662921348314607, clf2.fit(X, y).score(X, y))

     def test_score_multiclass_linear(self):
+        """Test score for multiclass classification with linear kernel."""
         warnings.filterwarnings("ignore", category=ConvergenceWarning)
         warnings.filterwarnings("ignore", category=RuntimeWarning)
         X, y = load_dataset(

@@ -555,11 +564,13 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(1.0, clf2.fit(X, y).score(X, y))

     def test_zero_all_sample_weights(self):
+        """Test exception raises when all sample weights are zero."""
         X, y = load_dataset(self._random_state)
         with self.assertRaises(ValueError):
             Stree().fit(X, y, np.zeros(len(y)))

     def test_mask_samples_weighted_zero(self):
+        """Check that the weighted zero samples are masked."""
         X = np.array(
             [
                 [1, 1],

@@ -587,6 +598,7 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(model2.score(X, y, w), 1)

     def test_depth(self):
+        """Check depth of the tree."""
         X, y = load_dataset(
             random_state=self._random_state,
             n_classes=3,

@@ -602,6 +614,7 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(4, clf.depth_)

     def test_nodes_leaves(self):
+        """Check number of nodes and leaves."""
         X, y = load_dataset(
             random_state=self._random_state,
             n_classes=3,

@@ -621,6 +634,7 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(6, leaves)

     def test_nodes_leaves_artificial(self):
+        """Check leaves of artificial dataset."""
         n1 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test1")
         n2 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test2")
         n3 = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test3")

@@ -639,12 +653,14 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(2, leaves)

     def test_bogus_multiclass_strategy(self):
+        """Check invalid multiclass strategy."""
         clf = Stree(multiclass_strategy="other")
         X, y = load_wine(return_X_y=True)
         with self.assertRaises(ValueError):
             clf.fit(X, y)

     def test_multiclass_strategy(self):
+        """Check multiclass strategy."""
         X, y = load_wine(return_X_y=True)
         clf_o = Stree(multiclass_strategy="ovo")
         clf_r = Stree(multiclass_strategy="ovr")

@@ -654,6 +670,7 @@ class Stree_test(unittest.TestCase):
         self.assertEqual(0.9269662921348315, score_r)

     def test_incompatible_hyperparameters(self):
+        """Check incompatible hyperparameters."""
         X, y = load_wine(return_X_y=True)
         clf = Stree(kernel="liblinear", multiclass_strategy="ovo")
         with self.assertRaises(ValueError):

@@ -661,3 +678,50 @@ class Stree_test(unittest.TestCase):
         clf = Stree(multiclass_strategy="ovo", split_criteria="max_samples")
         with self.assertRaises(ValueError):
             clf.fit(X, y)
+
+    def test_version(self):
+        """Check STree version."""
+        clf = Stree()
+        self.assertEqual(__version__, clf.version())
+
+    def test_graph(self):
+        """Check graphviz representation of the tree."""
+        X, y = load_wine(return_X_y=True)
+        clf = Stree(random_state=self._random_state)
+
+        expected_head = (
+            "digraph STree {\nlabel=<STree >\nfontsize=30\n"
+            "fontcolor=blue\nlabelloc=t\n"
+        )
+        expected_tail = (
+            ' [shape=box style=filled label="class=1 impurity=0.000 '
+            'classes=[1] samples=[1]"];\n}\n'
+        )
+        self.assertEqual(clf.graph(), expected_head + "}\n")
+        clf.fit(X, y)
+        computed = clf.graph()
+        computed_head = computed[: len(expected_head)]
+        num = -len(expected_tail)
+        computed_tail = computed[num:]
+        self.assertEqual(computed_head, expected_head)
+        self.assertEqual(computed_tail, expected_tail)
+
+    def test_graph_title(self):
+        X, y = load_wine(return_X_y=True)
+        clf = Stree(random_state=self._random_state)
+        expected_head = (
+            "digraph STree {\nlabel=<STree Sample title>\nfontsize=30\n"
+            "fontcolor=blue\nlabelloc=t\n"
+        )
+        expected_tail = (
+            ' [shape=box style=filled label="class=1 impurity=0.000 '
+            'classes=[1] samples=[1]"];\n}\n'
+        )
+        self.assertEqual(clf.graph("Sample title"), expected_head + "}\n")
+        clf.fit(X, y)
+        computed = clf.graph("Sample title")
+        computed_head = computed[: len(expected_head)]
+        num = -len(expected_tail)
+        computed_tail = computed[num:]
+        self.assertEqual(computed_head, expected_head)
+        self.assertEqual(computed_tail, expected_tail)
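Not part of the diff: one possible way to run only the tests added above; the dotted module path stree.tests.Stree_test is an assumption about the package's test layout.

import unittest

# load the three new test methods by name (assumed module path) and run them verbosely
suite = unittest.defaultTestLoader.loadTestsFromNames(
    [
        "stree.tests.Stree_test.Stree_test.test_version",
        "stree.tests.Stree_test.Stree_test.test_graph",
        "stree.tests.Stree_test.Stree_test.test_graph_title",
    ]
)
unittest.TextTestRunner(verbosity=2).run(suite)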