From 2f6ae648a1cfd943f84fc85644e6c827fe539746 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ricardo=20Monta=C3=B1ana=20G=C3=B3mez?= Date: Fri, 21 Oct 2022 12:26:46 +0200 Subject: [PATCH] New predict proba (#53) * Add complete classes counts to node and tests * Implement optimized predict and new predict_proba * Add predict_proba test * Add python 3.10 to CI --- .github/workflows/main.yml | 2 +- stree/Splitter.py | 18 +++---- stree/Strees.py | 108 +++++++++++++++++++------------------ stree/tests/Snode_test.py | 35 ++++++------ stree/tests/Stree_test.py | 36 ++++++++++++- 5 files changed, 120 insertions(+), 79 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index f7385f0..55f08fe 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -13,7 +13,7 @@ jobs: strategy: matrix: os: [macos-latest, ubuntu-latest, windows-latest] - python: [3.8] + python: [3.8, "3.10"] steps: - uses: actions/checkout@v2 diff --git a/stree/Splitter.py b/stree/Splitter.py index da58b07..b60f6cd 100644 --- a/stree/Splitter.py +++ b/stree/Splitter.py @@ -68,6 +68,7 @@ class Snode: self._impurity = impurity self._partition_column: int = -1 self._scaler = scaler + self._proba = None @classmethod def copy(cls, node: "Snode") -> "Snode": @@ -127,23 +128,22 @@ class Snode: def get_up(self) -> "Snode": return self._up - def make_predictor(self): + def make_predictor(self, num_classes: int) -> None: """Compute the class of the predictor and its belief based on the subdataset of the node only if it is a leaf """ if not self.is_leaf(): return classes, card = np.unique(self._y, return_counts=True) - if len(classes) > 1: + self._proba = np.zeros((num_classes,), dtype=np.int64) + for c, n in zip(classes, card): + self._proba[c] = n + try: max_card = max(card) self._class = classes[card == max_card][0] self._belief = max_card / np.sum(card) - else: - self._belief = 1 - try: - self._class = classes[0] - except IndexError: - self._class = None + except ValueError: + self._class = None def graph(self): """ @@ -155,7 +155,7 @@ class Snode: output += ( f'N{id(self)} [shape=box style=filled label="' f"class={self._class} impurity={self._impurity:.3f} " - f'classes={count_values[0]} samples={count_values[1]}"];\n' + f'counts={self._proba}"];\n' ) else: output += ( diff --git a/stree/Strees.py b/stree/Strees.py index 64329e3..1f93252 100644 --- a/stree/Strees.py +++ b/stree/Strees.py @@ -314,7 +314,7 @@ class Stree(BaseEstimator, ClassifierMixin): if np.unique(y).shape[0] == 1: # only 1 class => pure dataset node.set_title(title + ", ") - node.make_predictor() + node.make_predictor(self.n_classes_) return node # Train the model clf = self._build_clf() @@ -333,7 +333,7 @@ class Stree(BaseEstimator, ClassifierMixin): if X_U is None or X_D is None: # didn't part anything node.set_title(title + ", ") - node.make_predictor() + node.make_predictor(self.n_classes_) return node node.set_up( self._train(X_U, y_u, sw_u, depth + 1, title + f" - Up({depth+1})") @@ -367,28 +367,66 @@ class Stree(BaseEstimator, ClassifierMixin): ) ) - @staticmethod - def _reorder_results(y: np.array, indices: np.array) -> np.array: - """Reorder an array based on the array of indices passed + def __predict_class(self, X: np.array) -> np.array: + def compute_prediction(xp, indices, node): + if xp is None: + return + if node.is_leaf(): + # set a class for indices + result[indices] = node._proba + return + self.splitter_.partition(xp, node, train=False) + x_u, x_d = self.splitter_.part(xp) + i_u, i_d = 
self.splitter_.part(indices) + compute_prediction(x_u, i_u, node.get_up()) + compute_prediction(x_d, i_d, node.get_down()) + + # setup prediction & make it happen + result = np.zeros((X.shape[0], self.n_classes_)) + indices = np.arange(X.shape[0]) + compute_prediction(X, indices, self.tree_) + return result + + def check_predict(self, X) -> np.array: + check_is_fitted(self, ["tree_"]) + # Input validation + X = check_array(X) + if X.shape[1] != self.n_features_: + raise ValueError( + f"Expected {self.n_features_} features but got " + f"({X.shape[1]})" + ) + return X + + def predict_proba(self, X: np.array) -> np.array: + """Predict class probabilities of the input samples X. + + The predicted class probability is the fraction of samples of the same + class in a leaf. Parameters ---------- - y : np.array - data untidy - indices : np.array - indices used to set order + X : dataset of samples. Returns ------- - np.array - array y ordered + proba : array of shape (n_samples, n_classes) + The class probabilities of the input samples. + + Raises + ------ + ValueError + if dataset with inconsistent number of features + NotFittedError + if model is not fitted """ - # return array of same type given in y - y_ordered = y.copy() - indices = indices.astype(int) - for i, index in enumerate(indices): - y_ordered[index] = y[i] - return y_ordered + + X = self.check_predict(X) + # return # of samples of each class in leaf node + values = self.__predict_class(X) + normalizer = values.sum(axis=1)[:, np.newaxis] + normalizer[normalizer == 0.0] = 1.0 + return values / normalizer def predict(self, X: np.array) -> np.array: """Predict labels for each sample in dataset passed @@ -410,40 +448,8 @@ class Stree(BaseEstimator, ClassifierMixin): NotFittedError if model is not fitted """ - - def predict_class( - xp: np.array, indices: np.array, node: Snode - ) -> np.array: - if xp is None: - return [], [] - if node.is_leaf(): - # set a class for every sample in dataset - prediction = np.full((xp.shape[0], 1), node._class) - return prediction, indices - self.splitter_.partition(xp, node, train=False) - x_u, x_d = self.splitter_.part(xp) - i_u, i_d = self.splitter_.part(indices) - prx_u, prin_u = predict_class(x_u, i_u, node.get_up()) - prx_d, prin_d = predict_class(x_d, i_d, node.get_down()) - return np.append(prx_u, prx_d), np.append(prin_u, prin_d) - - # sklearn check - check_is_fitted(self, ["tree_"]) - # Input validation - X = check_array(X) - if X.shape[1] != self.n_features_: - raise ValueError( - f"Expected {self.n_features_} features but got " - f"({X.shape[1]})" - ) - # setup prediction & make it happen - indices = np.arange(X.shape[0]) - result = ( - self._reorder_results(*predict_class(X, indices, self.tree_)) - .astype(int) - .ravel() - ) - return self.classes_[result] + X = self.check_predict(X) + return self.classes_[np.argmax(self.__predict_class(X), axis=1)] def nodes_leaves(self) -> tuple: """Compute the number of nodes and leaves in the built tree diff --git a/stree/tests/Snode_test.py b/stree/tests/Snode_test.py index b168184..41cfe6b 100644 --- a/stree/tests/Snode_test.py +++ b/stree/tests/Snode_test.py @@ -67,10 +67,28 @@ class Snode_test(unittest.TestCase): def test_make_predictor_on_leaf(self): test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test") - test.make_predictor() + test.make_predictor(2) self.assertEqual(1, test._class) self.assertEqual(0.75, test._belief) self.assertEqual(-1, test._partition_column) + self.assertListEqual([1, 3], test._proba.tolist()) + + def 
test_make_predictor_on_not_leaf(self): + test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test") + test.set_up(Snode(None, [1], [1], [], 0.0, "another_test")) + test.make_predictor(2) + self.assertIsNone(test._class) + self.assertEqual(0, test._belief) + self.assertEqual(-1, test._partition_column) + self.assertEqual(-1, test.get_up()._partition_column) + self.assertIsNone(test._proba) + + def test_make_predictor_on_leaf_bogus_data(self): + test = Snode(None, [1, 2, 3, 4], [], [], 0.0, "test") + test.make_predictor(2) + self.assertIsNone(test._class) + self.assertEqual(-1, test._partition_column) + self.assertListEqual([0, 0], test._proba.tolist()) def test_set_title(self): test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test") @@ -97,21 +115,6 @@ class Snode_test(unittest.TestCase): test.set_features([1, 2]) self.assertListEqual([1, 2], test.get_features()) - def test_make_predictor_on_not_leaf(self): - test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test") - test.set_up(Snode(None, [1], [1], [], 0.0, "another_test")) - test.make_predictor() - self.assertIsNone(test._class) - self.assertEqual(0, test._belief) - self.assertEqual(-1, test._partition_column) - self.assertEqual(-1, test.get_up()._partition_column) - - def test_make_predictor_on_leaf_bogus_data(self): - test = Snode(None, [1, 2, 3, 4], [], [], 0.0, "test") - test.make_predictor() - self.assertIsNone(test._class) - self.assertEqual(-1, test._partition_column) - def test_copy_node(self): px = [1, 2, 3, 4] py = [1] diff --git a/stree/tests/Stree_test.py b/stree/tests/Stree_test.py index 63c210f..0f13eaa 100644 --- a/stree/tests/Stree_test.py +++ b/stree/tests/Stree_test.py @@ -115,6 +115,38 @@ class Stree_test(unittest.TestCase): yp = clf.fit(X, y).predict(X[:num, :]) self.assertListEqual(y[:num].tolist(), yp.tolist()) + def test_multiple_predict_proba(self): + expected = { + "liblinear": { + 0: [0.02401129943502825, 0.9759887005649718], + 17: [0.9282970550576184, 0.07170294494238157], + }, + "linear": { + 0: [0.029329608938547486, 0.9706703910614525], + 17: [0.9298469387755102, 0.07015306122448979], + }, + "rbf": { + 0: [0.023448275862068966, 0.976551724137931], + 17: [0.9458064516129032, 0.05419354838709677], + }, + "poly": { + 0: [0.01601164483260553, 0.9839883551673945], + 17: [0.9089790897908979, 0.0910209102091021], + }, + } + indices = [0, 17] + X, y = load_dataset(self._random_state) + for kernel in ["liblinear", "linear", "rbf", "poly"]: + clf = Stree( + kernel=kernel, + multiclass_strategy="ovr" if kernel == "liblinear" else "ovo", + random_state=self._random_state, + ) + yp = clf.fit(X, y).predict_proba(X) + for index in indices: + for exp, comp in zip(expected[kernel][index], yp[index]): + self.assertAlmostEqual(exp, comp) + def test_single_vs_multiple_prediction(self): """Check if predicting sample by sample gives the same result as predicting all samples at once @@ -695,7 +727,7 @@ class Stree_test(unittest.TestCase): ) expected_tail = ( ' [shape=box style=filled label="class=1 impurity=0.000 ' - 'classes=[1] samples=[1]"];\n}\n' + 'counts=[0 1 0]"];\n}\n' ) self.assertEqual(clf.graph(), expected_head + "}\n") clf.fit(X, y) @@ -715,7 +747,7 @@ class Stree_test(unittest.TestCase): ) expected_tail = ( ' [shape=box style=filled label="class=1 impurity=0.000 ' - 'classes=[1] samples=[1]"];\n}\n' + 'counts=[0 1 0]"];\n}\n' ) self.assertEqual(clf.graph("Sample title"), expected_head + "}\n") clf.fit(X, y)
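
Usage sketch (not part of the patch): the hunks above store per-class sample counts in each leaf (`Snode._proba`), route samples with `__predict_class`, and normalise the collected counts in `predict_proba`, with `predict` reduced to an argmax over the same matrix. The snippet below illustrates that contract; it assumes the package is importable as `from stree import Stree` (as in this repository) and uses scikit-learn's `make_classification` purely as example data.

    import numpy as np
    from sklearn.datasets import make_classification
    from stree import Stree

    # toy multiclass problem, just to exercise the new code path
    X, y = make_classification(
        n_samples=200, n_features=5, n_informative=3, n_classes=3, random_state=0
    )
    clf = Stree(random_state=0).fit(X, y)

    proba = clf.predict_proba(X)  # shape (n_samples, n_classes_)
    assert proba.shape == (X.shape[0], clf.n_classes_)

    # each row is the leaf's class-count vector normalised to a distribution
    assert np.allclose(proba.sum(axis=1), 1.0)

    # predict() now takes the argmax over the same counts, so both must agree
    assert (clf.predict(X) == clf.classes_[np.argmax(proba, axis=1)]).all()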