From 1c869e154e96b6a4801292eabf5a7d79612c98e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ricardo=20Monta=C3=B1ana=20G=C3=B3mez?= Date: Tue, 3 Nov 2020 11:36:05 +0100 Subject: [PATCH] Enhance partition (#16) #15 Create impurity function in Stree (consistent name, same criteria as other splitter parameter) Create test for the new function Update init test Update test splitter parameters Rename old impurity function to partition_impurity close #15 * Complete implementation of splitter_type = impurity with tests Remove max_distance & min_distance splitter types * Fix mistake in computing multiclass node belief Set default criterion for split to entropy instead of gini Set default max_iter to 1e5 instead of 1e3 change up-down criterion to match SVC multiclass Fix impurity method of splitting nodes Update jupyter Notebooks --- notebooks/benchmark.ipynb | 199 +++++++++++++++++++++++++-- notebooks/ensemble.ipynb | 36 +++-- notebooks/features.ipynb | 184 +++++++++++++++++++++++-- notebooks/gridsearch.ipynb | 225 +++++++++++++++++++++++++++++-- setup.py | 4 +- stree/Strees.py | 165 ++++++++++++----------- stree/tests/Snode_test.py | 16 ++- stree/tests/Splitter_test.py | 81 +++++------ stree/tests/Stree_test.py | 254 +++++++++++++++++++---------------- stree/tests/utils.py | 4 +- 10 files changed, 879 insertions(+), 289 deletions(-) diff --git a/notebooks/benchmark.ipynb b/notebooks/benchmark.ipynb index ef41f09..d778d0f 100644 --- a/notebooks/benchmark.ipynb +++ b/notebooks/benchmark.ipynb @@ -72,7 +72,9 @@ { "output_type": "stream", "name": "stdout", - "text": "2020-06-27 23:33:07\n" + "text": [ + "2020-11-01 11:14:06\n" + ] } ], "source": [ @@ -108,7 +110,9 @@ { "output_type": "stream", "name": "stdout", - "text": "Fraud: 0.173% 492\nValid: 99.827% 284,315\n" + "text": [ + "Fraud: 0.173% 492\nValid: 99.827% 284,315\n" + ] } ], "source": [ @@ -138,7 +142,9 @@ { "output_type": "stream", "name": "stdout", - "text": "X shape: (284807, 29)\ny shape: (284807,)\n" + "text": [ + "X shape: (284807, 29)\ny shape: (284807,)\n" + ] } ], "source": [ @@ -193,7 +199,7 @@ "outputs": [], "source": [ "# Stree\n", - "stree = Stree(random_state=random_state, C=.01)" + "stree = Stree(random_state=random_state, C=.01, max_iter=1e3)" ] }, { @@ -258,7 +264,148 @@ { "output_type": "stream", "name": "stdout", - "text": "************************** Linear Tree **********************\nTrain Model Linear Tree took: 14.43 seconds\n=========== Linear Tree - Train 199,364 samples =============\n precision recall f1-score support\n\n 0 1.000000 1.000000 1.000000 199020\n 1 1.000000 1.000000 1.000000 344\n\n accuracy 1.000000 199364\n macro avg 1.000000 1.000000 1.000000 199364\nweighted avg 1.000000 1.000000 1.000000 199364\n\n=========== Linear Tree - Test 85,443 samples =============\n precision recall f1-score support\n\n 0 0.999578 0.999613 0.999596 85295\n 1 0.772414 0.756757 0.764505 148\n\n accuracy 0.999192 85443\n macro avg 0.885996 0.878185 0.882050 85443\nweighted avg 0.999184 0.999192 0.999188 85443\n\nConfusion Matrix in Train\n[[199020 0]\n [ 0 344]]\nConfusion Matrix in Test\n[[85262 33]\n [ 36 112]]\n************************** Random Forest **********************\nTrain Model Random Forest took: 165.2 seconds\n=========== Random Forest - Train 199,364 samples =============\n precision recall f1-score support\n\n 0 1.000000 1.000000 1.000000 199020\n 1 1.000000 1.000000 1.000000 344\n\n accuracy 1.000000 199364\n macro avg 1.000000 1.000000 1.000000 199364\nweighted avg 1.000000 1.000000 1.000000 199364\n\n=========== 
Random Forest - Test 85,443 samples =============\n precision recall f1-score support\n\n 0 0.999660 0.999965 0.999812 85295\n 1 0.975410 0.804054 0.881481 148\n\n accuracy 0.999625 85443\n macro avg 0.987535 0.902009 0.940647 85443\nweighted avg 0.999618 0.999625 0.999607 85443\n\nConfusion Matrix in Train\n[[199020 0]\n [ 0 344]]\nConfusion Matrix in Test\n[[85292 3]\n [ 29 119]]\n************************** Stree (SVM Tree) **********************\nTrain Model Stree (SVM Tree) took: 37.53 seconds\n=========== Stree (SVM Tree) - Train 199,364 samples =============\n precision recall f1-score support\n\n 0 0.999623 0.999864 0.999744 199020\n 1 0.908784 0.781977 0.840625 344\n\n accuracy 0.999488 199364\n macro avg 0.954204 0.890921 0.920184 199364\nweighted avg 0.999467 0.999488 0.999469 199364\n\n=========== Stree (SVM Tree) - Test 85,443 samples =============\n precision recall f1-score support\n\n 0 0.999637 0.999918 0.999777 85295\n 1 0.943548 0.790541 0.860294 148\n\n accuracy 0.999555 85443\n macro avg 0.971593 0.895229 0.930036 85443\nweighted avg 0.999540 0.999555 0.999536 85443\n\nConfusion Matrix in Train\n[[198993 27]\n [ 75 269]]\nConfusion Matrix in Test\n[[85288 7]\n [ 31 117]]\n************************** AdaBoost model **********************\nTrain Model AdaBoost model took: 46.98 seconds\n=========== AdaBoost model - Train 199,364 samples =============\n precision recall f1-score support\n\n 0 0.999392 0.999678 0.999535 199020\n 1 0.777003 0.648256 0.706815 344\n\n accuracy 0.999072 199364\n macro avg 0.888198 0.823967 0.853175 199364\nweighted avg 0.999008 0.999072 0.999030 199364\n\n=========== AdaBoost model - Test 85,443 samples =============\n precision recall f1-score support\n\n 0 0.999484 0.999707 0.999596 85295\n 1 0.806202 0.702703 0.750903 148\n\n accuracy 0.999192 85443\n macro avg 0.902843 0.851205 0.875249 85443\nweighted avg 0.999149 0.999192 0.999165 85443\n\nConfusion Matrix in Train\n[[198956 64]\n [ 121 223]]\nConfusion Matrix in Test\n[[85270 25]\n [ 44 104]]\n************************** Bagging model **********************\nTrain Model Bagging model took: 77.93 seconds\n=========== Bagging model - Train 199,364 samples =============\n precision recall f1-score support\n\n 0 0.999864 1.000000 0.999932 199020\n 1 1.000000 0.921512 0.959153 344\n\n accuracy 0.999865 199364\n macro avg 0.999932 0.960756 0.979542 199364\nweighted avg 0.999865 0.999865 0.999862 199364\n\n=========== Bagging model - Test 85,443 samples =============\n precision recall f1-score support\n\n 0 0.999637 0.999953 0.999795 85295\n 1 0.966942 0.790541 0.869888 148\n\n accuracy 0.999590 85443\n macro avg 0.983289 0.895247 0.934842 85443\nweighted avg 0.999580 0.999590 0.999570 85443\n\nConfusion Matrix in Train\n[[199020 0]\n [ 27 317]]\nConfusion Matrix in Test\n[[85291 4]\n [ 31 117]]\n" + "text": [ + "************************** Linear Tree **********************\n", + "Train Model Linear Tree took: 15.14 seconds\n", + "=========== Linear Tree - Train 199,364 samples =============\n", + " precision recall f1-score support\n", + "\n", + " 0 1.000000 1.000000 1.000000 199020\n", + " 1 1.000000 1.000000 1.000000 344\n", + "\n", + " accuracy 1.000000 199364\n", + " macro avg 1.000000 1.000000 1.000000 199364\n", + "weighted avg 1.000000 1.000000 1.000000 199364\n", + "\n", + "=========== Linear Tree - Test 85,443 samples =============\n", + " precision recall f1-score support\n", + "\n", + " 0 0.999578 0.999613 0.999596 85295\n", + " 1 0.772414 0.756757 0.764505 148\n", + "\n", + " 
accuracy 0.999192 85443\n", + " macro avg 0.885996 0.878185 0.882050 85443\n", + "weighted avg 0.999184 0.999192 0.999188 85443\n", + "\n", + "Confusion Matrix in Train\n", + "[[199020 0]\n", + " [ 0 344]]\n", + "Confusion Matrix in Test\n", + "[[85262 33]\n", + " [ 36 112]]\n", + "************************** Random Forest **********************\n", + "Train Model Random Forest took: 181.1 seconds\n", + "=========== Random Forest - Train 199,364 samples =============\n", + " precision recall f1-score support\n", + "\n", + " 0 1.000000 1.000000 1.000000 199020\n", + " 1 1.000000 1.000000 1.000000 344\n", + "\n", + " accuracy 1.000000 199364\n", + " macro avg 1.000000 1.000000 1.000000 199364\n", + "weighted avg 1.000000 1.000000 1.000000 199364\n", + "\n", + "=========== Random Forest - Test 85,443 samples =============\n", + " precision recall f1-score support\n", + "\n", + " 0 0.999660 0.999965 0.999812 85295\n", + " 1 0.975410 0.804054 0.881481 148\n", + "\n", + " accuracy 0.999625 85443\n", + " macro avg 0.987535 0.902009 0.940647 85443\n", + "weighted avg 0.999618 0.999625 0.999607 85443\n", + "\n", + "Confusion Matrix in Train\n", + "[[199020 0]\n", + " [ 0 344]]\n", + "Confusion Matrix in Test\n", + "[[85292 3]\n", + " [ 29 119]]\n", + "************************** Stree (SVM Tree) **********************\n", + "Train Model Stree (SVM Tree) took: 36.6 seconds\n", + "=========== Stree (SVM Tree) - Train 199,364 samples =============\n", + " precision recall f1-score support\n", + "\n", + " 0 0.999623 0.999864 0.999744 199020\n", + " 1 0.908784 0.781977 0.840625 344\n", + "\n", + " accuracy 0.999488 199364\n", + " macro avg 0.954204 0.890921 0.920184 199364\n", + "weighted avg 0.999467 0.999488 0.999469 199364\n", + "\n", + "=========== Stree (SVM Tree) - Test 85,443 samples =============\n", + " precision recall f1-score support\n", + "\n", + " 0 0.999637 0.999918 0.999777 85295\n", + " 1 0.943548 0.790541 0.860294 148\n", + "\n", + " accuracy 0.999555 85443\n", + " macro avg 0.971593 0.895229 0.930036 85443\n", + "weighted avg 0.999540 0.999555 0.999536 85443\n", + "\n", + "Confusion Matrix in Train\n", + "[[198993 27]\n", + " [ 75 269]]\n", + "Confusion Matrix in Test\n", + "[[85288 7]\n", + " [ 31 117]]\n", + "************************** AdaBoost model **********************\n", + "Train Model AdaBoost model took: 46.14 seconds\n", + "=========== AdaBoost model - Train 199,364 samples =============\n", + " precision recall f1-score support\n", + "\n", + " 0 0.999392 0.999678 0.999535 199020\n", + " 1 0.777003 0.648256 0.706815 344\n", + "\n", + " accuracy 0.999072 199364\n", + " macro avg 0.888198 0.823967 0.853175 199364\n", + "weighted avg 0.999008 0.999072 0.999030 199364\n", + "\n", + "=========== AdaBoost model - Test 85,443 samples =============\n", + " precision recall f1-score support\n", + "\n", + " 0 0.999484 0.999707 0.999596 85295\n", + " 1 0.806202 0.702703 0.750903 148\n", + "\n", + " accuracy 0.999192 85443\n", + " macro avg 0.902843 0.851205 0.875249 85443\n", + "weighted avg 0.999149 0.999192 0.999165 85443\n", + "\n", + "Confusion Matrix in Train\n", + "[[198956 64]\n", + " [ 121 223]]\n", + "Confusion Matrix in Test\n", + "[[85270 25]\n", + " [ 44 104]]\n", + "************************** Bagging model **********************\n", + "Train Model Bagging model took: 77.73 seconds\n", + "=========== Bagging model - Train 199,364 samples =============\n", + " precision recall f1-score support\n", + "\n", + " 0 0.999864 1.000000 0.999932 199020\n", + " 1 1.000000 0.921512 
0.959153 344\n", + "\n", + " accuracy 0.999865 199364\n", + " macro avg 0.999932 0.960756 0.979542 199364\n", + "weighted avg 0.999865 0.999865 0.999862 199364\n", + "\n", + "=========== Bagging model - Test 85,443 samples =============\n", + " precision recall f1-score support\n", + "\n", + " 0 0.999637 0.999953 0.999795 85295\n", + " 1 0.966942 0.790541 0.869888 148\n", + "\n", + " accuracy 0.999590 85443\n", + " macro avg 0.983289 0.895247 0.934842 85443\n", + "weighted avg 0.999580 0.999590 0.999570 85443\n", + "\n", + "Confusion Matrix in Train\n", + "[[199020 0]\n", + " [ 27 317]]\n", + "Confusion Matrix in Test\n", + "[[85291 4]\n", + " [ 31 117]]\n" + ] } ], "source": [ @@ -289,7 +436,9 @@ { "output_type": "stream", "name": "stdout", - "text": "**************************************************************************************************************\n*The best f1 model is Random Forest, with a f1 score: 0.8815 in 165.193 seconds with 0.7 samples in train dataset\n**************************************************************************************************************\nModel: Linear Tree\t Time: 14.43 seconds\t f1: 0.7645\nModel: Random Forest\t Time: 165.19 seconds\t f1: 0.8815\nModel: Stree (SVM Tree)\t Time: 37.53 seconds\t f1: 0.8603\nModel: AdaBoost model\t Time: 46.98 seconds\t f1: 0.7509\nModel: Bagging model\t Time: 77.93 seconds\t f1: 0.8699\n" + "text": [ + "**************************************************************************************************************\n*The best f1 model is Random Forest, with a f1 score: 0.8815 in 181.07 seconds with 0.7 samples in train dataset\n**************************************************************************************************************\nModel: Linear Tree\t Time: 15.14 seconds\t f1: 0.7645\nModel: Random Forest\t Time: 181.07 seconds\t f1: 0.8815\nModel: Stree (SVM Tree)\t Time: 36.60 seconds\t f1: 0.8603\nModel: AdaBoost model\t Time: 46.14 seconds\t f1: 0.7509\nModel: Bagging model\t Time: 77.73 seconds\t f1: 0.8699\n" + ] } ], "source": [ @@ -331,14 +480,46 @@ "\n", "```" ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "{'C': 0.01,\n", + " 'criterion': 'entropy',\n", + " 'degree': 3,\n", + " 'gamma': 'scale',\n", + " 'kernel': 'linear',\n", + " 'max_depth': None,\n", + " 'max_features': None,\n", + " 'max_iter': 1000.0,\n", + " 'min_samples_split': 0,\n", + " 'random_state': 2020,\n", + " 'split_criteria': 'impurity',\n", + " 'splitter': 'random',\n", + " 'tol': 0.0001}" + ] + }, + "metadata": {}, + "execution_count": 18 + } + ], + "source": [ + "stree.get_params()" + ] } ], "metadata": { "hide_input": false, "kernelspec": { - "display_name": "Python 3.7.6 64-bit ('general': venv)", + "display_name": "Python 3.8.4 64-bit ('general': venv)", "language": "python", - "name": "python37664bitgeneralvenvfbd0a23e74cf4e778460f5ffc6761f39" + "name": "python38464bitgeneralvenv77203c0a6afd4428bd66253ef62753dc" }, "language_info": { "codemirror_mode": { @@ -350,7 +531,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.6-final" + "version": "3.8.4-final" }, "toc": { "base_numbering": 1, diff --git a/notebooks/ensemble.ipynb b/notebooks/ensemble.ipynb index 5462d69..3b54262 100644 --- a/notebooks/ensemble.ipynb +++ b/notebooks/ensemble.ipynb @@ -61,7 +61,13 @@ { "output_type": "stream", "name": "stdout", - "text": "Fraud: 0.173% 492\nValid: 99.827% 
284315\nX.shape (100492, 28) y.shape (100492,)\nFraud: 0.644% 647\nValid: 99.356% 99845\n" + "text": [ + "Fraud: 0.173% 492\n", + "Valid: 99.827% 284315\n", + "X.shape (100492, 28) y.shape (100492,)\n", + "Fraud: 0.652% 655\n", + "Valid: 99.348% 99837\n" + ] } ], "source": [ @@ -129,12 +135,14 @@ { "output_type": "stream", "name": "stdout", - "text": "Score Train: 0.9985784146480154\nScore Test: 0.9981093273185617\nTook 73.27 seconds\n" + "text": [ + "Score Train: 0.9985073353804162\nScore Test: 0.9983746848878864\nTook 35.80 seconds\n" + ] } ], "source": [ "now = time.time()\n", - "clf = Stree(max_depth=3, random_state=random_state)\n", + "clf = Stree(max_depth=3, random_state=random_state, max_iter=1e3)\n", "clf.fit(Xtrain, ytrain)\n", "print(\"Score Train: \", clf.score(Xtrain, ytrain))\n", "print(\"Score Test: \", clf.score(Xtest, ytest))\n", @@ -169,13 +177,17 @@ { "output_type": "stream", "name": "stdout", - "text": "Kernel: linear\tTime: 93.78 seconds\tScore Train: 0.9983083\tScore Test: 0.9983083\nKernel: rbf\tTime: 18.32 seconds\tScore Train: 0.9935602\tScore Test: 0.9935651\nKernel: poly\tTime: 69.68 seconds\tScore Train: 0.9973132\tScore Test: 0.9972801\n" + "text": [ + "Kernel: linear\tTime: 49.66 seconds\tScore Train: 0.9983225\tScore Test: 0.9983083\n", + "Kernel: rbf\tTime: 12.73 seconds\tScore Train: 0.9934891\tScore Test: 0.9934656\n", + "Kernel: poly\tTime: 76.24 seconds\tScore Train: 0.9972706\tScore Test: 0.9969152\n" + ] } ], "source": [ "for kernel in ['linear', 'rbf', 'poly']:\n", " now = time.time()\n", - " clf = AdaBoostClassifier(base_estimator=Stree(C=C, kernel=kernel, max_depth=max_depth, random_state=random_state), algorithm=\"SAMME\", n_estimators=n_estimators, random_state=random_state)\n", + " clf = AdaBoostClassifier(base_estimator=Stree(C=C, kernel=kernel, max_depth=max_depth, random_state=random_state, max_iter=1e3), algorithm=\"SAMME\", n_estimators=n_estimators, random_state=random_state)\n", " clf.fit(Xtrain, ytrain)\n", " score_train = clf.score(Xtrain, ytrain)\n", " score_test = clf.score(Xtest, ytest)\n", @@ -210,13 +222,17 @@ { "output_type": "stream", "name": "stdout", - "text": "Kernel: linear\tTime: 387.06 seconds\tScore Train: 0.9985784\tScore Test: 0.9981093\nKernel: rbf\tTime: 144.00 seconds\tScore Train: 0.9992750\tScore Test: 0.9983415\nKernel: poly\tTime: 101.78 seconds\tScore Train: 0.9992466\tScore Test: 0.9981757\n" + "text": [ + "Kernel: linear\tTime: 231.51 seconds\tScore Train: 0.9984931\tScore Test: 0.9983083\n", + "Kernel: rbf\tTime: 114.77 seconds\tScore Train: 0.9992323\tScore Test: 0.9983083\n", + "Kernel: poly\tTime: 67.87 seconds\tScore Train: 0.9993319\tScore Test: 0.9985074\n" + ] } ], "source": [ "for kernel in ['linear', 'rbf', 'poly']:\n", " now = time.time()\n", - " clf = BaggingClassifier(base_estimator=Stree(C=C, kernel=kernel, max_depth=max_depth, random_state=random_state), n_estimators=n_estimators, random_state=random_state)\n", + " clf = BaggingClassifier(base_estimator=Stree(C=C, kernel=kernel, max_depth=max_depth, random_state=random_state, max_iter=1e3), n_estimators=n_estimators, random_state=random_state)\n", " clf.fit(Xtrain, ytrain)\n", " score_train = clf.score(Xtrain, ytrain)\n", " score_test = clf.score(Xtest, ytest)\n", @@ -235,12 +251,12 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.6-final" + "version": "3.8.4-final" }, "orig_nbformat": 2, "kernelspec": { - "name": "python37664bitgeneralvenve3128601eb614c5da59c5055670b6040", - "display_name": 
"Python 3.7.6 64-bit ('general': venv)" + "name": "python38464bitgeneralf6de308d3831407c8bd68d4a5e328a38", + "display_name": "Python 3.8.4 64-bit ('general')" } }, "nbformat": 4, diff --git a/notebooks/features.ipynb b/notebooks/features.ipynb index 13508af..297bad7 100644 --- a/notebooks/features.ipynb +++ b/notebooks/features.ipynb @@ -68,7 +68,9 @@ { "output_type": "stream", "name": "stdout", - "text": "Fraud: 0.173% 492\nValid: 99.827% 284315\nX.shape (5492, 28) y.shape (5492,)\nFraud: 9.086% 499\nValid: 90.914% 4993\n[0.09157128 0.09157128 0.09157128 0.09157128] [0.08919903 0.08919903 0.08919903 0.08919903]\n" + "text": [ + "Fraud: 0.173% 492\nValid: 99.827% 284315\nX.shape (5492, 28) y.shape (5492,)\nFraud: 9.141% 502\nValid: 90.859% 4990\n[0.09183143 0.09183143 0.09183143 0.09183143] [0.09041262 0.09041262 0.09041262 0.09041262]\n" + ] } ], "source": [ @@ -148,7 +150,12 @@ { "output_type": "stream", "name": "stdout", - "text": "Accuracy of Train without weights 0.9875130072840791\nAccuracy of Train with weights 0.9919354838709677\nAccuracy of Tests without weights 0.9866504854368932\nAccuracy of Tests with weights 0.9872572815533981\n" + "text": [ + "Accuracy of Train without weights 0.9851716961498439\n", + "Accuracy of Train with weights 0.986732570239334\n", + "Accuracy of Tests without weights 0.9866504854368932\n", + "Accuracy of Tests with weights 0.9781553398058253\n" + ] } ], "source": [ @@ -177,7 +184,11 @@ { "output_type": "stream", "name": "stdout", - "text": "Time: 1.45s\tKernel: linear\tAccuracy_train: 0.9854318418314256\tAccuracy_test: 0.9842233009708737\nTime: 0.50s\tKernel: rbf\tAccuracy_train: 0.9940166493236212\tAccuracy_test: 0.9908980582524272\nTime: 0.42s\tKernel: poly\tAccuracy_train: 0.9945369406867846\tAccuracy_test: 0.9872572815533981\n" + "text": [ + "Time: 26.03s\tKernel: linear\tAccuracy_train: 0.9851716961498439\tAccuracy_test: 0.9866504854368932\n", + "Time: 0.54s\tKernel: rbf\tAccuracy_train: 0.9947970863683663\tAccuracy_test: 0.9878640776699029\n", + "Time: 0.43s\tKernel: poly\tAccuracy_train: 0.9960978147762747\tAccuracy_test: 0.9854368932038835\n" + ] } ], "source": [ @@ -208,7 +219,59 @@ { "output_type": "stream", "name": "stdout", - "text": "************** C=0.001 ****************************\nClassifier's accuracy (train): 0.9826\nClassifier's accuracy (test) : 0.9854\nroot feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.1664\nroot - Down, - Leaf class=1 belief= 0.986348 impurity=0.0269 counts=(array([0, 1]), array([ 4, 289]))\nroot - Up, - Leaf class=0 belief= 0.982259 impurity=0.0349 counts=(array([0, 1]), array([3488, 63]))\n\n**************************************************\n************** C=0.01 ****************************\nClassifier's accuracy (train): 0.9826\nClassifier's accuracy (test) : 0.9860\nroot feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.1664\nroot - Down, - Leaf class=1 belief= 0.986348 impurity=0.0269 counts=(array([0, 1]), array([ 4, 289]))\nroot - Up, - Leaf class=0 belief= 0.982259 impurity=0.0349 counts=(array([0, 1]), array([3488, 63]))\n\n**************************************************\n************** C=1 ****************************\nClassifier's accuracy (train): 0.9841\nClassifier's accuracy (test) : 0.9848\nroot feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.1664\nroot - Down feaures=(0, 1, 
2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0268\nroot - Down - Down, - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([286]))\nroot - Down - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4938\nroot - Down - Up - Down, - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([5]))\nroot - Down - Up - Up, - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([4]))\nroot - Up, - Leaf class=0 belief= 0.982812 impurity=0.0338 counts=(array([0, 1]), array([3488, 61]))\n\n**************************************************\n************** C=5 ****************************\nClassifier's accuracy (train): 0.9854\nClassifier's accuracy (test) : 0.9873\nroot feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.1664\nroot - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0199\nroot - Down - Down, - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([295]))\nroot - Down - Up, - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([3]))\nroot - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0316\nroot - Up - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.5000\nroot - Up - Down - Down, - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([1]))\nroot - Up - Down - Up, - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([1]))\nroot - Up - Up, - Leaf class=0 belief= 0.984199 impurity=0.0311 counts=(array([0, 1]), array([3488, 56]))\n\n**************************************************\n************** C=17 ****************************\nClassifier's accuracy (train): 0.9841\nClassifier's accuracy (test) : 0.9836\nroot feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.1664\nroot - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0524\nroot - Down - Down, - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([287]))\nroot - Down - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.3200\nroot - Down - Up - Down, - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([2]))\nroot - Down - Up - Up, - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([8]))\nroot - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0349\nroot - Up - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.2975\nroot - Up - Down - Down, - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([2]))\nroot - Up - Down - Up, - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([9]))\nroot - Up - Up, - Leaf class=0 belief= 0.982749 impurity=0.0339 counts=(array([0, 1]), array([3475, 61]))\n\n**************************************************\n2.3880 secs\n" + "text": [ + "************** C=0.001 
****************************\n", + "Classifier's accuracy (train): 0.9828\n", + "Classifier's accuracy (test) : 0.9848\n", + "root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4426 counts=(array([0, 1]), array([3491, 353]))\n", + "root - Down, - Leaf class=0 belief= 0.981716 impurity=0.1317 counts=(array([0, 1]), array([3490, 65]))\n", + "root - Up, - Leaf class=1 belief= 0.996540 impurity=0.0333 counts=(array([0, 1]), array([ 1, 288]))\n", + "\n", + "**************************************************\n", + "************** C=0.01 ****************************\n", + "Classifier's accuracy (train): 0.9834\n", + "Classifier's accuracy (test) : 0.9854\n", + "root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4426 counts=(array([0, 1]), array([3491, 353]))\n", + "root - Down, - Leaf class=0 belief= 0.982269 impurity=0.1285 counts=(array([0, 1]), array([3490, 63]))\n", + "root - Up, - Leaf class=1 belief= 0.996564 impurity=0.0331 counts=(array([0, 1]), array([ 1, 290]))\n", + "\n", + "**************************************************\n", + "************** C=1 ****************************\n", + "Classifier's accuracy (train): 0.9847\n", + "Classifier's accuracy (test) : 0.9867\n", + "root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4426 counts=(array([0, 1]), array([3491, 353]))\n", + "root - Down, - Leaf class=0 belief= 0.983371 impurity=0.1221 counts=(array([0, 1]), array([3489, 59]))\n", + "root - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0584 counts=(array([0, 1]), array([ 2, 294]))\n", + "root - Up - Down, - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([2]))\n", + "root - Up - Up, - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([294]))\n", + "\n", + "**************************************************\n", + "************** C=5 ****************************\n", + "Classifier's accuracy (train): 0.9852\n", + "Classifier's accuracy (test) : 0.9867\n", + "root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4426 counts=(array([0, 1]), array([3491, 353]))\n", + "root - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.1205 counts=(array([0, 1]), array([3488, 58]))\n", + "root - Down - Down, - Leaf class=0 belief= 0.983921 impurity=0.1188 counts=(array([0, 1]), array([3488, 57]))\n", + "root - Down - Up, - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([1]))\n", + "root - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0812 counts=(array([0, 1]), array([ 3, 295]))\n", + "root - Up - Down, - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([3]))\n", + "root - Up - Up, - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([295]))\n", + "\n", + "**************************************************\n", + "************** C=17 ****************************\n", + "Classifier's accuracy (train): 0.9852\n", + "Classifier's accuracy (test) : 0.9867\n", + "root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 
26, 27) impurity=0.4426 counts=(array([0, 1]), array([3491, 353]))\n", + "root - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.1205 counts=(array([0, 1]), array([3488, 58]))\n", + "root - Down - Down, - Leaf class=0 belief= 0.983921 impurity=0.1188 counts=(array([0, 1]), array([3488, 57]))\n", + "root - Down - Up, - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([1]))\n", + "root - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0812 counts=(array([0, 1]), array([ 3, 295]))\n", + "root - Up - Down, - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([3]))\n", + "root - Up - Up, - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([295]))\n", + "\n", + "**************************************************\n", + "64.5792 secs\n" + ] } ], "source": [ @@ -242,7 +305,9 @@ { "output_type": "stream", "name": "stdout", - "text": "root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.1664\nroot - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0524\nroot - Down - Down, - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([287]))\nroot - Down - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.3200\nroot - Down - Up - Down, - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([2]))\nroot - Down - Up - Up, - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([8]))\nroot - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0349\nroot - Up - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.2975\nroot - Up - Down - Down, - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([2]))\nroot - Up - Down - Up, - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([9]))\nroot - Up - Up, - Leaf class=0 belief= 0.982749 impurity=0.0339 counts=(array([0, 1]), array([3475, 61]))\n" + "text": [ + "root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4426 counts=(array([0, 1]), array([3491, 353]))\nroot - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.1205 counts=(array([0, 1]), array([3488, 58]))\nroot - Down - Down, - Leaf class=0 belief= 0.983921 impurity=0.1188 counts=(array([0, 1]), array([3488, 57]))\nroot - Down - Up, - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([1]))\nroot - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0812 counts=(array([0, 1]), array([ 3, 295]))\nroot - Up - Down, - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([3]))\nroot - Up - Up, - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([295]))\n" + ] } ], "source": [ @@ -261,7 +326,9 @@ { "output_type": "stream", "name": "stdout", - "text": "root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 
25, 26, 27) impurity=0.1664\nroot - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0524\nroot - Down - Down, - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([287]))\nroot - Down - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.3200\nroot - Down - Up - Down, - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([2]))\nroot - Down - Up - Up, - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([8]))\nroot - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0349\nroot - Up - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.2975\nroot - Up - Down - Down, - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([2]))\nroot - Up - Down - Up, - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([9]))\nroot - Up - Up, - Leaf class=0 belief= 0.982749 impurity=0.0339 counts=(array([0, 1]), array([3475, 61]))\n" + "text": [ + "root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4426 counts=(array([0, 1]), array([3491, 353]))\nroot - Down feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.1205 counts=(array([0, 1]), array([3488, 58]))\nroot - Down - Down, - Leaf class=0 belief= 0.983921 impurity=0.1188 counts=(array([0, 1]), array([3488, 57]))\nroot - Down - Up, - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([1]))\nroot - Up feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.0812 counts=(array([0, 1]), array([ 3, 295]))\nroot - Up - Down, - Leaf class=0 belief= 1.000000 impurity=0.0000 counts=(array([0]), array([3]))\nroot - Up - Up, - Leaf class=1 belief= 1.000000 impurity=0.0000 counts=(array([1]), array([295]))\n" + ] } ], "source": [ @@ -287,7 +354,51 @@ { "output_type": "stream", "name": "stdout", - "text": "1 functools.partial(, 'Stree')\n2 functools.partial(, 'Stree')\n3 functools.partial(, 'Stree')\n4 functools.partial(, 'Stree')\n5 functools.partial(, 'Stree')\n6 functools.partial(, 'Stree')\n7 functools.partial(, 'Stree')\n8 functools.partial(, 'Stree')\n9 functools.partial(, 'Stree')\n10 functools.partial(, 'Stree', readonly_memmap=True)\n11 functools.partial(, 'Stree')\n12 functools.partial(, 'Stree')\n13 functools.partial(, 'Stree')\n14 functools.partial(, 'Stree')\n15 functools.partial(, 'Stree')\n16 functools.partial(, 'Stree')\n17 functools.partial(, 'Stree')\n18 functools.partial(, 'Stree')\n19 functools.partial(, 'Stree')\n20 functools.partial(, 'Stree')\n21 functools.partial(, 'Stree')\n22 functools.partial(, 'Stree')\n23 functools.partial(, 'Stree')\n24 functools.partial(, 'Stree', readonly_memmap=True)\n25 functools.partial(, 'Stree', readonly_memmap=True, X_dtype='float32')\n26 functools.partial(, 'Stree')\n27 functools.partial(, 'Stree')\n28 functools.partial(, 'Stree')\n29 functools.partial(, 'Stree')\n30 functools.partial(, 'Stree')\n31 functools.partial(, 'Stree')\n32 functools.partial(, 'Stree')\n33 functools.partial(, 'Stree')\n34 functools.partial(, 'Stree')\n35 functools.partial(, 'Stree')\n36 functools.partial(, 
'Stree')\n37 functools.partial(, 'Stree')\n38 functools.partial(, 'Stree')\n39 functools.partial(, 'Stree')\n40 functools.partial(, 'Stree')\n41 functools.partial(, 'Stree')\n42 functools.partial(, 'Stree')\n43 functools.partial(, 'Stree')\n" + "text": [ + "1 functools.partial(, 'Stree')\n", + "2 functools.partial(, 'Stree')\n", + "3 functools.partial(, 'Stree')\n", + "4 functools.partial(, 'Stree')\n", + "5 functools.partial(, 'Stree')\n", + "6 functools.partial(, 'Stree')\n", + "7 functools.partial(, 'Stree')\n", + "8 functools.partial(, 'Stree')\n", + "9 functools.partial(, 'Stree')\n", + "10 functools.partial(, 'Stree', readonly_memmap=True)\n", + "11 functools.partial(, 'Stree')\n", + "12 functools.partial(, 'Stree')\n", + "13 functools.partial(, 'Stree')\n", + "14 functools.partial(, 'Stree')\n", + "15 functools.partial(, 'Stree')\n", + "16 functools.partial(, 'Stree')\n", + "17 functools.partial(, 'Stree')\n", + "18 functools.partial(, 'Stree')\n", + "19 functools.partial(, 'Stree')\n", + "20 functools.partial(, 'Stree')\n", + "21 functools.partial(, 'Stree')\n", + "22 functools.partial(, 'Stree')\n", + "23 functools.partial(, 'Stree')\n", + "24 functools.partial(, 'Stree', readonly_memmap=True)\n", + "25 functools.partial(, 'Stree', readonly_memmap=True, X_dtype='float32')\n", + "26 functools.partial(, 'Stree')\n", + "27 functools.partial(, 'Stree')\n", + "28 functools.partial(, 'Stree')\n", + "29 functools.partial(, 'Stree')\n", + "30 functools.partial(, 'Stree')\n", + "31 functools.partial(, 'Stree')\n", + "32 functools.partial(, 'Stree')\n", + "33 functools.partial(, 'Stree')\n", + "34 functools.partial(, 'Stree')\n", + "35 functools.partial(, 'Stree')\n", + "36 functools.partial(, 'Stree')\n", + "37 functools.partial(, 'Stree')\n", + "38 functools.partial(, 'Stree')\n", + "39 functools.partial(, 'Stree')\n", + "40 functools.partial(, 'Stree')\n", + "41 functools.partial(, 'Stree')\n", + "42 functools.partial(, 'Stree')\n", + "43 functools.partial(, 'Stree')\n" + ] } ], "source": [ @@ -327,7 +438,20 @@ { "output_type": "stream", "name": "stdout", - "text": "== Not Weighted ===\nSVC train score ..: 0.9823100936524454\nSTree train score : 0.9830905306971904\nSVC test score ...: 0.9842233009708737\nSTree test score .: 0.9860436893203883\n==== Weighted =====\nSVC train score ..: 0.9799687825182102\nSTree train score : 0.9807492195629552\nSVC test score ...: 0.9848300970873787\nSTree test score .: 0.9830097087378641\n*SVC test score ..: 0.9487167285301864\n*STree test score : 0.9538538933228189\n" + "text": [ + "== Not Weighted ===\n", + "SVC train score ..: 0.9825702393340271\n", + "STree train score : 0.9841311134235172\n", + "SVC test score ...: 0.9830097087378641\n", + "STree test score .: 0.9848300970873787\n", + "==== Weighted =====\n", + "SVC train score ..: 0.9786680541103018\n", + "STree train score : 0.9802289281997919\n", + "SVC test score ...: 0.9805825242718447\n", + "STree test score .: 0.9817961165048543\n", + "*SVC test score ..: 0.9439939825655582\n", + "*STree test score : 0.9476832429673473\n" + ] } ], "source": [ @@ -361,7 +485,9 @@ { "output_type": "stream", "name": "stdout", - "text": "root feaures=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.1664\nroot - Down, - Leaf class=1 belief= 0.888268 impurity=0.1985 counts=(array([0, 1]), array([ 40, 318]))\nroot - Up, - Leaf class=0 belief= 0.990247 impurity=0.0193 counts=(array([0, 1]), array([3452, 34]))\n\n" + "text": [ + "root feaures=(0, 1, 2, 3, 
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27) impurity=0.4426 counts=(array([0, 1]), array([3491, 353]))\nroot - Down, - Leaf class=0 belief= 0.990520 impurity=0.0773 counts=(array([0, 1]), array([3448, 33]))\nroot - Up, - Leaf class=1 belief= 0.881543 impurity=0.5249 counts=(array([0, 1]), array([ 43, 320]))\n\n" + ] } ], "source": [ @@ -385,7 +511,43 @@ { "output_type": "stream", "name": "stdout", - "text": "****************************************\nmax_features None = 28\nTrain score : 0.9841311134235172\nTest score .: 0.9848300970873787\nTook 0.35 seconds\n****************************************\nmax_features auto = 5\nTrain score : 0.981009365244537\nTest score .: 0.9830097087378641\nTook 0.71 seconds\n****************************************\nmax_features log2 = 4\nTrain score : 0.9763267429760666\nTest score .: 0.9775485436893204\nTook 0.32 seconds\n****************************************\nmax_features 7 = 7\nTrain score : 0.9750260145681582\nTest score .: 0.9775485436893204\nTook 2.35 seconds\n****************************************\nmax_features 0.5 = 14\nTrain score : 0.9820499479708636\nTest score .: 0.9824029126213593\nTook 84.80 seconds\n****************************************\nmax_features 0.1 = 2\nTrain score : 0.9513527575442248\nTest score .: 0.9526699029126213\nTook 0.25 seconds\n****************************************\nmax_features 0.7 = 19\nTrain score : 0.9841311134235172\nTest score .: 0.9830097087378641\nTook 14.16 seconds\n" + "text": [ + "****************************************\n", + "max_features None = 28\n", + "Train score : 0.9846514047866806\n", + "Test score .: 0.9866504854368932\n", + "Took 10.18 seconds\n", + "****************************************\n", + "max_features auto = 5\n", + "Train score : 0.9836108220603538\n", + "Test score .: 0.9842233009708737\n", + "Took 5.22 seconds\n", + "****************************************\n", + "max_features log2 = 4\n", + "Train score : 0.9791883454734651\n", + "Test score .: 0.9793689320388349\n", + "Took 2.05 seconds\n", + "****************************************\n", + "max_features 7 = 7\n", + "Train score : 0.9737252861602498\n", + "Test score .: 0.9739077669902912\n", + "Took 2.86 seconds\n", + "****************************************\n", + "max_features 0.5 = 14\n", + "Train score : 0.981789802289282\n", + "Test score .: 0.9824029126213593\n", + "Took 48.35 seconds\n", + "****************************************\n", + "max_features 0.1 = 2\n", + "Train score : 0.9638397502601457\n", + "Test score .: 0.9648058252427184\n", + "Took 0.35 seconds\n", + "****************************************\n", + "max_features 0.7 = 19\n", + "Train score : 0.9841311134235172\n", + "Test score .: 0.9860436893203883\n", + "Took 20.89 seconds\n" + ] } ], "source": [ @@ -417,7 +579,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.6-final" + "version": "3.8.4-final" } }, "nbformat": 4, diff --git a/notebooks/gridsearch.ipynb b/notebooks/gridsearch.ipynb index fe7402a..221e063 100644 --- a/notebooks/gridsearch.ipynb +++ b/notebooks/gridsearch.ipynb @@ -113,7 +113,9 @@ { "output_type": "stream", "name": "stdout", - "text": "Fraud: 0.173% 492\nValid: 99.827% 284315\nX.shape (1492, 28) y.shape (1492,)\nFraud: 32.976% 492\nValid: 67.024% 1000\n" + "text": [ + "Fraud: 0.173% 492\nValid: 99.827% 284315\nX.shape (1492, 28) y.shape (1492,)\nFraud: 33.177% 495\nValid: 66.823% 997\n" + ] } ] }, @@ -132,15 +134,38 @@ "colab": {} }, 
"source": [ - "parameters = {\n", + "parameters = [{\n", " 'base_estimator': [Stree()],\n", " 'n_estimators': [10, 25],\n", " 'learning_rate': [.5, 1],\n", + " 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n", " 'base_estimator__tol': [.1, 1e-02],\n", - " 'base_estimator__max_depth': [3, 5],\n", - " 'base_estimator__C': [7, 55],\n", - " 'base_estimator__kernel': ['linear', 'poly', 'rbf']\n", - "}" + " 'base_estimator__max_depth': [3, 5, 7],\n", + " 'base_estimator__C': [1, 7, 55],\n", + " 'base_estimator__kernel': ['linear']\n", + "},\n", + "{\n", + " 'base_estimator': [Stree()],\n", + " 'n_estimators': [10, 25],\n", + " 'learning_rate': [.5, 1],\n", + " 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n", + " 'base_estimator__tol': [.1, 1e-02],\n", + " 'base_estimator__max_depth': [3, 5, 7],\n", + " 'base_estimator__C': [1, 7, 55],\n", + " 'base_estimator__degree': [3, 5, 7],\n", + " 'base_estimator__kernel': ['poly']\n", + "},\n", + "{\n", + " 'base_estimator': [Stree()],\n", + " 'n_estimators': [10, 25],\n", + " 'learning_rate': [.5, 1],\n", + " 'base_estimator__split_criteria': ['max_samples', 'impurity'],\n", + " 'base_estimator__tol': [.1, 1e-02],\n", + " 'base_estimator__max_depth': [3, 5, 7],\n", + " 'base_estimator__C': [1, 7, 55],\n", + " 'base_estimator__gamma': [.1, 1, 10],\n", + " 'base_estimator__kernel': ['rbf']\n", + "}]" ], "execution_count": 5, "outputs": [] @@ -153,7 +178,21 @@ { "output_type": "execute_result", "data": { - "text/plain": "{'C': 1.0,\n 'criterion': 'gini',\n 'degree': 3,\n 'gamma': 'scale',\n 'kernel': 'linear',\n 'max_depth': None,\n 'max_features': None,\n 'max_iter': 1000,\n 'min_samples_split': 0,\n 'random_state': None,\n 'split_criteria': 'max_samples',\n 'splitter': 'random',\n 'tol': 0.0001}" + "text/plain": [ + "{'C': 1.0,\n", + " 'criterion': 'entropy',\n", + " 'degree': 3,\n", + " 'gamma': 'scale',\n", + " 'kernel': 'linear',\n", + " 'max_depth': None,\n", + " 'max_features': None,\n", + " 'max_iter': 100000.0,\n", + " 'min_samples_split': 0,\n", + " 'random_state': None,\n", + " 'split_criteria': 'impurity',\n", + " 'splitter': 'random',\n", + " 'tol': 0.0001}" + ] }, "metadata": {}, "execution_count": 6 @@ -183,18 +222,156 @@ { "output_type": "stream", "name": "stdout", - "text": "Fitting 5 folds for each of 96 candidates, totalling 480 fits\n[Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers.\n[Parallel(n_jobs=-1)]: Done 2 tasks | elapsed: 2.0s\n[Parallel(n_jobs=-1)]: Done 9 tasks | elapsed: 2.4s\n[Parallel(n_jobs=-1)]: Done 16 tasks | elapsed: 2.7s\n[Parallel(n_jobs=-1)]: Done 25 tasks | elapsed: 3.3s\n[Parallel(n_jobs=-1)]: Done 34 tasks | elapsed: 4.3s\n[Parallel(n_jobs=-1)]: Done 45 tasks | elapsed: 5.3s\n[Parallel(n_jobs=-1)]: Done 56 tasks | elapsed: 6.6s\n[Parallel(n_jobs=-1)]: Done 69 tasks | elapsed: 8.1s\n[Parallel(n_jobs=-1)]: Done 82 tasks | elapsed: 9.4s\n[Parallel(n_jobs=-1)]: Done 97 tasks | elapsed: 10.1s\n[Parallel(n_jobs=-1)]: Done 112 tasks | elapsed: 11.1s\n[Parallel(n_jobs=-1)]: Done 129 tasks | elapsed: 12.3s\n[Parallel(n_jobs=-1)]: Done 146 tasks | elapsed: 13.6s\n[Parallel(n_jobs=-1)]: Done 165 tasks | elapsed: 14.9s\n[Parallel(n_jobs=-1)]: Done 184 tasks | elapsed: 16.2s\n[Parallel(n_jobs=-1)]: Done 205 tasks | elapsed: 17.6s\n[Parallel(n_jobs=-1)]: Done 226 tasks | elapsed: 19.1s\n[Parallel(n_jobs=-1)]: Done 249 tasks | elapsed: 21.6s\n[Parallel(n_jobs=-1)]: Done 272 tasks | elapsed: 25.9s\n[Parallel(n_jobs=-1)]: Done 297 tasks | elapsed: 
30.4s\n[Parallel(n_jobs=-1)]: Done 322 tasks | elapsed: 36.7s\n[Parallel(n_jobs=-1)]: Done 349 tasks | elapsed: 38.1s\n[Parallel(n_jobs=-1)]: Done 376 tasks | elapsed: 39.6s\n[Parallel(n_jobs=-1)]: Done 405 tasks | elapsed: 41.9s\n[Parallel(n_jobs=-1)]: Done 434 tasks | elapsed: 44.9s\n[Parallel(n_jobs=-1)]: Done 465 tasks | elapsed: 48.2s\n[Parallel(n_jobs=-1)]: Done 480 out of 480 | elapsed: 49.2s finished\n" + "text": [ + "Fitting 5 folds for each of 1008 candidates, totalling 5040 fits\n", + "[Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers.\n", + "[Parallel(n_jobs=-1)]: Done 2 tasks | elapsed: 2.6s\n", + "[Parallel(n_jobs=-1)]: Done 9 tasks | elapsed: 3.2s\n", + "[Parallel(n_jobs=-1)]: Done 16 tasks | elapsed: 3.5s\n", + "[Parallel(n_jobs=-1)]: Done 25 tasks | elapsed: 4.0s\n", + "[Parallel(n_jobs=-1)]: Done 34 tasks | elapsed: 4.5s\n", + "[Parallel(n_jobs=-1)]: Done 45 tasks | elapsed: 5.0s\n", + "[Parallel(n_jobs=-1)]: Done 56 tasks | elapsed: 5.5s\n", + "[Parallel(n_jobs=-1)]: Done 69 tasks | elapsed: 6.2s\n", + "[Parallel(n_jobs=-1)]: Done 82 tasks | elapsed: 7.1s\n", + "[Parallel(n_jobs=-1)]: Done 97 tasks | elapsed: 8.2s\n", + "[Parallel(n_jobs=-1)]: Done 112 tasks | elapsed: 9.6s\n", + "[Parallel(n_jobs=-1)]: Done 129 tasks | elapsed: 11.0s\n", + "[Parallel(n_jobs=-1)]: Done 146 tasks | elapsed: 12.5s\n", + "[Parallel(n_jobs=-1)]: Done 165 tasks | elapsed: 14.3s\n", + "[Parallel(n_jobs=-1)]: Done 184 tasks | elapsed: 16.0s\n", + "[Parallel(n_jobs=-1)]: Done 205 tasks | elapsed: 18.1s\n", + "[Parallel(n_jobs=-1)]: Done 226 tasks | elapsed: 20.1s\n", + "[Parallel(n_jobs=-1)]: Done 249 tasks | elapsed: 21.9s\n", + "[Parallel(n_jobs=-1)]: Done 272 tasks | elapsed: 23.4s\n", + "[Parallel(n_jobs=-1)]: Done 297 tasks | elapsed: 24.9s\n", + "[Parallel(n_jobs=-1)]: Done 322 tasks | elapsed: 26.6s\n", + "[Parallel(n_jobs=-1)]: Done 349 tasks | elapsed: 29.3s\n", + "[Parallel(n_jobs=-1)]: Done 376 tasks | elapsed: 31.9s\n", + "[Parallel(n_jobs=-1)]: Done 405 tasks | elapsed: 35.5s\n", + "[Parallel(n_jobs=-1)]: Done 434 tasks | elapsed: 38.7s\n", + "[Parallel(n_jobs=-1)]: Done 465 tasks | elapsed: 42.1s\n", + "[Parallel(n_jobs=-1)]: Done 496 tasks | elapsed: 46.1s\n", + "[Parallel(n_jobs=-1)]: Done 529 tasks | elapsed: 52.7s\n", + "[Parallel(n_jobs=-1)]: Done 562 tasks | elapsed: 58.1s\n", + "[Parallel(n_jobs=-1)]: Done 597 tasks | elapsed: 1.1min\n", + "[Parallel(n_jobs=-1)]: Done 632 tasks | elapsed: 1.3min\n", + "[Parallel(n_jobs=-1)]: Done 669 tasks | elapsed: 1.5min\n", + "[Parallel(n_jobs=-1)]: Done 706 tasks | elapsed: 1.6min\n", + "[Parallel(n_jobs=-1)]: Done 745 tasks | elapsed: 1.7min\n", + "[Parallel(n_jobs=-1)]: Done 784 tasks | elapsed: 1.8min\n", + "[Parallel(n_jobs=-1)]: Done 825 tasks | elapsed: 1.8min\n", + "[Parallel(n_jobs=-1)]: Done 866 tasks | elapsed: 1.8min\n", + "[Parallel(n_jobs=-1)]: Done 909 tasks | elapsed: 1.9min\n", + "[Parallel(n_jobs=-1)]: Done 952 tasks | elapsed: 1.9min\n", + "[Parallel(n_jobs=-1)]: Done 997 tasks | elapsed: 2.0min\n", + "[Parallel(n_jobs=-1)]: Done 1042 tasks | elapsed: 2.0min\n", + "[Parallel(n_jobs=-1)]: Done 1089 tasks | elapsed: 2.1min\n", + "[Parallel(n_jobs=-1)]: Done 1136 tasks | elapsed: 2.2min\n", + "[Parallel(n_jobs=-1)]: Done 1185 tasks | elapsed: 2.2min\n", + "[Parallel(n_jobs=-1)]: Done 1234 tasks | elapsed: 2.3min\n", + "[Parallel(n_jobs=-1)]: Done 1285 tasks | elapsed: 2.4min\n", + "[Parallel(n_jobs=-1)]: Done 1336 tasks | elapsed: 2.4min\n", + "[Parallel(n_jobs=-1)]: Done 1389 tasks | elapsed: 
2.5min\n", + "[Parallel(n_jobs=-1)]: Done 1442 tasks | elapsed: 2.6min\n", + "[Parallel(n_jobs=-1)]: Done 1497 tasks | elapsed: 2.6min\n", + "[Parallel(n_jobs=-1)]: Done 1552 tasks | elapsed: 2.7min\n", + "[Parallel(n_jobs=-1)]: Done 1609 tasks | elapsed: 2.8min\n", + "[Parallel(n_jobs=-1)]: Done 1666 tasks | elapsed: 2.8min\n", + "[Parallel(n_jobs=-1)]: Done 1725 tasks | elapsed: 2.9min\n", + "[Parallel(n_jobs=-1)]: Done 1784 tasks | elapsed: 3.0min\n", + "[Parallel(n_jobs=-1)]: Done 1845 tasks | elapsed: 3.0min\n", + "[Parallel(n_jobs=-1)]: Done 1906 tasks | elapsed: 3.1min\n", + "[Parallel(n_jobs=-1)]: Done 1969 tasks | elapsed: 3.2min\n", + "[Parallel(n_jobs=-1)]: Done 2032 tasks | elapsed: 3.3min\n", + "[Parallel(n_jobs=-1)]: Done 2097 tasks | elapsed: 3.3min\n", + "[Parallel(n_jobs=-1)]: Done 2162 tasks | elapsed: 3.4min\n", + "[Parallel(n_jobs=-1)]: Done 2229 tasks | elapsed: 3.5min\n", + "[Parallel(n_jobs=-1)]: Done 2296 tasks | elapsed: 3.6min\n", + "[Parallel(n_jobs=-1)]: Done 2365 tasks | elapsed: 3.6min\n", + "[Parallel(n_jobs=-1)]: Done 2434 tasks | elapsed: 3.7min\n", + "[Parallel(n_jobs=-1)]: Done 2505 tasks | elapsed: 3.8min\n", + "[Parallel(n_jobs=-1)]: Done 2576 tasks | elapsed: 3.8min\n", + "[Parallel(n_jobs=-1)]: Done 2649 tasks | elapsed: 3.9min\n", + "[Parallel(n_jobs=-1)]: Done 2722 tasks | elapsed: 4.0min\n", + "[Parallel(n_jobs=-1)]: Done 2797 tasks | elapsed: 4.1min\n", + "[Parallel(n_jobs=-1)]: Done 2872 tasks | elapsed: 4.2min\n", + "[Parallel(n_jobs=-1)]: Done 2949 tasks | elapsed: 4.3min\n", + "[Parallel(n_jobs=-1)]: Done 3026 tasks | elapsed: 4.5min\n", + "[Parallel(n_jobs=-1)]: Done 3105 tasks | elapsed: 4.7min\n", + "[Parallel(n_jobs=-1)]: Done 3184 tasks | elapsed: 4.9min\n", + "[Parallel(n_jobs=-1)]: Done 3265 tasks | elapsed: 5.0min\n", + "[Parallel(n_jobs=-1)]: Done 3346 tasks | elapsed: 5.2min\n", + "[Parallel(n_jobs=-1)]: Done 3429 tasks | elapsed: 5.4min\n", + "[Parallel(n_jobs=-1)]: Done 3512 tasks | elapsed: 5.6min\n", + "[Parallel(n_jobs=-1)]: Done 3597 tasks | elapsed: 5.9min\n", + "[Parallel(n_jobs=-1)]: Done 3682 tasks | elapsed: 6.1min\n", + "[Parallel(n_jobs=-1)]: Done 3769 tasks | elapsed: 6.3min\n", + "[Parallel(n_jobs=-1)]: Done 3856 tasks | elapsed: 6.6min\n", + "[Parallel(n_jobs=-1)]: Done 3945 tasks | elapsed: 6.9min\n", + "[Parallel(n_jobs=-1)]: Done 4034 tasks | elapsed: 7.1min\n", + "[Parallel(n_jobs=-1)]: Done 4125 tasks | elapsed: 7.4min\n", + "[Parallel(n_jobs=-1)]: Done 4216 tasks | elapsed: 7.6min\n", + "[Parallel(n_jobs=-1)]: Done 4309 tasks | elapsed: 7.8min\n", + "[Parallel(n_jobs=-1)]: Done 4402 tasks | elapsed: 8.1min\n", + "[Parallel(n_jobs=-1)]: Done 4497 tasks | elapsed: 8.5min\n", + "[Parallel(n_jobs=-1)]: Done 4592 tasks | elapsed: 8.8min\n", + "[Parallel(n_jobs=-1)]: Done 4689 tasks | elapsed: 9.0min\n", + "[Parallel(n_jobs=-1)]: Done 4786 tasks | elapsed: 9.3min\n", + "[Parallel(n_jobs=-1)]: Done 4885 tasks | elapsed: 9.6min\n", + "[Parallel(n_jobs=-1)]: Done 4984 tasks | elapsed: 9.8min\n", + "[Parallel(n_jobs=-1)]: Done 5040 out of 5040 | elapsed: 10.0min finished\n" + ] }, { "output_type": "execute_result", "data": { - "text/plain": "GridSearchCV(estimator=AdaBoostClassifier(algorithm='SAMME', random_state=2020),\n n_jobs=-1,\n param_grid={'base_estimator': [Stree(C=55, max_depth=3, tol=0.01)],\n 'base_estimator__C': [7, 55],\n 'base_estimator__kernel': ['linear', 'poly', 'rbf'],\n 'base_estimator__max_depth': [3, 5],\n 'base_estimator__tol': [0.1, 0.01],\n 'learning_rate': [0.5, 1], 'n_estimators': [10, 25]},\n 
return_train_score=True, verbose=10)" + "text/plain": [ + "GridSearchCV(estimator=AdaBoostClassifier(algorithm='SAMME', random_state=2020),\n", + " n_jobs=-1,\n", + " param_grid=[{'base_estimator': [Stree(C=7, max_depth=5,\n", + " split_criteria='max_samples',\n", + " tol=0.01)],\n", + " 'base_estimator__C': [1, 7, 55],\n", + " 'base_estimator__kernel': ['linear'],\n", + " 'base_estimator__max_depth': [3, 5, 7],\n", + " 'base_estimator__split_criteria': ['max_samples',\n", + " 'impurity'],\n", + " 'base_e...\n", + " 'learning_rate': [0.5, 1], 'n_estimators': [10, 25]},\n", + " {'base_estimator': [Stree()],\n", + " 'base_estimator__C': [1, 7, 55],\n", + " 'base_estimator__gamma': [0.1, 1, 10],\n", + " 'base_estimator__kernel': ['rbf'],\n", + " 'base_estimator__max_depth': [3, 5, 7],\n", + " 'base_estimator__split_criteria': ['max_samples',\n", + " 'impurity'],\n", + " 'base_estimator__tol': [0.1, 0.01],\n", + " 'learning_rate': [0.5, 1],\n", + " 'n_estimators': [10, 25]}],\n", + " return_train_score=True, verbose=10)" + ] }, "metadata": {}, "execution_count": 7 } ] }, + { + "source": [ + "GridSearchCV(estimator=AdaBoostClassifier(algorithm='SAMME', random_state=2020),\n", + " n_jobs=-1,\n", + " param_grid={'base_estimator': [Stree(C=55, max_depth=3, tol=0.01)],\n", + " 'base_estimator__C': [7, 55],\n", + " 'base_estimator__kernel': ['linear', 'poly', 'rbf'],\n", + " 'base_estimator__max_depth': [3, 5],\n", + " 'base_estimator__tol': [0.1, 0.01],\n", + " 'learning_rate': [0.5, 1], 'n_estimators': [10, 25]},\n", + " return_train_score=True, verbose=10)" + ], + "cell_type": "markdown", + "metadata": {} + }, { "cell_type": "code", "metadata": { @@ -214,9 +391,31 @@ { "output_type": "stream", "name": "stdout", - "text": "Best estimator: AdaBoostClassifier(algorithm='SAMME',\n base_estimator=Stree(C=55, max_depth=3, tol=0.01),\n learning_rate=0.5, n_estimators=25, random_state=2020)\nBest hyperparameters: {'base_estimator': Stree(C=55, max_depth=3, tol=0.01), 'base_estimator__C': 55, 'base_estimator__kernel': 'linear', 'base_estimator__max_depth': 3, 'base_estimator__tol': 0.01, 'learning_rate': 0.5, 'n_estimators': 25}\nBest accuracy: 0.9559440559440558\n" + "text": [ + "Best estimator: AdaBoostClassifier(algorithm='SAMME',\n base_estimator=Stree(C=7, max_depth=5,\n split_criteria='max_samples',\n tol=0.01),\n learning_rate=0.5, n_estimators=25, random_state=2020)\nBest hyperparameters: {'base_estimator': Stree(C=7, max_depth=5, split_criteria='max_samples', tol=0.01), 'base_estimator__C': 7, 'base_estimator__kernel': 'linear', 'base_estimator__max_depth': 5, 'base_estimator__split_criteria': 'max_samples', 'base_estimator__tol': 0.01, 'learning_rate': 0.5, 'n_estimators': 25}\nBest accuracy: 0.9549825174825175\n" + ] } ] + }, + { + "source": [ + "Best estimator: AdaBoostClassifier(algorithm='SAMME',\n", + " base_estimator=Stree(C=55, max_depth=3, tol=0.01),\n", + " learning_rate=0.5, n_estimators=25, random_state=2020)\n", + "\n", + "Best hyperparameters: {'base_estimator': Stree(C=55, max_depth=3, tol=0.01), 'base_estimator__C': 55, 'base_estimator__kernel': 'linear', 'base_estimator__max_depth': 3, 'base_estimator__tol': 0.01, 'learning_rate': 0.5, 'n_estimators': 25}\n", + "\n", + "Best accuracy: 0.9559440559440558" + ], + "cell_type": "markdown", + "metadata": {} + }, + { + "source": [ + "0.9511547662863451" + ], + "cell_type": "markdown", + "metadata": {} } ], "metadata": { @@ -230,12 +429,12 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": 
"3.7.6-final" + "version": "3.8.4-final" }, "orig_nbformat": 2, "kernelspec": { - "name": "python37664bitgeneralvenvfbd0a23e74cf4e778460f5ffc6761f39", - "display_name": "Python 3.7.6 64-bit ('general': venv)" + "name": "python38464bitgeneralvenv77203c0a6afd4428bd66253ef62753dc", + "display_name": "Python 3.8.4 64-bit ('general': venv)" }, "colab": { "name": "gridsearch.ipynb", diff --git a/setup.py b/setup.py index a75e418..a5c0cac 100644 --- a/setup.py +++ b/setup.py @@ -1,6 +1,6 @@ import setuptools -__version__ = "0.9rc5" +__version__ = "0.9rc6" __author__ = "Ricardo Montañana Gómez" @@ -25,7 +25,7 @@ setuptools.setup( classifiers=[ "Development Status :: 4 - Beta", "License :: OSI Approved :: MIT License", - "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", "Natural Language :: English", "Topic :: Scientific/Engineering :: Artificial Intelligence", "Intended Audience :: Science/Research", diff --git a/stree/Strees.py b/stree/Strees.py index c67308e..c88e581 100644 --- a/stree/Strees.py +++ b/stree/Strees.py @@ -57,6 +57,7 @@ class Snode: ) self._features = features self._impurity = impurity + self._partition_column: int = -1 @classmethod def copy(cls, node: "Snode") -> "Snode": @@ -69,6 +70,12 @@ class Snode: node._title, ) + def set_partition_column(self, col: int): + self._partition_column = col + + def get_partition_column(self) -> int: + return self._partition_column + def set_down(self, son): self._down = son @@ -93,9 +100,8 @@ class Snode: classes, card = np.unique(self._y, return_counts=True) if len(classes) > 1: max_card = max(card) - min_card = min(card) self._class = classes[card == max_card][0] - self._belief = max_card / (max_card + min_card) + self._belief = max_card / np.sum(card) else: self._belief = 1 try: @@ -104,24 +110,23 @@ class Snode: self._class = None def __str__(self) -> str: + count_values = np.unique(self._y, return_counts=True) if self.is_leaf(): - count_values = np.unique(self._y, return_counts=True) - result = ( + return ( f"{self._title} - Leaf class={self._class} belief=" f"{self._belief: .6f} impurity={self._impurity:.4f} " f"counts={count_values}" ) - return result else: return ( f"{self._title} feaures={self._features} impurity=" - f"{self._impurity:.4f}" + f"{self._impurity:.4f} " + f"counts={count_values}" ) class Siterator: - """Stree preorder iterator - """ + """Stree preorder iterator""" def __init__(self, tree: Snode): self._stack = [] @@ -167,20 +172,22 @@ class Splitter: f"criterion must be gini or entropy got({criterion})" ) - if criteria not in ["min_distance", "max_samples", "max_distance"]: + if criteria not in [ + "max_samples", + "impurity", + ]: raise ValueError( - "split_criteria has to be min_distance " - f"max_distance or max_samples got ({criteria})" + f"criteria has to be max_samples or impurity; got ({criteria})" ) if splitter_type not in ["random", "best"]: raise ValueError( - f"splitter must be either random or best got({splitter_type})" + f"splitter must be either random or best, got({splitter_type})" ) self.criterion_function = getattr(self, f"_{self._criterion}") self.decision_criteria = getattr(self, f"_{self._criteria}") - def impurity(self, y: np.array) -> np.array: + def partition_impurity(self, y: np.array) -> np.array: return self.criterion_function(y) @staticmethod @@ -238,7 +245,7 @@ class Splitter: node = Snode( self._clf, dataset, labels, feature_set, 0.0, "subset" ) - self.partition(dataset, node) + self.partition(dataset, node, train=True) y1, y2 = self.part(labels) gain = 
self.information_gain(labels, y1, y2)
            if gain > max_gain:
@@ -265,48 +272,36 @@ class Splitter:
     def get_subspace(
         self, dataset: np.array, labels: np.array, max_features: int
-    ) -> list:
-        """Return the best subspace to make a split
-        """
+    ) -> tuple:
+        """Return the best/random subspace to make a split"""
         indices = self._get_subspaces_set(dataset, labels, max_features)
         return dataset[:, indices], indices
 
-    @staticmethod
-    def _min_distance(data: np.array, _) -> np.array:
-        """Assign class to min distances
+    def _impurity(self, data: np.array, y: np.array) -> int:
+        """Return the column of the dataset to be taken into account to
+        split the dataset
 
-        return a vector of classes so partition can separate class 0 from
-        the rest of classes, ie. class 0 goes to one splitted node and the
-        rest of classes go to the other
         :param data: distances to hyper plane of every class
        :type data: np.array (m, n_classes)
-        :param _: enable call compat with other measures
-        :type _: None
-        :return: vector with the class assigned to each sample
-        :rtype: np.array shape (m,)
-        """
-        return np.argmin(data, axis=1)
-
-    @staticmethod
-    def _max_distance(data: np.array, _) -> np.array:
-        """Assign class to max distances
-
-        return a vector of classes so partition can separate class 0 from
-        the rest of classes, ie. class 0 goes to one splitted node and the
-        rest of classes go to the other
-        :param data: distances to hyper plane of every class
-        :type data: np.array (m, n_classes)
-        :param _: enable call compat with other measures
-        :type _: None
-        :return: vector with the class assigned to each sample values
-            (can be 0, 1, ...)
+        :param y: vector of labels (classes)
+        :type y: np.array (m,)
+        :return: index of the column that produces the highest information
+            gain, or -1 if no column produces any gain
+        :rtype: int
         """
-        return np.argmax(data, axis=1)
+        max_gain = 0
+        selected = -1
+        for col in range(data.shape[1]):
+            tup = y[data[:, col] > 0]
+            tdn = y[data[:, col] <= 0]
+            info_gain = self.information_gain(y, tup, tdn)
+            if info_gain > max_gain:
+                selected = col
+                max_gain = info_gain
+        return selected
 
     @staticmethod
     def _max_samples(data: np.array, y: np.array) -> np.array:
-        """return distances of the class with more samples
+        """Return the column of the class with the largest number of samples
 
         :param data: distances to hyper plane of every class
         :type data: np.array (m, n_classes)
@@ -317,22 +312,54 @@
         """
         # select the class with max number of samples
         _, samples = np.unique(y, return_counts=True)
-        selected = np.argmax(samples)
-        return data[:, selected]
+        return np.argmax(samples)
 
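The new `_impurity` criterion replaces `min_distance`/`max_distance`: instead of assigning each sample to its nearest or farthest hyperplane, it selects the single one-vs-rest column whose sign-split of the labels yields the largest information gain, and `partition` (below) then routes samples by the sign of that column. The following is a minimal, self-contained sketch of that idea using entropy for the gain; the real `Splitter` goes through `self.information_gain` with whichever criterion it was configured with (gini or entropy), so the helper names here are illustrative only. The fixture is the one used by the updated `test_impurity`:

    import numpy as np


    def entropy(y: np.ndarray) -> float:
        # Shannon entropy of a label vector; 0.0 for an empty split
        if y.shape[0] == 0:
            return 0.0
        _, counts = np.unique(y, return_counts=True)
        p = counts / counts.sum()
        return float(-np.sum(p * np.log2(p)))


    def information_gain(y, y_up, y_down) -> float:
        # entropy decrease obtained by splitting y into y_up / y_down
        n = y.shape[0]
        return (
            entropy(y)
            - y_up.shape[0] / n * entropy(y_up)
            - y_down.shape[0] / n * entropy(y_down)
        )


    def impurity_column(distances: np.ndarray, y: np.ndarray) -> int:
        # choose the class hyperplane whose sign-split maximizes the gain;
        # -1 means no column produces any information gain
        max_gain, selected = 0.0, -1
        for col in range(distances.shape[1]):
            up, down = distances[:, col] > 0, distances[:, col] <= 0
            gain = information_gain(y, y[up], y[down])
            if gain > max_gain:
                max_gain, selected = gain, col
        return selected


    # distances of six samples to three one-vs-rest hyperplanes
    # (the same fixture used by test_impurity)
    data = np.array(
        [
            [-0.1, 0.2, -0.3],
            [0.7, 0.01, -0.1],
            [0.7, -0.9, 0.5],
            [0.1, 0.2, 0.3],
            [-0.1, 0.2, 0.3],
            [-0.1, 0.2, 0.3],
        ]
    )
    y = np.array([1, 2, 1, 0, 0, 0])
    col = impurity_column(data, y)  # -> 2
    up_mask = data[:, col] > 0  # the mask partition/part use to route samples

With this data the selected column is 2 (entropy gain roughly 0.59, against roughly 0.32 and 0.21 for columns 1 and 0), matching the expectation asserted in the updated `test_impurity`; the test itself runs with the gini criterion, which picks the same column.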
-    def partition(self, samples: np.array, node: Snode):
+    def partition(self, samples: np.array, node: Snode, train: bool):
         """Set the criteria to split arrays. Compute the indices of the
         samples that should go to one side of the tree (down)
         """
+        # data contains the distances of every sample to the hyperplane of
+        # every class: an array of shape (m, nc), nc = number of classes
         data = self._distances(node, samples)
         if data.shape[0] < self._min_samples_split:
-            self._down = np.ones((data.shape[0]), dtype=bool)
+            # there aren't enough samples to split
+            self._up = np.ones((data.shape[0]), dtype=bool)
             return
         if data.ndim > 1:
             # split criteria for multiclass
-            data = self.decision_criteria(data, node._y)
-        self._down = data > 0
+            # reduce data to the (m,) column of the selected hyperplane
+            if train:
+                # at train time compute the column to take into account to
+                # split the dataset and store it in the node
+                col = self.decision_criteria(data, node._y)
+                node.set_partition_column(col)
+            else:
+                # at predict time just use the column that was computed and
+                # stored in the node at train time
+                col = node.get_partition_column()
+            if col == -1:
+                # no partition produces information gain: send every sample
+                # to the same side
+                data = np.ones(data.shape)
+            data = data[:, col]
+        self._up = data > 0
+
+    def part(self, origin: np.array) -> list:
+        """Split an array in two based on the up indices and their
+        complement; partition has to be called first to establish the up
+        indices
+
+        :param origin: dataset to split
+        :type origin: np.array
+        :return: list with two splits of the array
+        :rtype: list
+        """
+        down = ~self._up
+        return [
+            origin[self._up] if any(self._up) else None,
+            origin[down] if any(down) else None,
+        ]
 
     @staticmethod
     def _distances(node: Snode, data: np.ndarray) -> np.array:
@@ -342,28 +369,12 @@ class Splitter:
         :type node: Snode
         :param data: samples to find out distance to hyperplane
         :type data: np.ndarray
-        :return: array of shape (m, 1) with the distances of every sample to
-            the hyperplane of the node
+        :return: array of shape (m, nc) with the distances of every sample to
+            the hyperplane of every class.
nc = # of classes :rtype: np.array """ return node._clf.decision_function(data[:, node._features]) - def part(self, origin: np.array) -> list: - """Split an array in two based on indices (down) and its complement - - :param origin: dataset to split - :type origin: np.array - :param down: indices to use to split array - :type down: np.array - :return: list with two splits of the array - :rtype: list - """ - up = ~self._down - return [ - origin[up] if any(up) else None, - origin[self._down] if any(self._down) else None, - ] - class Stree(BaseEstimator, ClassifierMixin): """Estimator that is based on binary trees of svm nodes @@ -377,14 +388,14 @@ class Stree(BaseEstimator, ClassifierMixin): self, C: float = 1.0, kernel: str = "linear", - max_iter: int = 1000, + max_iter: int = 1e5, random_state: int = None, max_depth: int = None, tol: float = 1e-4, degree: int = 3, gamma="scale", - split_criteria: str = "max_samples", - criterion: str = "gini", + split_criteria: str = "impurity", + criterion: str = "entropy", min_samples_split: int = 0, max_features=None, splitter: str = "random", @@ -521,10 +532,10 @@ class Stree(BaseEstimator, ClassifierMixin): if np.unique(y_next).shape[0] != self.n_classes_: sample_weight += 1e-5 clf.fit(Xs, y, sample_weight=sample_weight) - impurity = self.splitter_.impurity(y) + impurity = self.splitter_.partition_impurity(y) node = Snode(clf, X, y, features, impurity, title, sample_weight) self.depth_ = max(depth, self.depth_) - self.splitter_.partition(X, node) + self.splitter_.partition(X, node, True) X_U, X_D = self.splitter_.part(X) y_u, y_d = self.splitter_.part(y) sw_u, sw_d = self.splitter_.part(sample_weight) @@ -544,8 +555,7 @@ class Stree(BaseEstimator, ClassifierMixin): return node def _build_predictor(self): - """Process the leaves to make them predictors - """ + """Process the leaves to make them predictors""" def run_tree(node: Snode): if node.is_leaf(): @@ -557,8 +567,7 @@ class Stree(BaseEstimator, ClassifierMixin): run_tree(self.tree_) def _build_clf(self): - """ Build the correct classifier for the node - """ + """Build the correct classifier for the node""" return ( LinearSVC( max_iter=self.max_iter, @@ -613,7 +622,7 @@ class Stree(BaseEstimator, ClassifierMixin): # set a class for every sample in dataset prediction = np.full((xp.shape[0], 1), node._class) return prediction, indices - self.splitter_.partition(xp, node) + self.splitter_.partition(xp, node, train=False) x_u, x_d = self.splitter_.part(xp) i_u, i_d = self.splitter_.part(indices) prx_u, prin_u = predict_class(x_u, i_u, node.get_up()) diff --git a/stree/tests/Snode_test.py b/stree/tests/Snode_test.py index 27e5d0a..b32880a 100644 --- a/stree/tests/Snode_test.py +++ b/stree/tests/Snode_test.py @@ -40,12 +40,13 @@ class Snode_test(unittest.TestCase): # Check Class class_computed = classes[card == max_card] self.assertEqual(class_computed, node._class) + # Check Partition column + self.assertEqual(node._partition_column, -1) check_leave(self._clf.tree_) def test_nodes_coefs(self): - """Check if the nodes of the tree have the right attributes filled - """ + """Check if the nodes of the tree have the right attributes filled""" def run_tree(node: Snode): if node._belief < 1: @@ -54,16 +55,19 @@ class Snode_test(unittest.TestCase): self.assertIsNotNone(node._clf.coef_) if node.is_leaf(): return - run_tree(node.get_down()) run_tree(node.get_up()) + run_tree(node.get_down()) - run_tree(self._clf.tree_) + model = Stree(self._random_state) + model.fit(*load_dataset(self._random_state, 3, 4)) + 
run_tree(model.tree_) def test_make_predictor_on_leaf(self): test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test") test.make_predictor() self.assertEqual(1, test._class) self.assertEqual(0.75, test._belief) + self.assertEqual(-1, test._partition_column) def test_make_predictor_on_not_leaf(self): test = Snode(None, [1, 2, 3, 4], [1, 0, 1, 1], [], 0.0, "test") @@ -71,11 +75,14 @@ class Snode_test(unittest.TestCase): test.make_predictor() self.assertIsNone(test._class) self.assertEqual(0, test._belief) + self.assertEqual(-1, test._partition_column) + self.assertEqual(-1, test.get_up()._partition_column) def test_make_predictor_on_leaf_bogus_data(self): test = Snode(None, [1, 2, 3, 4], [], [], 0.0, "test") test.make_predictor() self.assertIsNone(test._class) + self.assertEqual(-1, test._partition_column) def test_copy_node(self): px = [1, 2, 3, 4] @@ -86,3 +93,4 @@ class Snode_test(unittest.TestCase): self.assertListEqual(computed._y, py) self.assertEqual("test", computed._title) self.assertIsInstance(computed._clf, Stree) + self.assertEqual(test._partition_column, computed._partition_column) diff --git a/stree/tests/Splitter_test.py b/stree/tests/Splitter_test.py index 8417779..a0dbc96 100644 --- a/stree/tests/Splitter_test.py +++ b/stree/tests/Splitter_test.py @@ -19,7 +19,7 @@ class Splitter_test(unittest.TestCase): min_samples_split=0, splitter_type="random", criterion="gini", - criteria="min_distance", + criteria="max_samples", random_state=None, ): return Splitter( @@ -46,11 +46,7 @@ class Splitter_test(unittest.TestCase): _ = Splitter(clf=None) for splitter_type in ["best", "random"]: for criterion in ["gini", "entropy"]: - for criteria in [ - "min_distance", - "max_samples", - "max_distance", - ]: + for criteria in ["max_samples", "impurity"]: tcl = self.build( splitter_type=splitter_type, criterion=criterion, @@ -138,43 +134,37 @@ class Splitter_test(unittest.TestCase): [0.7, 0.01, -0.1], [0.7, -0.9, 0.5], [0.1, 0.2, 0.3], + [-0.1, 0.2, 0.3], + [-0.1, 0.2, 0.3], ] ) - expected = np.array([0.2, 0.01, -0.9, 0.2]) - y = [1, 2, 1, 0] + expected = data[:, 0] + y = [1, 2, 1, 0, 0, 0] computed = tcl._max_samples(data, y) - self.assertEqual((4,), computed.shape) - self.assertListEqual(expected.tolist(), computed.tolist()) + self.assertEqual(0, computed) + computed_data = data[:, computed] + self.assertEqual((6,), computed_data.shape) + self.assertListEqual(expected.tolist(), computed_data.tolist()) - def test_min_distance(self): - tcl = self.build() + def test_impurity(self): + tcl = self.build(criteria="impurity") data = np.array( [ [-0.1, 0.2, -0.3], [0.7, 0.01, -0.1], [0.7, -0.9, 0.5], [0.1, 0.2, 0.3], + [-0.1, 0.2, 0.3], + [-0.1, 0.2, 0.3], ] ) - expected = np.array([2, 2, 1, 0]) - computed = tcl._min_distance(data, None) - self.assertEqual((4,), computed.shape) - self.assertListEqual(expected.tolist(), computed.tolist()) - - def test_max_distance(self): - tcl = self.build(criteria="max_distance") - data = np.array( - [ - [-0.1, 0.2, -0.3], - [0.7, 0.01, -0.1], - [0.7, -0.9, 0.5], - [0.1, 0.2, 0.3], - ] - ) - expected = np.array([1, 0, 0, 2]) - computed = tcl._max_distance(data, None) - self.assertEqual((4,), computed.shape) - self.assertListEqual(expected.tolist(), computed.tolist()) + expected = data[:, 2] + y = np.array([1, 2, 1, 0, 0, 0]) + computed = tcl._impurity(data, y) + self.assertEqual(2, computed) + computed_data = data[:, computed] + self.assertEqual((6,), computed_data.shape) + self.assertListEqual(expected.tolist(), computed_data.tolist()) def 
test_best_splitter_few_sets(self): X, y = load_iris(return_X_y=True) @@ -186,27 +176,22 @@ class Splitter_test(unittest.TestCase): def test_splitter_parameter(self): expected_values = [ - [2, 3, 5, 7], # best entropy min_distance - [0, 2, 4, 5], # best entropy max_samples - [0, 2, 8, 12], # best entropy max_distance - [1, 2, 5, 12], # best gini min_distance - [0, 3, 4, 10], # best gini max_samples - [1, 2, 9, 12], # best gini max_distance - [3, 9, 11, 12], # random entropy min_distance - [1, 5, 6, 9], # random entropy max_samples - [1, 2, 4, 8], # random entropy max_distance - [2, 6, 7, 12], # random gini min_distance - [3, 9, 10, 11], # random gini max_samples - [2, 5, 8, 12], # random gini max_distance + [0, 1, 7, 9], # best entropy max_samples + [3, 8, 10, 11], # best entropy impurity + [0, 2, 8, 12], # best gini max_samples + [1, 2, 5, 12], # best gini impurity + [1, 2, 5, 10], # random entropy max_samples + [4, 8, 9, 12], # random entropy impurity + [3, 9, 11, 12], # random gini max_samples + [1, 5, 6, 9], # random gini impurity ] X, y = load_wine(return_X_y=True) rn = 0 for splitter_type in ["best", "random"]: for criterion in ["entropy", "gini"]: for criteria in [ - "min_distance", "max_samples", - "max_distance", + "impurity", ]: tcl = self.build( splitter_type=splitter_type, @@ -219,8 +204,10 @@ class Splitter_test(unittest.TestCase): dataset, computed = tcl.get_subspace(X, y, max_features=4) # print( # "{}, # {:7s}{:8s}{:15s}".format( - # list(computed), splitter_type, criterion, - # criteria, + # list(computed), + # splitter_type, + # criterion, + # criteria, # ) # ) self.assertListEqual(expected, list(computed)) diff --git a/stree/tests/Stree_test.py b/stree/tests/Stree_test.py index e16a69f..77fa82a 100644 --- a/stree/tests/Stree_test.py +++ b/stree/tests/Stree_test.py @@ -5,6 +5,7 @@ import warnings import numpy as np from sklearn.datasets import load_iris, load_wine from sklearn.exceptions import ConvergenceWarning +from sklearn.svm import LinearSVC from stree import Stree, Snode from .utils import load_dataset @@ -41,23 +42,22 @@ class Stree_test(unittest.TestCase): _, count_u = np.unique(y_up, return_counts=True) # for i in unique_y: - number_down = count_d[i] + number_up = count_u[i] try: - number_up = count_u[i] + number_down = count_d[i] except IndexError: - number_up = 0 + number_down = 0 self.assertEqual(count_y[i], number_down + number_up) # Is the partition made the same as the prediction? # as the node is not a leaf... 
_, count_yp = np.unique(y_prediction, return_counts=True) - self.assertEqual(count_yp[0], y_up.shape[0]) - self.assertEqual(count_yp[1], y_down.shape[0]) + self.assertEqual(count_yp[1], y_up.shape[0]) + self.assertEqual(count_yp[0], y_down.shape[0]) self._check_tree(node.get_down()) self._check_tree(node.get_up()) def test_build_tree(self): - """Check if the tree is built the same way as predictions of models - """ + """Check if the tree is built the same way as predictions of models""" warnings.filterwarnings("ignore") for kernel in self._kernels: clf = Stree(kernel=kernel, random_state=self._random_state) @@ -99,20 +99,22 @@ class Stree_test(unittest.TestCase): self.assertListEqual(yp_line.tolist(), yp_once.tolist()) def test_iterator_and_str(self): - """Check preorder iterator - """ + """Check preorder iterator""" expected = [ - "root feaures=(0, 1, 2) impurity=0.5000", - "root - Down feaures=(0, 1, 2) impurity=0.0671", - "root - Down - Down, - Leaf class=1 belief= 0.975989 " - "impurity=0.0469 counts=(array([0, 1]), array([ 17, 691]))", - "root - Down - Up feaures=(0, 1, 2) impurity=0.3967", - "root - Down - Up - Down, - Leaf class=1 belief= 0.750000 " - "impurity=0.3750 counts=(array([0, 1]), array([1, 3]))", - "root - Down - Up - Up, - Leaf class=0 belief= 1.000000 " - "impurity=0.0000 counts=(array([0]), array([7]))", - "root - Up, - Leaf class=0 belief= 0.928297 impurity=0.1331" - " counts=(array([0, 1]), array([725, 56]))", + "root feaures=(0, 1, 2) impurity=1.0000 counts=(array([0, 1]), arr" + "ay([750, 750]))", + "root - Down, - Leaf class=0 belief= 0.928297 impurity=0.37" + "22 counts=(array([0, 1]), array([725, 56]))", + "root - Up feaures=(0, 1, 2) impurity=0.2178 counts=(array([0, 1])" + ", array([ 25, 694]))", + "root - Up - Down feaures=(0, 1, 2) impurity=0.8454 counts=(array(" + "[0, 1]), array([8, 3]))", + "root - Up - Down - Down, - Leaf class=0 belief= 1.000000 i" + "mpurity=0.0000 counts=(array([0]), array([7]))", + "root - Up - Down - Up, - Leaf class=1 belief= 0.750000 imp" + "urity=0.8113 counts=(array([0, 1]), array([1, 3]))", + "root - Up - Up, - Leaf class=1 belief= 0.975989 impurity=0" + ".1634 counts=(array([0, 1]), array([ 17, 691]))", ] computed = [] expected_string = "" @@ -188,44 +190,43 @@ class Stree_test(unittest.TestCase): def test_muticlass_dataset(self): datasets = { "Synt": load_dataset(random_state=self._random_state, n_classes=3), - "Iris": load_iris(return_X_y=True), + "Iris": load_wine(return_X_y=True), } outcomes = { "Synt": { - "max_samples linear": 0.9533333333333334, - "max_samples rbf": 0.836, - "max_samples poly": 0.9473333333333334, - "min_distance linear": 0.9533333333333334, - "min_distance rbf": 0.836, - "min_distance poly": 0.9473333333333334, - "max_distance linear": 0.9533333333333334, - "max_distance rbf": 0.836, - "max_distance poly": 0.9473333333333334, + "max_samples linear": 0.9606666666666667, + "max_samples rbf": 0.7133333333333334, + "max_samples poly": 0.49066666666666664, + "impurity linear": 0.9606666666666667, + "impurity rbf": 0.7133333333333334, + "impurity poly": 0.49066666666666664, }, "Iris": { - "max_samples linear": 0.98, - "max_samples rbf": 1.0, - "max_samples poly": 1.0, - "min_distance linear": 0.98, - "min_distance rbf": 1.0, - "min_distance poly": 1.0, - "max_distance linear": 0.98, - "max_distance rbf": 1.0, - "max_distance poly": 1.0, + "max_samples linear": 1.0, + "max_samples rbf": 0.6910112359550562, + "max_samples poly": 0.6966292134831461, + "impurity linear": 1, + "impurity rbf": 0.6910112359550562, 
+ "impurity poly": 0.6966292134831461, }, } + for name, dataset in datasets.items(): px, py = dataset - for criteria in ["max_samples", "min_distance", "max_distance"]: + for criteria in ["max_samples", "impurity"]: for kernel in self._kernels: clf = Stree( - C=1e4, - max_iter=1e4, + C=55, + max_iter=1e5, kernel=kernel, random_state=self._random_state, ) clf.fit(px, py) outcome = outcomes[name][f"{criteria} {kernel}"] + # print( + # f"{name} {criteria} {kernel} {outcome} {clf.score(px" + # ", py)}" + # ) self.assertAlmostEqual(outcome, clf.score(px, py)) def test_max_features(self): @@ -297,7 +298,10 @@ class Stree_test(unittest.TestCase): 0.9433333333333334, ] for kernel, accuracy_expected in zip(self._kernels, accuracies): - clf = Stree(random_state=self._random_state, kernel=kernel,) + clf = Stree( + random_state=self._random_state, + kernel=kernel, + ) clf.fit(X, y) accuracy_score = clf.score(X, y) yp = clf.predict(X) @@ -309,75 +313,7 @@ class Stree_test(unittest.TestCase): X, y = load_dataset(self._random_state) clf = Stree(random_state=self._random_state, max_features=2) clf.fit(X, y) - self.assertAlmostEqual(0.9426666666666667, clf.score(X, y)) - - def test_score_multi_class(self): - warnings.filterwarnings("ignore") - accuracies = [ - 0.8258427, # Wine linear min_distance - 0.6741573, # Wine linear max_distance - 0.8314607, # Wine linear max_samples - 0.6629213, # Wine rbf min_distance - 1.0000000, # Wine rbf max_distance - 0.4044944, # Wine rbf max_samples - 0.9157303, # Wine poly min_distance - 1.0000000, # Wine poly max_distance - 0.7640449, # Wine poly max_samples - 0.9933333, # Iris linear min_distance - 0.9666667, # Iris linear max_distance - 0.9666667, # Iris linear max_samples - 0.9800000, # Iris rbf min_distance - 0.9800000, # Iris rbf max_distance - 0.9800000, # Iris rbf max_samples - 1.0000000, # Iris poly min_distance - 1.0000000, # Iris poly max_distance - 1.0000000, # Iris poly max_samples - 0.8993333, # Synthetic linear min_distance - 0.6533333, # Synthetic linear max_distance - 0.9313333, # Synthetic linear max_samples - 0.8320000, # Synthetic rbf min_distance - 0.6660000, # Synthetic rbf max_distance - 0.8320000, # Synthetic rbf max_samples - 0.6066667, # Synthetic poly min_distance - 0.6840000, # Synthetic poly max_distance - 0.6340000, # Synthetic poly max_samples - ] - datasets = [ - ("Wine", load_wine(return_X_y=True)), - ("Iris", load_iris(return_X_y=True)), - ( - "Synthetic", - load_dataset(self._random_state, n_classes=3, n_features=5), - ), - ] - for dataset_name, dataset in datasets: - X, y = dataset - for kernel in self._kernels: - for criteria in [ - "min_distance", - "max_distance", - "max_samples", - ]: - clf = Stree( - C=17, - random_state=self._random_state, - kernel=kernel, - split_criteria=criteria, - degree=5, - gamma="auto", - ) - clf.fit(X, y) - accuracy_score = clf.score(X, y) - yp = clf.predict(X) - accuracy_computed = np.mean(yp == y) - # print( - # "{:.7f}, # {:7} {:5} {}".format( - # accuracy_score, dataset_name, kernel, criteria - # ) - # ) - accuracy_expected = accuracies.pop(0) - self.assertEqual(accuracy_score, accuracy_computed) - self.assertAlmostEqual(accuracy_expected, accuracy_score) + self.assertAlmostEqual(0.944, clf.score(X, y)) def test_bogus_splitter_parameter(self): clf = Stree(splitter="duck") @@ -407,10 +343,102 @@ class Stree_test(unittest.TestCase): original = weights_no_zero.copy() clf = Stree() clf.fit(X, y) - node = clf.train(X, y, weights, 1, "test",) + node = clf.train( + X, + y, + weights, + 1, + "test", + ) # if a 
class is lost with zero weights the patch adds epsilon
         self.assertListEqual(weights.tolist(), weights_epsilon)
         self.assertListEqual(node._sample_weight.tolist(), weights_epsilon)
         # zero weights are ok when they don't erase a class
         _ = clf.train(X, y, weights_no_zero, 1, "test")
         self.assertListEqual(weights_no_zero.tolist(), original.tolist())
+
+    def test_multiclass_classifier_integrity(self):
+        """Checks if the multiclass operation is done right"""
+        X, y = load_iris(return_X_y=True)
+        clf = Stree(random_state=0)
+        clf.fit(X, y)
+        score = clf.score(X, y)
+        # Check accuracy of the whole model
+        self.assertAlmostEqual(0.98, score, 5)
+        svm = LinearSVC(random_state=0)
+        svm.fit(X, y)
+        self.assertAlmostEqual(0.9666666666666667, svm.score(X, y), 5)
+        data = svm.decision_function(X)
+        expected = [
+            0.4444444444444444,
+            0.35777777777777775,
+            0.4569777777777778,
+        ]
+        ty = data.copy()
+        ty[data <= 0] = 0
+        ty[data > 0] = 1
+        ty = ty.astype(int)
+        for i in range(3):
+            self.assertAlmostEqual(
+                expected[i],
+                clf.splitter_._gini(ty[:, i]),
+            )
+        # 1st Branch
+        # up has to have 53 samples of classes [1, 2] with counts [3, 50]
+        # down has to have 97 samples of classes [0, 1] with counts [50, 47]
+        up = data[:, 2] > 0
+        resup = np.unique(y[up], return_counts=True)
+        resdn = np.unique(y[~up], return_counts=True)
+        self.assertListEqual([1, 2], resup[0].tolist())
+        self.assertListEqual([3, 50], resup[1].tolist())
+        self.assertListEqual([0, 1], resdn[0].tolist())
+        self.assertListEqual([50, 47], resdn[1].tolist())
+        # 2nd Branch
+        # up should have 53 samples of classes [1, 2] with counts [3, 50]
+        # down should have 47 samples of class 1
+        node_up = clf.tree_.get_down().get_up()
+        node_dn = clf.tree_.get_down().get_down()
+        resup = np.unique(node_up._y, return_counts=True)
+        resdn = np.unique(node_dn._y, return_counts=True)
+        self.assertListEqual([1, 2], resup[0].tolist())
+        self.assertListEqual([3, 50], resup[1].tolist())
+        self.assertListEqual([1], resdn[0].tolist())
+        self.assertListEqual([47], resdn[1].tolist())
+
+    def test_score_multiclass_rbf(self):
+        X, y = load_dataset(
+            random_state=self._random_state,
+            n_classes=3,
+            n_features=5,
+            n_samples=500,
+        )
+        clf = Stree(kernel="rbf", random_state=self._random_state)
+        self.assertEqual(0.824, clf.fit(X, y).score(X, y))
+        X, y = load_wine(return_X_y=True)
+        self.assertEqual(0.6741573033707865, clf.fit(X, y).score(X, y))
+
+    def test_score_multiclass_poly(self):
+        X, y = load_dataset(
+            random_state=self._random_state,
+            n_classes=3,
+            n_features=5,
+            n_samples=500,
+        )
+        clf = Stree(
+            kernel="poly", random_state=self._random_state, C=10, degree=5
+        )
+        self.assertEqual(0.786, clf.fit(X, y).score(X, y))
+        X, y = load_wine(return_X_y=True)
+        self.assertEqual(0.702247191011236, clf.fit(X, y).score(X, y))
+
+    def test_score_multiclass_linear(self):
+        X, y = load_dataset(
+            random_state=self._random_state,
+            n_classes=3,
+            n_features=5,
+            n_samples=1500,
+        )
+        clf = Stree(kernel="linear", random_state=self._random_state)
+        self.assertEqual(0.9533333333333334, clf.fit(X, y).score(X, y))
+        X, y = load_wine(return_X_y=True)
+        self.assertEqual(0.9550561797752809, clf.fit(X, y).score(X, y))
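With the new defaults introduced by this patch (split_criteria="impurity", criterion="entropy", max_iter=1e5), a plain Stree() now exercises the impurity splitter. A minimal usage sketch in the spirit of test_score_multiclass_linear; the random_state value below is illustrative, and the 0.9550561797752809 pinned above assumes the suite's fixture, so treat the printed score as indicative rather than exact:

    from sklearn.datasets import load_wine
    from stree import Stree

    X, y = load_wine(return_X_y=True)  # 178 samples, 13 features, 3 classes

    # with this patch the defaults are split_criteria="impurity",
    # criterion="entropy" and max_iter=1e5
    clf = Stree(kernel="linear", random_state=1).fit(X, y)
    print(f"train accuracy: {clf.score(X, y):.4f}")
    # test_score_multiclass_linear pins 0.9550561797752809 for the suite's
    # random_state; other seeds may give slightly different scores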
diff --git a/stree/tests/utils.py b/stree/tests/utils.py
index 94b0506..61a3640 100644
--- a/stree/tests/utils.py
+++ b/stree/tests/utils.py
@@ -1,9 +1,9 @@
 from sklearn.datasets import make_classification
 
 
-def load_dataset(random_state=0, n_classes=2, n_features=3):
+def load_dataset(random_state=0, n_classes=2, n_features=3, n_samples=1500):
     X, y = make_classification(
-        n_samples=1500,
+        n_samples=n_samples,
         n_features=n_features,
         n_informative=3,
         n_redundant=0,