diff --git a/Makefile b/Makefile
index 53b8218..ffc0216 100644
--- a/Makefile
+++ b/Makefile
@@ -37,6 +37,11 @@ install: ## Build extension
 audit: ## Audit pip
 	pip-audit
 
+version:
+	@echo "Current Python version .: $(shell python --version)"
+	@echo "Current FImdlp version .: $(shell python -c "from fimdlp import _version; print(_version.__version__)")"
+	@echo "Installed FImdlp version: $(shell pip show fimdlp | grep Version | cut -d' ' -f2)"
+
 help: ## Show help message
 	@IFS=$$'\n' ; \
 	help_lines=(`fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/##/:/'`); \
diff --git a/k.py b/k.py
new file mode 100644
index 0000000..47e0856
--- /dev/null
+++ b/k.py
@@ -0,0 +1,12 @@
+from sklearn.datasets import load_wine
+from fimdlp.mdlp import FImdlp
+
+X, y = load_wine(return_X_y=True)
+trans = FImdlp()
+Xt = trans.fit(X, y).join_fit([0, 1], 12, X)  # feature indices are illustrative
+print("X shape = ", X.shape)
+print("Xt.shape=", Xt.shape)
+print("Xt ", Xt[:10])
+print("trans.X_ shape = ", trans.X_.shape)
+print("trans.y_ ", trans.y_[:10])
+print("y_join ", trans.y_join_[:10])
diff --git a/setup.py b/setup.py
index db8a696..0ba294e 100644
--- a/setup.py
+++ b/setup.py
@@ -14,10 +14,13 @@ setup(
             "src/fimdlp/cfimdlp.pyx",
             "src/cppmdlp/CPPFImdlp.cpp",
             "src/cppmdlp/Metrics.cpp",
+            "src/fimdlp/Factorize.cpp",
         ],
         language="c++",
         include_dirs=["fimdlp"],
-        extra_compile_args=["-std=c++2a"],
+        extra_compile_args=[
+            "-std=c++11",
+        ],
     ),
 ]
 )
diff --git a/src/cppmdlp b/src/cppmdlp
index 1b89f59..e97aea2 160000
--- a/src/cppmdlp
+++ b/src/cppmdlp
@@ -1 +1 @@
-Subproject commit 1b89f5927c3add921b19fe29094d354780f98b5f
+Subproject commit e97aea2a4de7e4e4a24e87744d8987b899b1a239
diff --git a/src/fimdlp/Factorize.cpp b/src/fimdlp/Factorize.cpp
new file mode 100644
index 0000000..f814d6f
--- /dev/null
+++ b/src/fimdlp/Factorize.cpp
@@ -0,0 +1,18 @@
+#include "Factorize.h"
+
+namespace utils {
+    vector<int> cppFactorize(const vector<string>& labels_t)
+    {
+        vector<int> yy;
+        yy.reserve(labels_t.size());
+        map<string, int> labelMap;
+        int i = 0;
+        for (const string& label : labels_t) {
+            if (labelMap.find(label) == labelMap.end()) {
+                labelMap[label] = i++;
+            }
+            yy.push_back(labelMap[label]);
+        }
+        return yy;
+    }
+}
\ No newline at end of file
diff --git a/src/fimdlp/Factorize.h b/src/fimdlp/Factorize.h
new file mode 100644
index 0000000..28f4c74
--- /dev/null
+++ b/src/fimdlp/Factorize.h
@@ -0,0 +1,10 @@
+#ifndef FACTORIZE_H
+#define FACTORIZE_H
+#include <map>
+#include <string>
+#include <vector>
+namespace utils {
+    using namespace std;
+    vector<int> cppFactorize(const vector<string>&);
+}
+#endif
\ No newline at end of file
diff --git a/src/fimdlp/__init__.py b/src/fimdlp/__init__.py
index 3a99d3b..0abf8ef 100644
--- a/src/fimdlp/__init__.py
+++ b/src/fimdlp/__init__.py
@@ -1,8 +1,4 @@
 from ._version import __version__
 
 
-def version():
-    return __version__
-
-
 all = ["FImdlp", "__version__"]
diff --git a/src/fimdlp/_version.py b/src/fimdlp/_version.py
index a2fecb4..c598173 100644
--- a/src/fimdlp/_version.py
+++ b/src/fimdlp/_version.py
@@ -1 +1 @@
-__version__ = "0.9.2"
+__version__ = "0.9.3"
diff --git a/src/fimdlp/cfimdlp.pyx b/src/fimdlp/cfimdlp.pyx
index 9b548dd..8892e8b 100644
--- a/src/fimdlp/cfimdlp.pyx
+++ b/src/fimdlp/cfimdlp.pyx
@@ -6,15 +6,15 @@ from libcpp.string cimport string
 cdef extern from "../cppmdlp/CPPFImdlp.h" namespace "mdlp":
     ctypedef float precision_t
     cdef cppclass CPPFImdlp:
-        CPPFImdlp(int) except +
+        CPPFImdlp() except +
         CPPFImdlp& fit(vector[precision_t]&, vector[int]&)
         vector[precision_t] getCutPoints()
        string version()
 
 cdef class CFImdlp:
     cdef CPPFImdlp *thisptr
-    def __cinit__(self, algorithm):
-        self.thisptr = new CPPFImdlp(algorithm)
+    def __cinit__(self):
+        self.thisptr = new CPPFImdlp()
     def __dealloc__(self):
         del self.thisptr
     def fit(self, X, y):
@@ -24,3 +24,10 @@ cdef class CFImdlp:
         return self.thisptr.getCutPoints()
     def get_version(self):
         return self.thisptr.version()
+    def __reduce__(self):
+        return (CFImdlp, ())
+
+cdef extern from "Factorize.h" namespace "utils":
+    vector[int] cppFactorize(vector[string] &input_vector)
+def factorize(input_vector):
+    return cppFactorize(input_vector)
\ No newline at end of file
diff --git a/src/fimdlp/mdlp.py b/src/fimdlp/mdlp.py
index e4e4437..2a2114a 100644
--- a/src/fimdlp/mdlp.py
+++ b/src/fimdlp/mdlp.py
@@ -1,25 +1,20 @@
 import numpy as np
-from .cppfimdlp import CFImdlp
+from .cppfimdlp import CFImdlp, factorize
 from sklearn.base import BaseEstimator, TransformerMixin
 from sklearn.utils.multiclass import unique_labels
 from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
 from joblib import Parallel, delayed
+from ._version import __version__
 
 
 class FImdlp(TransformerMixin, BaseEstimator):
-    def __init__(self, algorithm=0, n_jobs=-1):
-        self.algorithm = algorithm
+    def __init__(self, n_jobs=-1):
         self.n_jobs = n_jobs
 
     """Fayyad - Irani MDLP discretization algorithm based implementation.
 
     Parameters
     ----------
-    algorithm : int, default=0
-        The type of algorithm to use computing the cut points.
-        0 - Definitive implementation
-        1 - Alternative proposal
-        2 - Classic proposal
     n_jobs : int, default=-1
         The number of jobs to run in parallel. :meth:`fit` and
         :meth:`transform`, are parallelized over the features. ``-1`` means
@@ -27,27 +22,26 @@ class FImdlp(TransformerMixin, BaseEstimator):
 
     Attributes
     ----------
-    n_features_ : int
+    n_features_in_ : int
         The number of features of the data passed to :meth:`fit`.
     discretizer_ : list
         The list of discretizers, one for each feature.
     cut_points_ : list
         The list of cut points for each feature.
-    X_ : array
-        the samples used to fit, shape (n_samples, n_features)
-    y_ : array
-        the labels used to fit, shape (n_samples,)
+    X_ : array, shape (n_samples, n_features)
+        the samples used to fit
+    y_ : array, shape (n_samples,)
+        the labels used to fit
     features_ : list
         the list of features to be discretized
     """
 
-    def _check_params_fit(self, X, y, expected_args, kwargs):
-        """Check the common parameters passed to fit"""
+    def _more_tags(self):
+        return {"preserves_dtype": [np.int32], "requires_y": True}
+
+    def _check_args(self, X, y, expected_args, kwargs):
         # Check that X and y have correct shape
         X, y = check_X_y(X, y)
-        # Store the classes seen during fit
-        self.classes_ = unique_labels(y)
-        self.n_classes_ = self.classes_.shape[0]
         # Default values
         self.features_ = [i for i in range(X.shape[1])]
         for key, value in kwargs.items():
@@ -68,15 +62,24 @@ class FImdlp(TransformerMixin, BaseEstimator):
                 raise ValueError("Feature index out of range")
         return X, y
 
+    def _update_params(self, X, y):
+        # Store the classes seen during fit
+        self.classes_ = unique_labels(y)
+        self.n_classes_ = self.classes_.shape[0]
+        self.n_features_in_ = X.shape[1]
+
+    @staticmethod
+    def get_version():
+        return f"{__version__}({CFImdlp().get_version().decode()})"
+
     def fit(self, X, y, **kwargs):
         """A reference implementation of a fitting function for a transformer.
 
         Parameters
         ----------
-        X : {array-like, sparse matrix}, shape (n_samples, n_features)
+        X : array, shape (n_samples, n_features)
             The training input samples.
-        y : None
-            There is no need of a target in a transformer, yet the pipeline
-            API requires this parameter.
+        y : array, shape (n_samples,)
+            the labels used to fit
         features : list, default=[i for i in range(n_features)]
             The list of features to be discretized.
 
         Returns
         -------
@@ -84,23 +87,23 @@
         self : object
             Returns self.
         """
-        X, y = self._check_params_fit(
+        X, y = self._check_args(
             X, y, expected_args=["features"], kwargs=kwargs
         )
-        self.n_features_ = X.shape[1]
+        self._update_params(X, y)
         self.X_ = X
         self.y_ = y
-        self.discretizer_ = [None] * self.n_features_
-        self.cut_points_ = [None] * self.n_features_
+        self.discretizer_ = [None] * self.n_features_in_
+        self.cut_points_ = [None] * self.n_features_in_
         Parallel(n_jobs=self.n_jobs, prefer="threads")(
             delayed(self._fit_discretizer)(feature)
-            for feature in range(self.n_features_)
+            for feature in range(self.n_features_in_)
         )
         return self
 
     def _fit_discretizer(self, feature):
         if feature in self.features_:
-            self.discretizer_[feature] = CFImdlp(algorithm=self.algorithm)
+            self.discretizer_[feature] = CFImdlp()
             self.discretizer_[feature].fit(self.X_[:, feature], self.y_)
             self.cut_points_[feature] = self.discretizer_[
                 feature
@@ -119,7 +122,7 @@ class FImdlp(TransformerMixin, BaseEstimator):
         """Discretize X values.
 
         Parameters
         ----------
-        X : {array-like}, shape (n_samples, n_features)
+        X : array, shape (n_samples, n_features)
             The input samples.
         Returns
         -------
@@ -127,25 +130,41 @@
             The array containing the discretized values of ``X``.
         """
         # Check is fit had been called
-        check_is_fitted(self, "n_features_")
+        check_is_fitted(self, "n_features_in_")
         # Input validation
         X = check_array(X)
         # Check that the input is of the same shape as the one passed
         # during fit.
-        if X.shape[1] != self.n_features_:
+        if X.shape[1] != self.n_features_in_:
             raise ValueError(
                 "Shape of input is different from what was seen in `fit`"
             )
-        if len(self.features_) == self.n_features_:
+        if len(self.features_) == self.n_features_in_:
             result = np.zeros_like(X, dtype=np.int32) - 1
         else:
             result = np.zeros_like(X) - 1
         Parallel(n_jobs=self.n_jobs, prefer="threads")(
             delayed(self._discretize_feature)(feature, X[:, feature], result)
-            for feature in range(self.n_features_)
+            for feature in range(self.n_features_in_)
         )
         return result
 
+    @staticmethod
+    def factorize(yy):
+        """Factorize the input labels
+
+        Parameters
+        ----------
+        yy : array, shape (n_samples,)
+            Labels to be factorized, MUST be bytes, e.g. b"0", b"1", ...
+
+        Returns
+        -------
+        array, shape (n_samples,)
+            Factorized labels
+        """
+        return factorize(yy)
+
     def get_cut_points(self):
         """Get the cut points for each feature.
 
         Returns
         -------
@@ -154,6 +173,70 @@
         result: list
             The list of cut points for each feature.
""" result = [] - for feature in range(self.n_features_): + for feature in range(self.n_features_in_): result.append(self.cut_points_[feature]) return result + + def get_states_feature(self, feature): + """Return the states a feature can take + + Parameters + ---------- + feature : int + feature to get the states + + Returns + ------- + list + states of the feature + """ + if feature in self.features_: + return list(range(len(self.cut_points_[feature]) + 1)) + return None + + def join_fit(self, features, target, data): + """Join the selected features with the labels and fit the discretizer + of the target variable + join - fit - transform + + Parameters + ---------- + features : [list] + index of the features to join with the labels + target : [int] + index of the target variable to discretize + data: [array] shape (n_samples, n_features) + dataset that contains the features to join + + Returns + ------- + result: np.array + The target variable newly discretized + """ + check_is_fitted(self, "n_features_in_") + if len(features) < 1 or len(features) > self.n_features_in_: + raise ValueError( + "Number of features must be in range [1, " + f"{self.n_features_in_}]" + ) + for feature in features: + if feature < 0 or feature >= self.n_features_in_: + raise ValueError( + f"Feature {feature} not in range [0, " + f"{self.n_features_in_})" + ) + if target < 0 or target >= self.n_features_in_: + raise ValueError( + f"Target {target} not in range [0, {self.n_features_in_})" + ) + if target in features: + raise ValueError("Target cannot in features to join") + y_join = [ + f"{str(item_y)}{''.join([str(x) for x in items_x])}".encode() + for item_y, items_x in zip(self.y_, data[:, features]) + ] + self.y_join_ = y_join + self.discretizer_[target].fit(self.X_[:, target], factorize(y_join)) + self.cut_points_[target] = self.discretizer_[target].get_cut_points() + # return the discretized target variable with the new cut points + return np.searchsorted(self.cut_points_[target], self.X_[:, target]) diff --git a/src/fimdlp/tests/FImdlp_test.py b/src/fimdlp/tests/FImdlp_test.py index 99c5864..068a4e8 100644 --- a/src/fimdlp/tests/FImdlp_test.py +++ b/src/fimdlp/tests/FImdlp_test.py @@ -1,67 +1,46 @@ import unittest import sklearn -from sklearn.datasets import load_iris import numpy as np +from sklearn.datasets import load_iris +from sklearn.utils.estimator_checks import check_estimator +from ..cppfimdlp import CFImdlp, factorize from ..mdlp import FImdlp -from .. import version -from .._version import __version__ +from .. 
import __version__ + +# from .._version import __version__ class FImdlpTest(unittest.TestCase): def test_version(self): - self.assertEqual(version(), __version__) + clf = FImdlp() + self.assertEqual( + clf.get_version(), + f"{__version__}({CFImdlp().get_version().decode()})", + ) def test_init(self): clf = FImdlp() self.assertEqual(-1, clf.n_jobs) - self.assertEqual(0, clf.algorithm) - clf = FImdlp(algorithm=1, n_jobs=7) - self.assertEqual(1, clf.algorithm) + clf = FImdlp(n_jobs=7) self.assertEqual(7, clf.n_jobs) def test_fit_definitive(self): - clf = FImdlp(algorithm=0) - clf.fit([[1, 2], [3, 4]], [1, 2]) - self.assertEqual(clf.n_features_, 2) - self.assertListEqual(clf.X_.tolist(), [[1, 2], [3, 4]]) - self.assertListEqual(clf.y_.tolist(), [1, 2]) - self.assertListEqual([[2.0], [3.0]], clf.get_cut_points()) + clf = FImdlp() X, y = load_iris(return_X_y=True) clf.fit(X, y) - self.assertEqual(clf.n_features_, 4) + self.assertEqual(clf.n_features_in_, 4) self.assertTrue(np.array_equal(X, clf.X_)) self.assertTrue(np.array_equal(y, clf.y_)) - expected = [ - [5.449999809265137, 6.25], - [2.8499999046325684, 3.0, 3.049999952316284, 3.3499999046325684], - [2.450000047683716, 4.75, 5.050000190734863], - [0.800000011920929, 1.4500000476837158, 1.75], - ] - self.assertListEqual(expected, clf.get_cut_points()) - self.assertListEqual([0, 1, 2, 3], clf.features_) - clf.fit(X, y, features=[0, 2, 3]) - self.assertListEqual([0, 2, 3], clf.features_) - - def test_fit_alternative(self): - clf = FImdlp(algorithm=1) - clf.fit([[1, 2], [3, 4]], [1, 2]) - self.assertEqual(clf.n_features_, 2) - self.assertListEqual(clf.X_.tolist(), [[1, 2], [3, 4]]) - self.assertListEqual(clf.y_.tolist(), [1, 2]) - self.assertListEqual([[2], [3]], clf.get_cut_points()) - X, y = load_iris(return_X_y=True) - clf.fit(X, y) - self.assertEqual(clf.n_features_, 4) - self.assertTrue(np.array_equal(X, clf.X_)) - self.assertTrue(np.array_equal(y, clf.y_)) - expected = [ [5.449999809265137, 5.75], - [2.8499999046325684, 3.3499999046325684], - [2.450000047683716, 4.75], - [0.800000011920929, 1.75], + [2.75, 2.8499999046325684, 2.95, 3.05, 3.3499999046325684], + [2.45, 4.75, 5.050000190734863], + [0.8, 1.75], ] - self.assertListEqual(expected, clf.get_cut_points()) + computed = clf.get_cut_points() + for item_computed, item_expected in zip(computed, expected): + for x_, y_ in zip(item_computed, item_expected): + self.assertAlmostEqual(x_, y_) self.assertListEqual([0, 1, 2, 3], clf.features_) clf.fit(X, y, features=[0, 2, 3]) self.assertListEqual([0, 2, 3], clf.features_) @@ -82,8 +61,12 @@ class FImdlpTest(unittest.TestCase): clf.fit([[1, 2], [3, 4]], [1, 2], features=[0, 2]) def test_fit_features(self): - clf = FImdlp() + clf = FImdlp(n_jobs=-1) + # Two samples doesn't have enough information to split clf.fit([[1, -2], [3, 4]], [1, 2], features=[0]) + self.assertListEqual(clf.get_cut_points(), [[], []]) + clf.fit([[1, -2], [3, 4], [5, 6]], [1, 2, 2], features=[0]) + self.assertListEqual(clf.get_cut_points(), [[2], []]) res = clf.transform([[1, -2], [3, 4]]) self.assertListEqual(res.tolist(), [[0, -2], [1, 4]]) X, y = load_iris(return_X_y=True) @@ -98,15 +81,15 @@ class FImdlpTest(unittest.TestCase): ) self.assertEqual(X_computed.dtype, np.float64) - def test_transform_definitive(self): - clf = FImdlp(algorithm=0) - clf.fit([[1, 2], [3, 4]], [1, 2]) + def test_transform(self): + clf = FImdlp() + clf.fit([[1, 2], [3, 4], [5, 6]], [1, 2, 2]) self.assertEqual( clf.transform([[1, 2], [3, 4]]).tolist(), [[0, 0], [1, 1]] ) X, y = 
load_iris(return_X_y=True) clf.fit(X, y) - self.assertEqual(clf.n_features_, 4) + self.assertEqual(clf.n_features_in_, 4) self.assertTrue(np.array_equal(X, clf.X_)) self.assertTrue(np.array_equal(y, clf.y_)) X_transformed = clf.transform(X) @@ -116,46 +99,131 @@ class FImdlpTest(unittest.TestCase): self.assertEqual(X_transformed.dtype, np.int32) expected = [ [1, 0, 1, 1], - [1, 1, 1, 1], - [1, 0, 1, 1], - [0, 0, 1, 1], - [1, 0, 1, 1], - [1, 1, 1, 1], - [1, 1, 1, 1], - ] - self.assertTrue(np.array_equal(clf.transform(X[90:97]), expected)) - with self.assertRaises(ValueError): - clf.transform([[1, 2, 3], [4, 5, 6]]) - with self.assertRaises(sklearn.exceptions.NotFittedError): - clf = FImdlp(algorithm=0) - clf.transform([[1, 2], [3, 4]]) - - def test_transform_alternative(self): - clf = FImdlp(algorithm=1) - clf.fit([[1, 2], [3, 4]], [1, 2]) - self.assertEqual( - clf.transform([[1, 2], [3, 4]]).tolist(), [[0, 0], [1, 1]] - ) - X, y = load_iris(return_X_y=True) - clf.fit(X, y) - self.assertEqual(clf.n_features_, 4) - self.assertTrue(np.array_equal(X, clf.X_)) - self.assertTrue(np.array_equal(y, clf.y_)) - self.assertListEqual( - clf.transform(X).tolist(), clf.fit(X, y).transform(X).tolist() - ) - expected = [ - [1, 0, 1, 1], - [2, 1, 1, 1], + [2, 3, 1, 1], [2, 0, 1, 1], [0, 0, 1, 1], [1, 0, 1, 1], - [1, 1, 1, 1], - [1, 1, 1, 1], + [1, 3, 1, 1], + [1, 2, 1, 1], ] self.assertTrue(np.array_equal(clf.transform(X[90:97]), expected)) with self.assertRaises(ValueError): clf.transform([[1, 2, 3], [4, 5, 6]]) with self.assertRaises(sklearn.exceptions.NotFittedError): - clf = FImdlp(algorithm=1) + clf = FImdlp() clf.transform([[1, 2], [3, 4]]) + + def test_cppfactorize(self): + source = [ + b"f0", + b"f1", + b"f2", + b"f3", + b"f4", + b"f5", + b"f6", + b"f1", + b"f1", + b"f7", + b"f8", + ] + expected = [0, 1, 2, 3, 4, 5, 6, 1, 1, 7, 8] + computed = factorize(source) + self.assertListEqual(expected, computed) + + def test_join_fit(self): + y = np.array([b"f0", b"f0", b"f2", b"f3", b"f4"]) + x = np.array( + [ + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [1, 2, 3, 4, 5], + [2, 3, 4, 5, 6], + [3, 4, 5, 6, 7], + ] + ) + expected = [0, 0, 1, 2, 2] + clf = FImdlp() + clf.fit(x, factorize(y)) + computed = clf.join_fit([0, 2], 1, x) + self.assertListEqual(computed.tolist(), expected) + expected_y = [b"002", b"002", b"113", b"224", b"335"] + self.assertListEqual(expected_y, clf.y_join_) + + def test_join_fit_error(self): + y = np.array([b"f0", b"f0", b"f2", b"f3", b"f4"]) + x = np.array( + [ + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [1, 2, 3, 4, 5], + [2, 3, 4, 5, 6], + [3, 4, 5, 6, 7], + ] + ) + clf = FImdlp() + clf.fit(x, factorize(y)) + with self.assertRaises(ValueError) as exception: + clf.join_fit([], 1, x) + self.assertEqual( + str(exception.exception), + "Number of features must be in range [1, 5]", + ) + with self.assertRaises(ValueError) as exception: + FImdlp().join_fit([0, 4], 1, x) + self.assertTrue( + str(exception.exception).startswith( + "This FImdlp instance is not fitted yet." 
+ ) + ) + with self.assertRaises(ValueError) as exception: + clf.join_fit([0, 5], 1, x) + self.assertEqual( + str(exception.exception), + "Feature 5 not in range [0, 5)", + ) + with self.assertRaises(ValueError) as exception: + clf.join_fit([0, 2], 5, x) + self.assertEqual( + str(exception.exception), + "Target 5 not in range [0, 5)", + ) + with self.assertRaises(ValueError) as exception: + clf.join_fit([0, 2], 2, x) + self.assertEqual( + str(exception.exception), + "Target cannot in features to join", + ) + + def test_factorize(self): + y = np.array([b"f0", b"f0", b"f2", b"f3", b"f4"]) + clf = FImdlp() + computed = clf.factorize(y) + self.assertListEqual([0, 0, 1, 2, 3], computed) + y = [b"f4", b"f0", b"f0", b"f2", b"f3"] + clf = FImdlp() + computed = clf.factorize(y) + self.assertListEqual([0, 1, 1, 2, 3], computed) + + @staticmethod + def test_sklearn_transformer(): + for check, test in check_estimator(FImdlp(), generate_only=True): + test(check) + + def test_states_feature(self): + clf = FImdlp() + X, y = load_iris(return_X_y=True) + clf.fit(X, y) + expected = [] + for i in [3, 6, 4, 3]: + expected.append(list(range(i))) + for feature in range(X.shape[1]): + self.assertListEqual( + expected[feature], clf.get_states_feature(feature) + ) + + def test_states_no_feature(self): + clf = FImdlp() + X, y = load_iris(return_X_y=True) + clf.fit(X, y) + self.assertIsNone(clf.get_states_feature(4))