mirror of https://github.com/Doctorado-ML/mufs.git (synced 2025-08-16 16:15:56 +00:00)
import numpy as np


# Entropy
def entropy(Y):
    """
    Also known as Shannon Entropy
    Reference: https://en.wikipedia.org/wiki/Entropy_(information_theory)
    """
    # Empirical probability of each distinct value (or row) of Y
    _, count = np.unique(Y, return_counts=True, axis=0)
    prob = count / len(Y)
    return -np.sum(prob * np.log2(prob))
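

# A quick sanity check (illustrative, not part of the original module):
# a fair coin carries exactly one bit of entropy.
#
#   >>> entropy(np.array([0, 1, 0, 1]))
#   1.0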


# Joint Entropy
def jEntropy(Y, X):
    """
    H(Y;X)
    Reference: https://en.wikipedia.org/wiki/Joint_entropy
    """
    # Stack column-wise so each row is one joint observation (y_i, x_i)
    YX = np.c_[Y, X]
    return entropy(YX)
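

# Illustrative check (not part of the original module): two independent
# fair bits have two bits of joint entropy.
#
#   >>> jEntropy(np.array([0, 0, 1, 1]), np.array([0, 1, 0, 1]))
#   2.0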


# Conditional Entropy
def cEntropy(Y, X):
    """
    Conditional Entropy = Joint Entropy - Entropy of X
    H(Y|X) = H(Y;X) - H(X)
    Reference: https://en.wikipedia.org/wiki/Conditional_entropy
    """
    return jEntropy(Y, X) - entropy(X)
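

# Illustrative check (not part of the original module): once X is known,
# an identical Y holds no remaining uncertainty, so H(Y|X) = 0.
#
#   >>> Y = np.array([0, 0, 1, 1])
#   >>> cEntropy(Y, Y)
#   0.0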


# Information Gain
def gain(Y, X):
    """
    Information Gain, I(Y;X) = H(Y) - H(Y|X)
    Reference: https://en.wikipedia.org/wiki/Information_gain_in_decision_trees#Formal_definition
    """
    return entropy(Y) - cEntropy(Y, X)
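

# A minimal usage sketch, not part of the original module; the arrays below
# are illustrative only.
if __name__ == "__main__":
    Y = np.array([0, 0, 1, 1])
    X_ind = np.array([0, 1, 0, 1])  # independent of Y
    print(gain(Y, Y))      # 1.0: a copy of Y carries all of H(Y)
    print(gain(Y, X_ind))  # 0.0: an independent X carries none of it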