FCM模糊聚类算法python实现
FCM模糊聚类算法python实现
FCM原理
FCM隶属度矩阵以及聚类中心的更新关系可以参考这个文章,推导过程已经很详细了,本博客在理论的基础上对公式进行解读和变换使之对应到相应的矩阵操作,并最终完成模糊聚类算法的编写。https://www.cnblogs.com/wxl845235800/p/11053261.html
公式解读
python实现
import numpy as np
import pandas as pd
import copy
#from core.myviews.views import viewResult,viewScore
from caculationDistance import euclideanDistance,squaredNornal,relativeDist,distTwoSample
from sklearn.metrics import silhouette_score
class FCM:
    """Fuzzy C-Means clustering (fuzzifier m fixed at 2).

    Samples are given as a matrix with one sample per row; cluster centers
    are seeded with k-means++ and memberships/centers are updated
    alternately until the membership shift falls below ``tol``.
    """

    def __init__(self, matrix_data, c_cluster=3, tol=1e-8, maxItem=1000):
        """
        :param matrix_data: input matrix, one sample per row
        :param c_cluster: number of clusters
        :param tol: convergence tolerance on the membership-matrix shift
        :param maxItem: maximum number of iterations
        """
        self.matrix_data = matrix_data
        self.c_cluster = c_cluster
        self.tol = tol
        self.maxItem = maxItem

    def initCenter(self, *args):
        """Seed cluster centers with k-means++.

        :param args: reserved as a future extension point for other
            initialization strategies
        :return: ``(c_cluster, n_feature)`` array of initial centers
        """
        n_samples, n_feature = self.matrix_data.shape
        # BUGFIX: use self.c_cluster instead of the module-level global
        # n_cluster so the class works outside the demo script.
        center = np.zeros((self.c_cluster, n_feature))
        # First center: a uniformly random sample.
        center[0] = self.matrix_data[np.random.randint(n_samples)]
        for i in range(1, self.c_cluster):
            # Squared distance from every sample to its nearest chosen center.
            distance_to_centers = euclideanDistance(
                self.matrix_data, center[:i], square=True)
            closed_distance = np.min(distance_to_centers, axis=1)
            # Roulette-wheel selection proportional to squared distance.
            denominator = closed_distance.sum()
            point = np.random.rand() * denominator  # roulette pointer
            be_choosed = np.searchsorted(np.cumsum(closed_distance), point)
            # Guard against picking past the last sample (index overflow).
            be_choosed = min(be_choosed, n_samples - 1)
            center[i] = self.matrix_data[be_choosed]
        return center

    def fit(self):
        """Run the FCM alternating updates.

        Sets ``self.center`` and ``self.labels`` and returns them.

        :return: tuple ``(center, labels)`` — final cluster centers and
            the hard label (argmax membership) of every sample
        """
        n_samples, n_feature = self.matrix_data.shape
        self.center = self.initCenter()  # k-means++ seeding
        # Membership matrix: one row per sample, one column per cluster.
        # BUGFIX: size by self.c_cluster, not the global n_cluster.
        u = np.zeros((n_samples, self.c_cluster))
        for iteration in range(self.maxItem):
            dist_matrix_squared = euclideanDistance(
                self.matrix_data, self.center, square=True)
            # Clamp tiny distances so the reciprocal below never divides by 0.
            np.maximum(dist_matrix_squared, 0.001, out=dist_matrix_squared)
            np.reciprocal(dist_matrix_squared, dtype=float,
                          out=dist_matrix_squared)
            sum_row = dist_matrix_squared.sum(axis=1)
            np.reciprocal(sum_row, dtype=float, out=sum_row)
            oldU = copy.deepcopy(u)
            # Row-normalize the reciprocal distances -> membership matrix
            # (this is the closed-form membership update for m = 2).
            np.einsum("ij,i->ij", dist_matrix_squared, sum_row.T, out=u)
            # Shift between successive membership matrices, squared F-norm
            # (faster than ((u - oldU) ** 2).sum()).
            u_shift = squaredNornal(u - oldU)
            print("本次的移动误差为:", u_shift)
            # BUGFIX: compare against self.tol instead of the global eps.
            if u_shift < self.tol:
                print("到达收敛条件,提前结束")
                break
            # Center update: weighted mean with weights u ** m, m = 2.
            u2 = u ** 2
            # BUGFIX: inner loop no longer shadows the iteration counter,
            # and uses self.matrix_data rather than the global matrix_data.
            for j in range(u2.shape[1]):
                centroid = np.einsum(
                    "ij,i->ij", self.matrix_data, u2[:, j].T
                ).sum(axis=0) / (u2[:, j].sum() + 0.001)  # avoid divide-by-0
                self.center[j] = centroid
        self.labels = np.argmax(u, axis=1)
        return self.center, self.labels

    def getSse(self):
        """Sum of squared errors: squared distance of each sample to its
        assigned cluster center. Requires :meth:`fit` to have run.

        :return: scalar SSE
        """
        sse_sum = 0
        for r, c in enumerate(self.labels):
            # BUGFIX: read self.matrix_data (was the global matrix_data).
            sse_sum += distTwoSample(self.center[c], self.matrix_data[r],
                                     square=True)
        # BUGFIX: the original computed the sum but never returned it.
        return sse_sum

    def getSilhouette(self):
        """Mean silhouette coefficient of the fitted labels.

        a(i) = mean distance from sample i to all other points of its own
        cluster; b(i) = smallest mean distance from i to the points of any
        other cluster; the coefficient is (b - a) / max(a, b), averaged
        over all samples (delegated to sklearn).

        :return: mean silhouette score
        """
        silhouette_average = silhouette_score(self.matrix_data, self.labels)
        return silhouette_average
if __name__ == '__main__':
    # One sample per row; CSV has no header line.
    matrix_data = np.asarray(
        pd.read_csv("../../state/rawData/CompleteOrder.csv", header=None))
    n_cluster = 4
    eps = 1e-4
    # CONSISTENCY FIX: pass n_cluster instead of a second hard-coded 4 so
    # the cluster count is configured in exactly one place.
    myFcm = FCM(matrix_data, c_cluster=n_cluster, tol=eps, maxItem=1000)
    center, labels = myFcm.fit()
    # viewResult(matrix_data,labels,center,"FCM")
注:数据以矩阵形式输入,一行为一条样本
补充说明上文中所提到的caculationDistance.py涉及到的代码如下
import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances


def rowNorms(X):
    """Return the squared Euclidean norm of every row of ``X``.

    :param X: 2-D array, one sample per row
    :return: 1-D array of per-row squared norms
    """
    # Element-wise square then sum along each row; equivalent to the row
    # dot products einsum("ij,ij->i", X, X).
    return (X * X).sum(axis=1)
def euclideanDistance(x, y, square=False):
    """Pairwise Euclidean distance between the rows of ``x`` and ``y``.

    Uses the algebraic expansion ``||p - c||^2 = ||p||^2 - 2 p.c + ||c||^2``
    evaluated with broadcasting: a column vector of row norms of ``x`` is
    added to a row vector of row norms of ``y`` and twice the Gram matrix
    ``x @ y.T`` is subtracted. This is far cheaper than looping over pairs.

    :param x: (n, d) sample matrix
    :param y: (m, d) sample matrix
    :param square: if True return the squared distances (skips the sqrt,
        which is enough when only ordering matters)
    :return: (n, m) matrix of distances (or squared distances)
    """
    gram = np.dot(x, y.T)
    # Column vector + row vector broadcasts to the full (n, m) grid.
    sq = rowNorms(x)[:, np.newaxis] + rowNorms(y)[np.newaxis, :] - 2 * gram
    # Floating-point cancellation can yield tiny negatives; clip them to 0.
    np.maximum(sq, 0, out=sq)
    if square:
        return sq
    return np.sqrt(sq)
def distTwoSample(x, y, square=False):
    """Euclidean distance between two individual sample vectors.

    :param x: 1-D vector
    :param y: 1-D vector
    :param square: if True return the squared distance
    :return: scalar (squared) distance
    """
    diff = x - y
    # Self dot product of the difference = squared Euclidean distance.
    sq = np.einsum("i,i->", diff, diff)
    if square:
        return sq
    return np.sqrt(sq)
def relativeDist(x, y):
    """Relative (rank-preserving) distance from each row of ``x`` to ``y``.

    When only the nearest row of ``y`` matters, the ``||x||^2`` term of the
    squared-distance expansion is constant per row of ``x`` and can be
    dropped: ``(x - y)^2 - x^2 = -2 x.y + y^2`` ranks identically and is
    cheaper to compute.

    :param x: (n, d) sample matrix
    :param y: (m, d) sample matrix
    :return: (n, m) matrix of relative distances
    """
    return rowNorms(y)[np.newaxis, :] - 2 * np.dot(x, y.T)
def squaredNornal(x):
    """Squared Frobenius norm of ``x`` (squared 2-norm when ``x`` is a
    vector).

    Flattens first so arrays of any shape are accepted; a single dot
    product is faster than summing rowNorms(x).

    :param x: array of any shape
    :return: scalar squared norm
    """
    flat = np.ravel(x)
    return flat @ flat