The data take the following form: three per-unit attribute tables exported as CSV (mean elevation dem_mean, mean NDVI ndvi_mean, mean population pop_mean), each keyed by the unit ID GRIDCODE, for 36 units in total.
Preliminary data integration:
import pandas as pd
import scipy
import scipy.cluster.hierarchy as sch
from scipy.cluster.vq import vq, kmeans, whiten
import numpy as np
import matplotlib.pylab as plt

df1 = pd.read_csv(r"D:\1RiverPro\1DATA\1HeadwaterCSV\dem.csv")
df2 = pd.read_csv(r"D:\1RiverPro\1DATA\1HeadwaterCSV\ndvi_mean.csv")
df3 = pd.read_csv(r"D:\1RiverPro\1DATA\1HeadwaterCSV\pop_mean.csv")

# Inner joins: keep only the GRIDCODEs present in all three tables
result = pd.merge(df1, df2, how='inner', on=['GRIDCODE'])
result = pd.merge(result, df3, how='inner', on=['GRIDCODE'])
df = result[['GRIDCODE', 'dem_mean', 'ndvi_mean', 'pop_mean']].copy()

# Add a column of reference labels obtained by another (non-clustering) method
ishw = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
df['Headwater_label'] = ishw

# Convert to a NumPy array
dataset = df.values
points = dataset[:, 1:4]      # attribute columns 2-4: dem_mean, ndvi_mean, pop_mean
ishw_label = dataset[:, -1]   # reference labels
# print("points: ", points)

# k-means clustering
# Normalize the raw data: whiten() rescales each feature by its standard deviation
data = whiten(points)

# kmeans(obs, k): the first argument is the data, the second is the number of clusters k.
# If k is not known in advance, one option is to take it from a hierarchical-clustering
# result (see the sketch after this script); otherwise just pass a number directly.
# kmeans returns a 2-tuple (centroids, distortion); we only need the centroids, hence [0].
# centroid = kmeans(data, max(cluster))[0]
centroid = kmeans(data, 2)[0]   # cluster into 2 groups
print(centroid)                 # print the cluster centroids

# vq assigns every observation to its nearest centroid.
# vq also returns a 2-tuple; [0] is the label of each observation.
label = vq(data, centroid)[0]
print(label)

# Count how many observations fall into each cluster
num = [0, 0]
for i in label:
    if i == 0:
        num[0] = num[0] + 1
    else:
        num[1] = num[1] + 1
print('num =', num)

# Compare the k-means labels with the reference labels and print the fraction that agree
print("Final clustering by k-means: ", label)
result = np.subtract(label, ishw_label)
print("result: ", result)
count = [0, 0]
for i in result:
    if i == 0:
        count[0] = count[0] + 1
    else:
        count[1] = count[1] + 1
print(count)
print(float(count[0]) / (count[0] + count[1]))
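The script imports scipy.cluster.hierarchy as sch but never uses it, and the commented-out kmeans(data, max(cluster))[0] line points at deriving k from a hierarchical clustering when the number of groups is unknown. A minimal sketch of that route, continuing from the whitened data array above; the Ward linkage and the cut height t=3 are assumptions chosen for illustration, not values from the original analysis:

Z = sch.linkage(data, method='ward')                   # agglomerative tree on the whitened data
# sch.dendrogram(Z); plt.show()                        # optional: inspect the tree to pick a cut height
cluster = sch.fcluster(Z, t=3, criterion='distance')   # flat cluster labels, numbered from 1
k = int(max(cluster))                                  # number of groups produced by the cut
centroid = kmeans(data, k)[0]                          # feed that k into k-means

With criterion='distance', fcluster numbers the flat clusters from 1 up to the number of groups, so max(cluster) is exactly the cluster count that the commented-out line relies on.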
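One caveat about the final comparison: k-means numbers its clusters arbitrarily, so cluster 0 may correspond to reference class 1, in which case the printed fraction is the complement of the true agreement. A label-order-independent sketch, reusing the label and ishw_label arrays from the script above:

matches = np.sum(label == ishw_label)                          # units where the two labelings coincide
agreement = max(matches, len(label) - matches) / len(label)    # best agreement over the two possible 0/1 assignments
print("agreement with reference labels:", agreement)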