-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathPC_Kmeans.py
More file actions
165 lines (135 loc) · 6.21 KB
/
PC_Kmeans.py
File metadata and controls
165 lines (135 loc) · 6.21 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
import numpy as np
import cv2
class PC_Kmeans:
    """Pairwise-constrained K-means (COP-KMeans style).

    Clusters data under must-link (``ml``) and cannot-link (``cl``)
    constraints.  Violations are not forbidden outright; instead each
    violated constraint adds a penalty of weight ``w`` to the assignment
    objective, so points are steered toward constraint-respecting clusters.
    """

    def __init__(self, k, ml, cl, neighborhoods, y, num, f_name, w=1, tolerance=0.00001, max_iterations=3):
        self.k = k                          # number of clusters
        self.ml = ml                        # must-link graph: index -> must-linked indices (transitive closure)
        self.cl = cl                        # cannot-link graph: index -> cannot-linked indices
        self.neighborhoods = neighborhoods  # groups of indices connected by must-links; used to seed centers
        self.y = y                          # per-point labels, used only by init_centers to find a cannot-linked seed
        self.num = num                      # normalizer for SIFT match counts (see objective_function)
        self.f_name = f_name                # feature type; "SIFT" switches to descriptor-matching distance
        self.w = w                          # penalty weight per violated constraint
        self.tolerance = tolerance          # percent-movement threshold for convergence
        self.max_iterations = max_iterations

    def fit(self, data):
        """Run constrained k-means on ``data``.

        Returns the string ``"Cluster Not Found"`` if an iteration leaves some
        cluster empty; otherwise returns None with ``self.centroids`` and
        ``self.clusters`` populated.
        """
        # Initialize centroids from must-link neighborhoods.
        self.centroids = {}
        cluster_centers = self.init_centers(data, self.neighborhoods, self.y)
        for center_index in range(len(cluster_centers)):
            self.centroids[center_index] = cluster_centers[center_index]
        # Repeat assign/update until convergence or max_iterations.
        for _ in range(self.max_iterations):
            # Fresh (empty) assignment sets for this iteration: {0: set(), ..., k-1: set()}
            self.clusters = {c: set() for c in range(self.k)}
            # Assign every point to the cluster minimizing the penalized objective.
            alter = self.assign_clusters(
                data, cluster_centers, self.ml, self.cl, self.w)
            if alter == "empty":
                return "Cluster Not Found"
            # Average each cluster's points to re-calculate the centroids.
            previous_centers = cluster_centers
            cluster_centers = self.compute_mean_cluster_centers(data)
            # Converged when every center moved less than `tolerance` percent.
            # BUG FIX: the original summed *signed* percentages (movements in
            # opposite directions cancelled) and divided by centroid components
            # that may be zero (0/0 -> nan, and nan > tol is silently False).
            # Use absolute change and a safe denominator instead.
            is_optimal = True
            for center_index in range(len(cluster_centers)):
                prev = previous_centers[center_index]
                diff = cluster_centers[center_index] - prev
                denom = np.where(np.abs(prev) > 0, np.abs(prev), 1.0)
                if np.sum(np.abs(diff) / denom * 100.0) > self.tolerance:
                    is_optimal = False
            if is_optimal:
                break

    def assign_clusters(self, data, cluster_centers, ml, cl, w):
        """Assign each point to its best cluster; return "empty" if any cluster got no points."""
        self.is_clustered = [-1] * len(data)  # -1 marks "not yet assigned" for the penalty terms
        for x_index in range(len(data)):
            h = [self.objective_function(data, x_index, cluster_centers, cluster_index, self.is_clustered, ml, cl, w)
                 for cluster_index in range(self.k)]
            center_index = np.argmin(h)
            self.is_clustered[x_index] = center_index
            self.clusters[center_index].add(x_index)
        # An empty cluster means this seeding failed; signal the caller.
        for i in range(self.k):
            if i not in self.is_clustered:
                return "empty"

    def objective_function(self, data, x_index, centroids, cluster_index, is_clustered, ml, cl, w):
        """Penalized distance of point ``x_index`` to centroid ``cluster_index``.

        Distance term: for "SIFT" features, 1 - (matched descriptors / num);
        otherwise half the squared Euclidean distance.  Each must-link partner
        already placed in a *different* cluster, and each cannot-link partner
        already placed in *this* cluster, adds a penalty of ``w``.
        """
        if self.f_name == "SIFT":
            # Brute-force match SIFT descriptors; more matches = closer.
            bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
            matches = bf.match(data[x_index], centroids[cluster_index])
            # Only the count matters; sorting the matches was unnecessary.
            distance = 1 - (len(matches) / self.num)
        else:
            distance = 1 / 2 * \
                np.sum((data[x_index] - centroids[cluster_index]) ** 2)
        ml_penalty = 0
        for i in ml[x_index]:
            if is_clustered[i] != -1 and is_clustered[i] != cluster_index:
                ml_penalty += w
        cl_penalty = 0
        for i in cl[x_index]:
            if is_clustered[i] == cluster_index:
                cl_penalty += w
        return distance + ml_penalty + cl_penalty

    def init_centers(self, data, neighborhoods, y):
        """Seed cluster centers from must-link neighborhoods (largest first).

        If there are fewer neighborhoods than ``k``, try to add one point that
        is cannot-linked to every neighborhood, then pad with random distinct
        data points until ``k`` centers exist.
        """
        data = np.array(data)
        # Sort neighborhoods by size, largest first.
        neighborhoods = sorted(neighborhoods, key=len, reverse=True)
        neighborhood_centers = np.array(
            [data[neighborhood].mean(axis=0) for neighborhood in neighborhoods])
        if len(neighborhoods) > self.k:
            # Select the K largest neighborhoods' centroids.
            cluster_centers = neighborhood_centers[:self.k]
        else:
            if len(neighborhoods) > 0:
                cluster_centers = neighborhood_centers
            else:
                cluster_centers = np.empty((0, data.shape[1]))
            if len(cluster_centers) < self.k:
                # Look for a point connected by cannot-links to every
                # neighborhood (its label differs from each representative).
                for x_index in range(len(data)):
                    if all(y[x_index] != y[neighborhood_set[0]]
                           for neighborhood_set in neighborhoods):
                        # BUG FIX: np.append returns a new array; the original
                        # discarded the result, so this center was never added.
                        cluster_centers = np.append(
                            cluster_centers, [data[x_index]], axis=0)
                        break
            if len(cluster_centers) < self.k:
                # Pad with random distinct data points.  Count from the centers
                # actually collected (not from len(neighborhoods)) so the total
                # is exactly k even after the cannot-link seed above.
                remaining_cluster_centers = data[
                    np.random.choice(len(data), self.k - len(cluster_centers), replace=False), :]
                cluster_centers = np.concatenate(
                    [cluster_centers, remaining_cluster_centers])
        return cluster_centers

    def compute_mean_cluster_centers(self, data):
        """Recompute each centroid as the mean of its assigned points.

        Empty clusters keep their previous centroid.  Returns the centers as
        an array in cluster-index order.
        """
        for _center in self.clusters:
            members = [data[index_value] for index_value in self.clusters[_center]]
            if members:
                self.centroids[_center] = np.average(np.array(members), axis=0)
        return np.array(list(self.centroids.values()))

    def pred(self, data):
        """Return the index of the centroid nearest to a single sample."""
        distances = [np.linalg.norm(data - self.centroids[centroid])
                     for centroid in self.centroids]
        classification = distances.index(min(distances))
        return classification

    def predict(self, data):
        """Return nearest-centroid labels for each row of ``data``."""
        labels = []
        for row in data:
            dist = [np.linalg.norm(row - self.centroids[centroid])
                    for centroid in self.centroids]
            classification = dist.index(min(dist))
            labels.append(classification)
        return labels