fair_kmeans_scalable.py
# © 2021, Universität Bern, Chair of Quantitative Methods, Manuel Kammermann, Philipp Baumann
import gurobipy as gb
import multiprocessing
import numpy as np
import pandas as pd
from scipy.spatial.distance import cdist
from sklearn.cluster import kmeans_plusplus
from sklearn.cluster import MiniBatchKMeans
import time


def create_minibatch_cluster(X, colors, mb_clusters, random_state=42):
    """Create representatives using mini-batch k-means

    Args:
        X (np.array): feature vectors of objects
        colors (np.array): colors of objects (0=red; 1=blue)
        mb_clusters (int): number of mini-batch k-means sub-clusters
        random_state (int, RandomState instance): random state

    Returns:
        X_ (np.array): feature vectors of representatives
        num_red (dict): number of red points represented by each representative
        num_blue (dict): number of blue points represented by each representative
        labels (np.array): assignments of objects to mini-batch clusters
    """
# Initialize mini-batch k-means algorithm
mb_kmeans = MiniBatchKMeans(n_clusters=mb_clusters, random_state=random_state,
batch_size=256*multiprocessing.cpu_count())
mb_kmeans.fit(X)
    # Store assignment of objects to mini-batch clusters
labels = mb_kmeans.labels_
# Store unique mini-batch clusters
    minibatch_cluster = np.unique(labels)
# Initialize dictionaries
num_red = dict()
num_blue = dict()
mb_index = dict()
    for cluster in minibatch_cluster:
        points = colors[labels == cluster]
        # Count colors per mini-batch cluster (0=red; 1=blue)
        blues = int(points.sum())
        reds = len(points) - blues
        num_red[cluster] = reds
        num_blue[cluster] = blues
        mb_index[cluster] = np.argwhere(labels == cluster).ravel().tolist()
# Create new representative using mini-batch cluster center
X_ = mb_kmeans.cluster_centers_[list(mb_index.keys())]
return X_, num_red, num_blue, labels
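
# Illustration: with, say, mb_clusters=200 on a data set of n objects in d
# dimensions, X_ typically has shape (200, d) (one representative per non-empty
# mini-batch cluster), and num_red/num_blue map each mini-batch cluster id to
# the number of red/blue objects it represents, so
# sum(num_red.values()) + sum(num_blue.values()) == n.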


def update_centers(X_, centers, n_clusters, labels, algorithm):
    """Update positions of cluster centers

    Args:
        X_ (np.array): feature vectors of representatives
        centers (np.array): current positions of cluster centers
        n_clusters (int): predefined number of clusters
        labels (np.array): current cluster assignments of representatives
        algorithm (str): clustering algorithm ('kmeans', 'kmedian', 'kcenter' or 'kmedoid')

    Returns:
        centers (np.array): the updated positions of cluster centers
    """
for i in range(n_clusters):
# Compute mean of each cluster (k-means or k-center)
if algorithm == 'kmeans' or algorithm == 'kcenter':
centers[i] = X_[labels == i, :].mean(axis=0)
# Compute median of each cluster (k-median)
if algorithm == 'kmedian':
centers[i] = np.median(X_[labels == i, :], axis=0)
# Compute medoid of each cluster (k-medoid)
        if algorithm == 'kmedoid':
            cluster_points = X_[labels == i, :]
            # Sample at most 50,000 random points if the cluster is too big
            sample_ids = np.random.choice(cluster_points.shape[0],
                                          min(cluster_points.shape[0], 50000),
                                          replace=False)
            x_sample = cluster_points[sample_ids]
            # Compute pairwise Euclidean distances within the (sampled) cluster
            distances = cdist(x_sample, x_sample, metric='euclidean')
            centers[i] = x_sample[np.argmin(distances.sum(axis=0))]
return centers


def assign_objects(X_, centers, num_red, num_blue, p, q):
    """Assigns representatives to clusters subject to the balance constraint

    Args:
        X_ (np.array): feature vectors of representatives
        centers (np.array): current positions of cluster centers
        num_red (dict): number of red points represented by each representative
        num_blue (dict): number of blue points represented by each representative
        p (int): integer used to compute minimum balance
        q (int): integer used to compute minimum balance

    Returns:
        labels (np.array): cluster labels of representatives
    """
# Compute model input
k = centers.shape[0]
index_ = list(num_red.keys())
distances = pd.DataFrame(cdist(X_, centers), index=index_)
assignments = {(i, j): distances.loc[i, j] for i in index_ for j in range(k)}
ratio = min(p, q) / max(p, q)
# Create model
m = gb.Model()
# Add binary decision variables
y = m.addVars(assignments, obj=assignments, vtype=gb.GRB.BINARY)
# Add constraints
m.addConstrs(y.sum(i, '*') == 1 for i in index_)
m.addConstrs(y.sum('*', j) >= 1 for j in range(k))
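    # The next two constraint families enforce the balance requirement in every
    # cluster j: with ratio = min(p, q) / max(p, q), they require
    #   red_j >= ratio * blue_j   and   blue_j >= ratio * red_j,
    # where red_j and blue_j are the red/blue counts aggregated over the
    # representatives assigned to cluster j, i.e. every cluster's balance
    # min(red_j / blue_j, blue_j / red_j) is at least min(p, q) / max(p, q).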
m.addConstrs(gb.quicksum(y[i, j] * num_red[i] for i in index_) >= ratio * gb.quicksum(y[i, j] * num_blue[i] for i in
index_) for j in range(k))
m.addConstrs(gb.quicksum(y[i, j] * num_blue[i] for i in index_) >= ratio * gb.quicksum(y[i, j] * num_red[i] for i in
index_) for j in range(k))
# Determine optimal solution
    m.setParam('OutputFlag', 0)
m.optimize()
    # Get labels from the optimal assignment (empty array if no solution was found)
    if m.SolCount > 0:
        labels = np.array([j for i, j in y.keys() if y[i, j].X > 0.5])
    else:
        labels = np.array([])
return labels
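
# The labels returned above contain one entry per representative, in the same
# order as index_ = list(num_red.keys()) and hence in the same order as the
# rows of X_.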


def get_total_distance(X, X_, centers, labels_, labels, algorithm):
    """Computes total distance between objects and cluster centers

    Args:
        X (np.array): feature vectors of objects
        X_ (np.array): feature vectors of representatives
        centers (np.array): current positions of cluster centers
        labels_ (np.array): current cluster assignments of representatives
        labels (np.array): assignments of objects to mini-batch clusters
        algorithm (str): clustering algorithm ('kmeans', 'kmedian', 'kcenter' or 'kmedoid')

    Returns:
        dist (float): total distance between objects and cluster centers
        dist_ (float): total distance between representatives and cluster centers
    """
    # Map each mini-batch cluster id to the final cluster of its representative
    assignment = dict(zip(np.unique(labels).tolist(), labels_))
if algorithm == 'kmeans':
dist_ = np.sqrt(((X_ - centers[labels_, :]) ** 2).sum(axis=1).sum())
dist = np.sqrt(((X - centers[np.vectorize(assignment.get)(labels), :]) ** 2).sum(axis=1).sum())
if algorithm == 'kmedian':
dist = sum(np.linalg.norm(X_ - centers[labels_, :], axis=1))
dist_ = sum(np.linalg.norm(X_ - centers[labels_, :], axis=1))
    if algorithm == 'kcenter':
        # Maximum (Chebyshev) distance between any representative and its center
        dist = np.max(np.abs(X_ - centers[labels_, :]), axis=1).max()
        dist_ = dist
if algorithm == 'kmedoid':
# dist = np.sqrt(((X - centers[labels, :]) ** 2).sum(axis=1)).sum()
dist = sum(np.linalg.norm(X_ - centers[labels_, :], axis=1))
dist_ = sum(np.linalg.norm(X_ - centers[labels_, :], axis=1))
return dist, dist_


def get_balance(labels, labels_, colors):
    """Computes balance of the clustering

    Args:
        labels (np.array): assignments of objects to mini-batch clusters
        labels_ (np.array): current cluster assignments of representatives
        colors (np.array): colors of objects (0=red; 1=blue)

    Returns:
        balance (float): achieved balance (minimum over all clusters)
    """
assignment = dict(zip(np.unique(labels).tolist(), labels_))
min_ = 1
for cluster in np.unique(labels_):
color_dist = colors[np.vectorize(assignment.get)(labels) == cluster]
        # Count colors in the cluster (0=red; 1=blue); the balance is symmetric in r and b
        b = sum(color_dist)
        r = len(color_dist) - b
if r == 0 or b == 0:
min_ = 0
else:
min_ = min(min_, r / b, b / r)
return min_
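
# Example: a cluster containing 3 red and 6 blue objects has balance
# min(3/6, 6/3) = 0.5; the clustering's balance is the minimum over all clusters.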


def scalable_fair_kmeans(X, n_clusters, colors, p, q, algorithm, mb_clusters, random_state, max_iter=100):
    """Finds a partition of X subject to the balance constraint

    Args:
        X (np.array): feature vectors of objects
        n_clusters (int): predefined number of clusters
        colors (np.array): colors of objects (0=red; 1=blue)
        p (int): integer used to compute minimum balance
        q (int): integer used to compute minimum balance
        algorithm (str): clustering algorithm ('kmeans', 'kmedian', 'kcenter' or 'kmedoid')
        mb_clusters (int): number of mini-batch sub-clusters
        random_state (int, RandomState instance): random state
        max_iter (int): maximum number of iterations of the fair k-means algorithm

    Returns:
        best_labels (np.array): cluster labels of representatives
        best_total_distance (float): minimal distance (objective function value)
        total_time (float): total running time
        best_total_balance (float): achieved balance in best solution
    """
# Initialize start time
start_time = time.time()
X_, num_red, num_blue, labels = create_minibatch_cluster(X, colors, mb_clusters, random_state=random_state)
# Choose initial cluster centers randomly
# np.random.seed(random_state)
# center_ids = np.random.choice(np.arange(X.shape[0]), size=n_clusters, replace=False)
# centers = X[center_ids, :]
    # Choose initial cluster centers using the k-means++ algorithm
centers, indices = kmeans_plusplus(X_, n_clusters=n_clusters, random_state=random_state, n_local_trials=1000)
# print(centers)
# Assign objects
labels_ = assign_objects(X_, centers, num_red, num_blue, p, q)
if labels_.size > 0:
# Initialize best labels
best_labels = labels_
# Update centers
centers = update_centers(X_, centers, n_clusters, labels_, algorithm)
# Compute total distance
best_total_distance, best_total_distance_ = get_total_distance(X, X_, centers, labels_, labels,
algorithm)
# Compute balance
best_total_balance = get_balance(labels, labels_, colors)
# balance = 1
n_iter = 1
while n_iter < max_iter:
# Assign objects
labels_ = assign_objects(X_, centers, num_red, num_blue, p, q)
# Update centers
centers = update_centers(X_, centers, n_clusters, labels_, algorithm)
# Compute total distance
total_distance, total_distance_ = get_total_distance(X, X_, centers, labels_, labels, algorithm)
# Compute balance
# balance = get_balance(labels, labels_, colors)
# Check stopping criterion
if total_distance_ >= best_total_distance_:
break
else:
# Update best labels and best total distance
best_labels = labels_
best_total_distance_ = total_distance_
best_total_distance = total_distance
# Increase iteration counter
n_iter += 1
total_time = time.time() - start_time
print('K-Means cost representative: ' + str(best_total_distance_))
print('K-Means cost overall: ' + str(best_total_distance))
print('Total running time: ' + str(total_time))
else:
best_labels = []
best_total_distance = 'infeasible'
total_time = 'infeasible'
best_total_balance = 'infeasible'
print('Model is infeasible')
return best_labels, best_total_distance, total_time, best_total_balance
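

if __name__ == '__main__':
    # Minimal usage sketch (illustrative): run the scalable fair k-means
    # heuristic on synthetic two-color data. All values below (sample size,
    # n_clusters, p, q, mb_clusters) are arbitrary demonstration choices, and
    # solving the assignment model requires a working Gurobi installation.
    rng = np.random.RandomState(0)
    n, d = 5000, 2
    X_demo = rng.randn(n, d)
    colors_demo = rng.randint(0, 2, size=n)  # 0=red; 1=blue
    best_labels, best_cost, runtime, balance = scalable_fair_kmeans(
        X_demo, n_clusters=4, colors=colors_demo, p=1, q=2, algorithm='kmeans',
        mb_clusters=100, random_state=0)
    # best_labels contains one cluster label per representative (mini-batch sub-cluster)
    print('Achieved balance: ' + str(balance))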