-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdbscan.py
More file actions
359 lines (296 loc) · 12.5 KB
/
dbscan.py
File metadata and controls
359 lines (296 loc) · 12.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
import numpy as np
import pandas as pd
from scipy.spatial import cKDTree, ConvexHull
from sklearn.cluster import DBSCAN
from tqdm import tqdm
import h5py
import yaml
import os
import os.path as ospath
# Structured dtypes describing one cluster center per row. Field order must
# match the flat list returned by cluster_center() (2D/3D variants), with the
# cluster's group id appended last by find_cluster_centers().
CLUSTER_CENTERS_DTYPE_2D = [
    ("frame", "f4"),         # mean frame over the cluster
    ("std_frame", "f4"),     # std of frames
    ("x", "f4"),             # localization-precision-weighted mean x
    ("y", "f4"),             # localization-precision-weighted mean y
    ("std_x", "f4"),         # std of x positions
    ("std_y", "f4"),         # std of y positions
    ("photons", "f4"),       # mean photons
    ("sx", "f4"),            # mean sx
    ("sy", "f4"),            # mean sy
    ("bg", "f4"),            # mean background
    ("lpx", "f4"),           # weighted localization precision of the center, x
    ("lpy", "f4"),           # weighted localization precision of the center, y
    ("ellipticity", "f4"),   # mean sx / mean sy
    ("net_gradient", "f4"),  # mean net gradient
    ("n", "u4"),             # number of localizations in the cluster
    ("area", "f4"),          # pi * (std_x + std_y)^2
    ("convexhull", "f4"),    # convex-hull measure of the cluster footprint
    ("group", "i4"),         # cluster id (DBSCAN label)
]
CLUSTER_CENTERS_DTYPE_3D = [
    ("frame", "f4"),         # mean frame over the cluster
    ("std_frame", "f4"),     # std of frames
    ("x", "f4"),             # localization-precision-weighted mean x
    ("y", "f4"),             # localization-precision-weighted mean y
    ("std_x", "f4"),         # std of x positions
    ("std_y", "f4"),         # std of y positions
    ("z", "f4"),             # weighted mean z
    ("photons", "f4"),       # mean photons
    ("sx", "f4"),            # mean sx
    ("sy", "f4"),            # mean sy
    ("bg", "f4"),            # mean background
    ("lpx", "f4"),           # weighted localization precision of the center, x
    ("lpy", "f4"),           # weighted localization precision of the center, y
    ("std_z", "f4"),         # std of z positions
    ("ellipticity", "f4"),   # mean sx / mean sy
    ("net_gradient", "f4"),  # mean net gradient
    ("n", "u4"),             # number of localizations in the cluster
    ("volume", "f4"),        # sphere volume from the mean of the three stds
    ("convexhull", "f4"),    # convex-hull volume of the cluster
    ("group", "i4"),         # cluster id (DBSCAN label)
]
def process_dbscan_clustering(input_folder, input_extension,
                              output_clusters_extension, output_centers_extension,
                              min_samples, nena_multiplier, pixelsize, nena_data):
    """Run DBSCAN clustering on every matching HDF5 file under ``input_folder``.

    Walks the folder tree and, for each file ending in
    ``{input_extension}.hdf5`` (skipping outputs of a previous run), clusters
    the localizations with a radius derived from the file's stored NeNA
    value, then writes the clustered localizations and the per-cluster
    centers next to the input file. Errors in individual files are printed
    and skipped so the batch continues.

    Parameters
    ----------
    input_folder : str
        Root directory searched recursively for input HDF5 files.
    input_extension : str
        Filename suffix (before ``.hdf5``) that marks input files.
    output_clusters_extension : str
        Suffix inserted into the clustered-localizations output filename.
    output_centers_extension : str
        Extra suffix appended for the cluster-centers output filename.
    min_samples : int
        DBSCAN ``min_samples`` parameter.
    nena_multiplier : float
        Multiplier applied to the NeNA value to get the DBSCAN radius.
    pixelsize : float or None
        Camera pixel size, needed for 3D cluster centers.
    nena_data : dict
        Mapping with a ``'values'`` dict of ``{base_name: nena_value}``; a
        file matches when a base name is a substring of its filename.
    """
    hdf5_files = []
    for root, _, files in os.walk(input_folder):
        for f in files:
            if (f.endswith(f'{input_extension}.hdf5') and
                    output_clusters_extension not in f and
                    output_centers_extension not in f):
                hdf5_files.append((root, f))
    print(f"Found {len(hdf5_files)} files to process")
    for root, filename in hdf5_files:
        input_file = os.path.join(root, filename)
        # Save outputs in the same directory as the input file
        output_dir = os.path.dirname(input_file)
        output_filename = filename.replace('.hdf5', f"{output_clusters_extension}.hdf5")
        output_file = os.path.join(output_dir, output_filename)
        centers_filename = filename.replace(
            '.hdf5', f"{output_clusters_extension}{output_centers_extension}.hdf5")
        centers_file = os.path.join(output_dir, centers_filename)
        # BUG FIX: these messages previously contained no placeholder, so
        # they printed literal text instead of identifying the file.
        print(f"\nProcessing file: {input_file}")
        try:
            # Match this file against the base names from the NeNA calculation
            nena_value = None
            for base_name in nena_data['values'].keys():
                if base_name in filename:
                    nena_value = nena_data['values'][base_name]
                    print(f"Found matching NeNA value for base name: {base_name}")
                    break
            if nena_value is None:
                raise ValueError(f"No matching NeNA value found for file {filename}")
            print(f"Using stored NeNA value: {nena_value:.4f}")
            # DBSCAN search radius scales with the measured precision
            radius = nena_value * nena_multiplier
            print("Loading localizations...")
            locs, info = load_locs(input_file)
            print("Performing DBSCAN clustering...")
            clustered_locs = dbscan_clustering(locs, radius, min_samples)
            print(f"Number of localizations after clustering: {len(clustered_locs)}")
            # Calculate cluster centers
            print("Calculating cluster centers...")
            centers = find_cluster_centers(clustered_locs, pixelsize)
            print(f"Number of cluster centers calculated: {len(centers)}")
            clustering_info = {
                'Generated by': 'Picasso DBSCAN',
                'Last driftfile': None,
                'Clustering parameters': {
                    'method': 'DBSCAN',
                    'radius': float(radius),
                    'min_samples': int(min_samples),
                    'nena_value': float(nena_value),
                    'nena_multiplier': float(nena_multiplier),
                    'n_clusters': int(len(centers))
                }
            }
            # BUG FIX: copy the metadata BEFORE appending clustering_info,
            # otherwise the centers file records the clustering block twice.
            centers_info = info.copy()
            print(f"Saving clustered localizations to {output_filename}")
            info.append(clustering_info)
            save_locs(output_file, clustered_locs, info)
            print(f"Saving cluster centers to {centers_filename}")
            centers_info.append(clustering_info)
            save_locs(centers_file, centers, centers_info)
        except Exception as e:
            print(f"Error processing {filename}: {str(e)}")
            continue
    print("\nAll files clustered")
###############################################################################################
# Loading, saving functions
###############################################################################################
def load_locs(path):
    """Load a localizations record array and its YAML metadata from ``path``."""
    with h5py.File(path, "r") as hdf:
        raw = hdf["locs"][...]
        records = np.rec.array(raw, dtype=raw.dtype)
        metadata = load_info(path)
    return records, metadata
def load_info(path):
    """Load metadata from the YAML file accompanying ``path``.

    The YAML filename is obtained by replacing the extension of ``path``
    with ``.yaml``. Returns a list with one dict per YAML document.

    Raises
    ------
    FileNotFoundError
        If the metadata file does not exist.
    """
    path_base = path.rsplit(".", 1)[0]
    filename = path_base + ".yaml"
    try:
        with open(filename, "r") as info_file:
            # NOTE(review): UnsafeLoader executes arbitrary YAML tags —
            # only use on trusted, locally produced metadata files.
            info = list(yaml.load_all(info_file, Loader=yaml.UnsafeLoader))
    except FileNotFoundError:
        # BUG FIX: the message previously had no placeholder, so the missing
        # path was never reported; also re-raise the original exception
        # instead of wrapping it in a new FileNotFoundError.
        print(f"Could not find metadata file: {filename}")
        raise
    return info
def save_locs(path, locs, info):
    """Write ``locs`` to an HDF5 file at ``path`` and its metadata alongside."""
    with h5py.File(path, "w") as out:
        out.create_dataset("locs", data=locs)
    yaml_path = path.rsplit(".", 1)[0] + ".yaml"
    save_info(yaml_path, info)
def save_info(path, info):
    """Dump ``info`` (a list of YAML documents) to the file at ``path``."""
    with open(path, "w") as handle:
        yaml.dump_all(info, handle, default_flow_style=False)
def _to_dict_walk(node):
"""Converts mapping objects (subclassed from dict)
to actual dict objects, including nested ones
"""
node = dict(node)
for key, val in node.items():
if isinstance(val, dict):
node[key] = _to_dict_walk(val)
return node
def save_user_settings(settings):
    """Persist user settings as YAML in the per-user settings file."""
    plain_settings = _to_dict_walk(settings)
    target = _user_settings_filename()
    # Ensure the ~/.picasso directory exists before writing
    os.makedirs(ospath.dirname(target), exist_ok=True)
    with open(target, "w") as out:
        yaml.dump(dict(plain_settings), out, default_flow_style=False)
def _user_settings_filename():
home = ospath.expanduser("~")
return ospath.join(home, ".picasso", "settings.yaml")
###############################################################################################
# DBSCAN Clustering Functions
###############################################################################################
def append_to_rec(array, values, field_name):
    """Return a copy of structured array ``array`` with ``values`` attached
    as an additional field named ``field_name``.
    """
    extended_dtype = array.dtype.descr + [(field_name, values.dtype)]
    extended = np.empty(array.shape, dtype=extended_dtype)
    # Copy every existing column, then fill in the new one
    for existing in array.dtype.names:
        extended[existing] = array[existing]
    extended[field_name] = values
    return extended
def error_sums_wtd(x, w):
    """Weighted mean squared deviation of ``x`` about its weighted mean.

    Used when propagating localization precision to cluster centers.
    """
    weighted_mean = (w * x).sum() / w.sum()
    deviations = x - weighted_mean
    return (w * deviations ** 2).sum() / w.sum()
def dbscan_clustering(locs, radius, min_samples):
    """Cluster localizations with DBSCAN and drop noise points.

    Returns a record array equal to ``locs`` plus a ``group`` field holding
    each localization's cluster label; noise (label -1) is removed.
    """
    # Use 3D coordinates when a z column is present, otherwise 2D
    if hasattr(locs, "z"):
        coords = np.vstack((locs.x, locs.y, locs.z)).T
    else:
        coords = np.vstack((locs.x, locs.y)).T
    print("Performing DBSCAN clustering...")
    labels = DBSCAN(eps=radius, min_samples=min_samples).fit(coords).labels_
    # Attach the labels and re-wrap as a record array for attribute access
    with_groups = np.rec.array(append_to_rec(locs, labels, "group"))
    print(f"Found {len(np.unique(labels[labels != -1]))} clusters")
    # DBSCAN marks unclustered points with label -1 — discard them
    return with_groups[with_groups.group != -1]
def cluster_center(grouplocs, pixelsize):
    """Compute summary properties for a single cluster of localizations.

    Parameters
    ----------
    grouplocs : pd.DataFrame or structured array
        Localizations of one cluster; must provide frame, x, y, lpx, lpy,
        photons, sx, sy, bg and net_gradient columns (plus z for 3D data).
    pixelsize : float or None
        Camera pixel size; required for 3D data to convert z into pixel
        units for the volume and convex-hull calculations.

    Returns
    -------
    list or None
        Flat property list matching CLUSTER_CENTERS_DTYPE_2D/_3D (without
        the trailing group id), or None if the calculation failed.
    """
    try:
        # Convert to DataFrame if not already
        if not isinstance(grouplocs, pd.DataFrame):
            grouplocs = pd.DataFrame(grouplocs)
        # mean and std frame
        frame = float(grouplocs.frame.mean())
        std_frame = float(grouplocs.frame.std())
        # average x and y, weighted by localization precision
        weights_x = 1/np.square(grouplocs.lpx)
        weights_y = 1/np.square(grouplocs.lpy)
        x = float(np.average(grouplocs.x, weights=weights_x))
        y = float(np.average(grouplocs.y, weights=weights_y))
        std_x = float(grouplocs.x.std())
        std_y = float(grouplocs.y.std())
        # mean values
        photons = float(grouplocs.photons.mean())
        sx = float(grouplocs.sx.mean())
        sy = float(grouplocs.sy.mean())
        bg = float(grouplocs.bg.mean())
        # weighted mean localization precision of the center
        lpx = float(np.sqrt(error_sums_wtd(grouplocs.x, weights_x) / (len(grouplocs) - 1)))
        lpy = float(np.sqrt(error_sums_wtd(grouplocs.y, weights_y) / (len(grouplocs) - 1)))
        # other attributes
        ellipticity = float(sx / sy)
        net_gradient = float(grouplocs.net_gradient.mean())
        n = int(len(grouplocs))
        if 'z' in grouplocs.columns:
            if pixelsize is None:
                raise ValueError("Camera pixel size must be specified for 3D cluster centers")
            weights_z = 1/np.square(grouplocs.lpx + grouplocs.lpy)
            z = float(np.average(grouplocs.z, weights=weights_z))
            std_z = float(grouplocs.z.std())
            # Sphere volume from the mean of the three stds (z converted to pixels)
            volume = float(np.power((std_x + std_y + std_z / pixelsize) / 3 * 2, 3) * 4.18879)
            try:
                X = np.column_stack((grouplocs.x, grouplocs.y, grouplocs.z / pixelsize))
                hull = ConvexHull(X)
                convexhull = float(hull.volume)
            # BUG FIX: bare except also swallowed KeyboardInterrupt/SystemExit;
            # degenerate hulls raise QhullError, which Exception covers.
            except Exception:
                convexhull = 0.0
            return [frame, std_frame, x, y, std_x, std_y, z, photons, sx, sy, bg, lpx, lpy,
                    std_z, ellipticity, net_gradient, n, volume, convexhull]
        else:
            area = float(np.power(std_x + std_y, 2) * np.pi)
            try:
                X = np.column_stack((grouplocs.x, grouplocs.y))
                hull = ConvexHull(X)
                # BUG FIX: for 2-D input scipy's ConvexHull.area is the
                # PERIMETER; the enclosed area is ConvexHull.volume (the 3D
                # branch above already uses .volume consistently).
                convexhull = float(hull.volume)
            except Exception:
                convexhull = 0.0
            return [frame, std_frame, x, y, std_x, std_y, photons, sx, sy, bg, lpx, lpy,
                    ellipticity, net_gradient, n, area, convexhull]
    except Exception as e:
        # Best-effort: the caller skips clusters that return None
        print(f"Error calculating cluster center: {str(e)}")
        return None
def find_cluster_centers(locs, pixelsize):
    """Build a structured array of per-cluster center properties.

    Groups ``locs`` by their ``group`` label, computes each cluster's
    center via ``cluster_center`` and packs the results (plus the group id)
    into a structured array with the 2D or 3D centers dtype.
    """
    frame = pd.DataFrame(locs)
    centers_rows = []
    for group_id, members in tqdm(frame.groupby('group'),
                                  desc="Calculating cluster centers"):
        row = cluster_center(members, pixelsize)  # flat list of properties
        if row is None:
            continue  # skip clusters whose center calculation failed
        row.append(int(group_id))  # group id is the last dtype field
        centers_rows.append(row)
    if not centers_rows:
        raise ValueError("No valid cluster centers were calculated")
    # Pick the dtype matching the dimensionality of the data
    dtype = (CLUSTER_CENTERS_DTYPE_3D if 'z' in frame.columns
             else CLUSTER_CENTERS_DTYPE_2D)
    centers = np.zeros(len(centers_rows), dtype=dtype)
    for idx, row in enumerate(centers_rows):
        centers[idx] = tuple(row)  # structured assignment needs a tuple
    return centers
# if __name__ == "__main__":
#     # Load config file
#     with open('config.yaml', 'r') as f:
#         config = yaml.safe_load(f)
#     # Process files using config parameters.
#     # NOTE: process_dbscan_clustering takes no output_folder argument —
#     # outputs are written next to each input file.
#     process_dbscan_clustering(
#         input_folder=config['paths']['input_folder'],
#         **config['dbscan']
#     )