# data_preprocessing.py
# Spatial feature-engineering helpers for street-network edges: buffering,
# overlays with thematic layers, density and diversity metrics, a KDE-based
# air-pollution heatmap, and simple graph statistics.
from typing import Union

import geopandas as gpd
import networkx as nx
import numpy as np
import pandas as pd
import rasterio.features
import rasterio.transform
from scipy.stats import entropy
from shapely.geometry import Point, shape
from sklearn.neighbors import KernelDensity


def calculate_buffer(
    df: gpd.GeoDataFrame,
    buffer_size: float
) -> gpd.GeoDataFrame:
    """Buffer the edge geometries by `buffer_size` (in CRS units) and store the buffer area."""
    buff_df = df.copy()[['geometry', 'length', 'index']]
    buff_df['geometry'] = buff_df.buffer(buffer_size)
    buff_df['buff_area'] = buff_df.area
    return buff_df
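
# Usage sketch (hypothetical names): buffer street edges by 50 m before running
# the overlay helpers below. Assumes `edges` is a projected GeoDataFrame with
# 'geometry', 'length', and 'index' columns.
#   buff_edges = calculate_buffer(edges, buffer_size=50)
#   # buff_edges now carries polygon geometries and a 'buff_area' column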


def merge_spatial_boolean(
    buff_edges: gpd.GeoDataFrame,
    edges: gpd.GeoDataFrame,
    spatial_data: gpd.GeoDataFrame,
    target_col: str,
    threshold_col: str,
    merge_col: str = "index",
    threshold: float = 0
) -> gpd.GeoDataFrame:
    """Set `target_col` True where the buffered overlap with `spatial_data` exceeds `threshold` percent of `threshold_col`."""
    overlaps = gpd.overlay(buff_edges, spatial_data, how="intersection", keep_geom_type=False)
    overlaps['overlap_length'] = overlaps.geometry.length
    overlap_sums = overlaps.groupby(merge_col)['overlap_length'].sum().reset_index(name='overlap_length_sum')
    edges = edges.merge(overlap_sums, on=merge_col, how='left')
    # Fill NaN values in overlap_length_sum with 0 (no overlap)
    edges['overlap_length_sum'] = edges['overlap_length_sum'].fillna(0)
    edges[target_col] = (edges['overlap_length_sum'] / edges[threshold_col]) * 100 > threshold
    edges = edges.drop(columns=['overlap_length_sum'])
    return edges
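
# Usage sketch (hypothetical layer): flag edges where tram tracks cover more
# than 30 % of the edge length.
#   edges = merge_spatial_boolean(buff_edges, edges, tram_tracks,
#                                 target_col='has_tram',
#                                 threshold_col='length', threshold=30)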


def merge_spatial_attribute(
    buff_edges: gpd.GeoDataFrame,
    edges: gpd.GeoDataFrame,
    spatial_data: gpd.GeoDataFrame,
    attribute_cols: Union[str, list],
    target_cols: Union[str, list] = None,
    merge_col: str = "index"
) -> gpd.GeoDataFrame:
    """Attach attributes from the `spatial_data` feature with the largest overlap per edge buffer."""
    # Normalize single column names to lists
    if not isinstance(attribute_cols, list):
        attribute_cols = [attribute_cols]
    if target_cols is None:
        target_cols = attribute_cols
    if not isinstance(target_cols, list):
        target_cols = [target_cols]
    # Perform the spatial overlay and keep, per edge, the feature with the longest overlap
    overlaps = gpd.overlay(buff_edges, spatial_data, how="intersection", keep_geom_type=False)
    overlaps['overlap_length'] = overlaps.geometry.length
    idx = overlaps.groupby(merge_col)['overlap_length'].idxmax()
    max_overlaps = overlaps.loc[idx]
    merge_cols = [merge_col] + attribute_cols
    edges = edges.merge(max_overlaps[merge_cols], on=merge_col, how="left")
    rename_dict = dict(zip(attribute_cols, target_cols))
    edges = edges.rename(columns=rename_dict)
    return edges
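
# Usage sketch (hypothetical layer): take the speed limit of the zone that
# overlaps each edge buffer the most.
#   edges = merge_spatial_attribute(buff_edges, edges, speed_zones,
#                                   attribute_cols='maxspeed',
#                                   target_cols='speed_limit')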


def calculate_count(
    df: gpd.GeoDataFrame,
    other_df: gpd.GeoDataFrame,
    grouping_column: str,
    count_column: str = None
) -> pd.Series:
    """Count intersecting features of `other_df` per `grouping_column` value (or sum `count_column` if given)."""
    overlaps = gpd.sjoin(df, other_df, how='inner', predicate='intersects')
    if count_column:
        result = overlaps.groupby(grouping_column)[count_column].sum()
    else:
        result = overlaps.groupby(grouping_column).size()
    # Returns a Series aligned to `df`, with 0 where nothing intersects
    return df[grouping_column].map(result).fillna(0)


def merge_spatial_share(
    edges: gpd.GeoDataFrame,
    buffer: gpd.GeoDataFrame,
    spatial_data: gpd.GeoDataFrame,
    target_col: str,
    divider_col: str,
    percent: bool = False,
    merge_col: str = "index",
) -> gpd.GeoDataFrame:
    """Compute the share of each edge covered by `spatial_data`, relative to `divider_col` of `buffer`."""
    overlaps = gpd.overlay(buffer, spatial_data, how="intersection", keep_geom_type=False)
    if overlaps.empty:
        edges[target_col] = 0
        return edges
    # Use length for line layers and area for polygon layers
    geom_type = edges.geometry.geom_type.iloc[0]
    if geom_type == 'LineString':
        overlaps['overlap'] = overlaps.geometry.length
    elif geom_type == 'Polygon':
        overlaps['overlap'] = overlaps.geometry.area
    else:
        raise ValueError(f"Unsupported geometry type: {geom_type}")
    overlap_sums = overlaps.groupby(merge_col)['overlap'].sum().rename('overlap_sum')
    edges = edges.merge(overlap_sums, on=merge_col, how='left')
    # Division is index-aligned: assumes `edges` and `buffer` share an index
    edges[target_col] = (edges['overlap_sum'] / buffer[divider_col]).fillna(0)
    if percent:
        edges[target_col] *= 100
    edges.drop(columns=['overlap_sum'], inplace=True)
    return edges
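
# Usage sketch (hypothetical layer): share of each edge buffer covered by
# green space, expressed as a percentage of the buffer area.
#   edges = merge_spatial_share(edges, buff_edges, green_spaces,
#                               target_col='green_share',
#                               divider_col='buff_area', percent=True)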


def merge_distance_to_nearest(
    edges: gpd.GeoDataFrame,
    spatial_data: gpd.GeoDataFrame,
    target_col: str,
    merge_col: str = "index",
    how: str = "inner"
) -> gpd.GeoDataFrame:
    """Attach the distance from each edge to the nearest feature in `spatial_data`."""
    # gpd.sjoin_nearest only accepts how='left', 'right', or 'inner'
    nearest_stops = gpd.sjoin_nearest(edges, spatial_data, how=how, distance_col=target_col)
    edges = edges.merge(nearest_stops[[merge_col, target_col]], on=merge_col)
    return edges
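
# Usage sketch (hypothetical layer): distance from each edge to the nearest
# public-transport stop, in CRS units.
#   edges = merge_distance_to_nearest(edges, transit_stops,
#                                     target_col='dist_to_stop')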


def merge_spatial_count(
    buff_edges: gpd.GeoDataFrame,
    edges: gpd.GeoDataFrame,
    spatial_data: gpd.GeoDataFrame,
    target_col: str,
    agg_col: str = None,
    agg_func: str = "size",
    merge_col: str = "index",
) -> gpd.GeoDataFrame:
    """Aggregate features of `spatial_data` per edge buffer (count by default) into `target_col`."""
    overlaps = gpd.overlay(buff_edges, spatial_data, how="intersection", keep_geom_type=False)
    if agg_func in ['size', 'sum', 'min', 'max']:
        aggregation = (overlaps.groupby(merge_col)[agg_col].agg(agg_func) if agg_col
                       else overlaps.groupby(merge_col).agg(agg_func))
    else:
        # Custom callables are applied group-wise
        aggregation = overlaps.groupby(merge_col).apply(agg_func)
    # Align to the buffer index; buffers without any overlap get 0
    aggregation_aligned = buff_edges[merge_col].map(aggregation).fillna(0)
    edges[target_col] = aggregation_aligned
    return edges
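
# Usage sketch (hypothetical layer): number of shops within each edge buffer.
#   edges = merge_spatial_count(buff_edges, edges, shops, target_col='n_shops')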


def compute_air_pollution_heatmap(gdf, column, boundary_gdf, resolution=300, bandwidth=500):
    """
    Compute a KDE-based density grid for NO2 concentrations and return a GeoDataFrame.

    Parameters:
    - gdf: GeoDataFrame with point data and NO2 values.
    - column: Column name in `gdf` for the NO2 values.
    - boundary_gdf: GeoDataFrame representing the boundary for masking.
    - resolution: Grid cell size in CRS units (default: 300).
    - bandwidth: KDE bandwidth in CRS units (default: 500).

    Returns:
    - heatmap_gdf: GeoDataFrame with polygons representing grid cells and a density attribute.
    """
    # Build a regular grid over the boundary's bounding box
    x_min, y_min, x_max, y_max = boundary_gdf.total_bounds
    coords = np.array([(geom.x, geom.y) for geom in gdf.geometry])
    weights = gdf[column].values
    xx, yy = np.meshgrid(
        np.arange(x_min, x_max, resolution),
        np.arange(y_min, y_max, resolution)
    )
    grid_coords = np.c_[xx.ravel(), yy.ravel()]
    # Weighted Gaussian KDE: measurement values act as sample weights
    kde = KernelDensity(bandwidth=bandwidth, kernel='gaussian').fit(coords, sample_weight=weights)
    log_density = kde.score_samples(grid_coords)
    density = np.exp(log_density).reshape(xx.shape)
    # Rescale so the peak of the (unitless) KDE surface matches the maximum
    # observed NO2 value
    density *= gdf[column].max() / density.max()
    # Mask cells outside the boundary
    mask = np.array([
        boundary_gdf.contains(Point(x, y)).any()
        for x, y in grid_coords
    ]).reshape(xx.shape)
    density[~mask] = np.nan
    density = density.astype('float32')
    # Vectorize the grid into polygons. The south/north bounds are deliberately
    # swapped: row 0 of `density` is the southernmost row (np.arange ascends),
    # while rasterio expects row 0 at the top.
    transform = rasterio.transform.from_bounds(
        x_min, y_max,
        x_max, y_min,
        density.shape[1], density.shape[0]
    )
    shapes = rasterio.features.shapes(density, transform=transform)
    polygons = []
    cell_values = []
    for geom, value in shapes:
        if not np.isnan(value):
            polygons.append(shape(geom))
            cell_values.append(value)
    heatmap_gdf = gpd.GeoDataFrame({column: cell_values}, geometry=polygons, crs=boundary_gdf.crs)
    return heatmap_gdf
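
# Usage sketch (hypothetical data): turn NO2 point measurements into a masked
# density surface over a city boundary (both layers in the same projected CRS).
#   heatmap = compute_air_pollution_heatmap(no2_points, 'no2', city_boundary,
#                                           resolution=300, bandwidth=500)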


def count_characters(value, chars):
    """Count how many characters of `value` appear in `chars`; NaN counts as 0."""
    if pd.isna(value):
        return 0
    return sum(1 for char in value if char in chars)


def calculate_land_use_mix(buffer, edges, landuse, landuse_col):
    """Compute a Shannon-entropy land-use mix per edge buffer and store it in `landuse_col`."""
    def calculate_shannon_entropy(proportions):
        return entropy(proportions, base=np.e)

    # Note: assumes the land-use layer carries its category in a 'typ' column
    intersected = gpd.overlay(landuse, buffer, how='intersection')
    intersected['area'] = intersected.geometry.area
    land_use_areas = intersected.groupby(['index', 'typ'])['area'].sum().reset_index()
    total_area = land_use_areas.groupby('index')['area'].sum().rename('total_area')
    land_use_areas = land_use_areas.join(total_area, on='index')
    land_use_areas['proportion'] = land_use_areas['area'] / land_use_areas['total_area']
    edges_entropy = land_use_areas.groupby('index')['proportion'].apply(calculate_shannon_entropy).reset_index()
    edges_entropy = edges_entropy.rename(columns={'proportion': landuse_col})
    edges = edges.merge(edges_entropy, on='index', how='left')
    edges[landuse_col] = edges[landuse_col].fillna(0)
    return edges
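
# Usage sketch (hypothetical layer): land-use diversity per edge buffer; 0
# means a single land-use type, higher values mean a more even mix.
#   edges = calculate_land_use_mix(buff_edges, edges, landuse_polygons,
#                                  landuse_col='landuse_mix')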


def calculate_edge_betweenness(edges, target_col, normalize=True):
    """Compute edge betweenness centrality on the graph spanned by the 'u'/'v' node columns."""
    G = nx.from_pandas_edgelist(edges, 'u', 'v')
    edge_btw = nx.edge_betweenness_centrality(G, normalized=normalize)
    # The graph is undirected, so emit both orientations of each edge to make
    # the merge robust to the (u, v) ordering used in `edges`
    edge_btw_df = pd.DataFrame(
        [{'u': u, 'v': v, target_col: bc} for (u, v), bc in edge_btw.items()]
        + [{'u': v, 'v': u, target_col: bc} for (u, v), bc in edge_btw.items()]
    ).drop_duplicates(subset=['u', 'v'])
    edges = edges.merge(edge_btw_df, on=['u', 'v'], how='left')
    return edges


def calculate_intersection_density(edges, buffer, intersection_col):
    """Count unique edge endpoints (street intersections) within each edge buffer."""
    all_points = edges['geometry'].apply(lambda geom: [Point(geom.coords[0]), Point(geom.coords[-1])]).explode()
    unique_points = gpd.GeoSeries(pd.unique(all_points), crs=edges.crs)
    intersections = gpd.GeoDataFrame(geometry=unique_points)
    edges = merge_spatial_count(buffer, edges, intersections, intersection_col)
    return edges


def calculate_bikelane_density(edges, buffer, bike_lanes, bikelane_col):
    """Compute bike-lane length per buffer area for each edge (assumes `edges` and `buffer` share an index)."""
    overlaps = gpd.overlay(buffer, bike_lanes, how="intersection", keep_geom_type=False)
    overlaps['overlap_length'] = overlaps.geometry.length
    overlap_sums = overlaps.groupby('index')['overlap_length'].sum().rename('overlap_length_sum')
    edges = edges.merge(overlap_sums, on='index', how='left')
    edges[bikelane_col] = (edges['overlap_length_sum'] / buffer['buff_area']).fillna(0)
    edges = edges.drop(columns=['overlap_length_sum'])
    return edges


def calculate_node_degrees(edges, target_col):
    """Store the mean degree of each edge's two end nodes in `target_col`."""
    G = nx.from_pandas_edgelist(edges, 'u', 'v')
    degree_dict = dict(G.degree())
    degree_df = pd.DataFrame.from_dict(degree_dict, orient='index', columns=['degree']).reset_index().rename(
        columns={'index': 'node'})
    edges = edges.merge(degree_df, left_on='u', right_on='node', how='left').rename(
        columns={'degree': 'u_degree'}).drop(columns=['node'])
    edges = edges.merge(degree_df, left_on='v', right_on='node', how='left').rename(
        columns={'degree': 'v_degree'}).drop(columns=['node'])
    edges[target_col] = edges[['u_degree', 'v_degree']].mean(axis=1)
    return edges


def assign_points(series, bins):
    """Assign 1-5 points from the threshold `bins` via np.digitize (clipped to the 1-5 range)."""
    return np.digitize(series, bins, right=True).clip(1, 5)
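
# Usage sketch (hypothetical thresholds): map NO2 values onto a 1-5 score,
# where values up to 20 score 1 and values above 50 score 5.
#   scores = assign_points(edges['no2'], bins=[10, 20, 30, 40, 50])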


def average_pop(group):
    """Calculate the average population per row of `group` (expects a 'PERS_N' column)."""
    return group['PERS_N'].sum() / len(group)


def avg_no2(group):
    """Calculate the average NO2 concentration per row of `group` (expects a 'no2' column)."""
    return group['no2'].sum() / len(group)
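

# Minimal smoke test on synthetic data (illustration only; the 'u'/'v' node
# columns and the 'index'/'length' fields follow the conventions assumed above).
if __name__ == "__main__":
    from shapely.geometry import LineString

    demo_edges = gpd.GeoDataFrame(
        {
            'u': [1, 2, 3],
            'v': [2, 3, 1],
            'index': [0, 1, 2],
            'geometry': [
                LineString([(0, 0), (100, 0)]),
                LineString([(100, 0), (100, 100)]),
                LineString([(100, 100), (0, 0)]),
            ],
        },
        crs='EPSG:32633',
    )
    demo_edges['length'] = demo_edges.geometry.length
    demo_edges = calculate_node_degrees(demo_edges, 'avg_degree')
    demo_edges = calculate_edge_betweenness(demo_edges, 'betweenness')
    print(demo_edges[['u', 'v', 'avg_degree', 'betweenness']])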