-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathspatiotemporal_analysis.py
More file actions
940 lines (784 loc) · 41.4 KB
/
spatiotemporal_analysis.py
File metadata and controls
940 lines (784 loc) · 41.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
import glob
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
import folium
from folium import plugins
import warnings
# NOTE(review): blanket warning suppression hides real issues (e.g. pandas
# dtype warnings); consider narrowing to specific warning categories.
warnings.filterwarnings('ignore')
# Configure matplotlib to render Chinese (CJK) labels and the minus sign correctly.
plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'Arial Unicode MS']
plt.rcParams['axes.unicode_minus'] = False
def load_all_months_data(base_dir='cleaned_data', months=None):
    """
    Load the cleaned per-month data produced by the preprocessing stage.

    Parameters
    ----------
    base_dir : str
        Root folder containing one sub-folder per month.
    months : list[str] | None
        Month folder names to load; ``None`` loads every sub-folder found.

    Returns
    -------
    tuple
        (combined hourly station metrics DataFrame, combined trip DataFrame),
        or ``(None, None)`` when no usable data is found.
    """
    print("=" * 60)
    print("📊 第三阶段:探索时空规律")
    print("=" * 60)
    print("\n正在加载数据...")
    # If specific months were requested, load only those; otherwise scan
    # base_dir for every month sub-folder.
    if months is None:
        month_folders = sorted(glob.glob(os.path.join(base_dir, '*/')))
    else:
        month_folders = []
        for month in months:
            month_folder = os.path.join(base_dir, month)
            if os.path.exists(month_folder):
                month_folders.append(month_folder)
    if not month_folders:
        print(f"❌ 未找到数据文件夹! 请确保 {base_dir} 文件夹中有指定的月份子文件夹。")
        return None, None
    print(f"找到 {len(month_folders)} 个月份的数据:")
    for folder in month_folders:
        print(f" - {os.path.basename(os.path.normpath(folder))}")
    # Collect each month's hourly metrics and trip records.
    all_metrics = []
    all_trip_data = []
    for month_folder in month_folders:
        # Bug fix: glob('*/') yields paths with a trailing separator, for which
        # os.path.basename() returns '' — normalize first so the month label
        # is the actual folder name.
        month = os.path.basename(os.path.normpath(month_folder))
        metrics_file = os.path.join(month_folder, 'station_hourly_metrics.csv')
        trip_file = os.path.join(month_folder, 'cleaned_trip_data.csv')
        if os.path.exists(metrics_file):
            print(f"\n加载 {month} 的小时指标数据...")
            metrics = pd.read_csv(metrics_file)
            metrics['month'] = month
            all_metrics.append(metrics)
            print(f" ✓ 加载了 {len(metrics):,} 条小时指标记录")
        if os.path.exists(trip_file):
            print(f"加载 {month} 的行程数据(用于提取地理信息)...")
            # Only the station/coordinate columns are needed downstream;
            # skipping the rest keeps memory usage down.
            trip = pd.read_csv(trip_file, usecols=[
                'start_station_id', 'start_station_name',
                'start_lat', 'start_lng',
                'end_station_id', 'end_station_name',
                'end_lat', 'end_lng'
            ])
            all_trip_data.append(trip)
            print(f" ✓ 加载了 {len(trip):,} 条行程记录")
    # Bug fix: pd.concat([]) raises ValueError — bail out cleanly when the
    # month folders exist but contain none of the expected CSV files.
    if not all_metrics or not all_trip_data:
        print(f"❌ 未找到数据文件夹! 请确保 {base_dir} 文件夹中有指定的月份子文件夹。")
        return None, None
    # Merge all months into single frames.
    print("\n合并所有月份的数据...")
    combined_metrics = pd.concat(all_metrics, ignore_index=True)
    combined_trip = pd.concat(all_trip_data, ignore_index=True)
    print(f"✓ 合并后小时指标数据: {len(combined_metrics):,} 条记录")
    print(f"✓ 合并后行程数据: {len(combined_trip):,} 条记录")
    return combined_metrics, combined_trip
def extract_station_coordinates(trip_data):
    """
    Derive one (lat, lng) pair per station from the trip records.

    Start- and end-station columns are stacked into a single table,
    coordinates are averaged per station to smooth out recording jitter,
    and rows with missing or zero coordinates are discarded.
    """
    print("\n正在提取站点地理坐标...")
    unified_cols = ['station_id', 'station_name', 'lat', 'lng']
    frames = []
    # Stack the start-side and end-side station columns under common names.
    for prefix in ('start', 'end'):
        subset = trip_data[[f'{prefix}_station_id', f'{prefix}_station_name',
                            f'{prefix}_lat', f'{prefix}_lng']].copy()
        subset.columns = unified_cols
        frames.append(subset)
    stacked = pd.concat(frames, ignore_index=True)
    # Average duplicate observations of the same station.
    station_coords = (stacked
                      .groupby(['station_id', 'station_name'], as_index=False)
                      .agg(lat=('lat', 'mean'), lng=('lng', 'mean')))
    # Drop stations whose coordinates are missing or placeholder zeros.
    valid = (station_coords['lat'].notna()
             & station_coords['lng'].notna()
             & (station_coords['lat'] != 0)
             & (station_coords['lng'] != 0))
    station_coords = station_coords[valid]
    print(f"✓ 提取了 {len(station_coords)} 个站点的坐标信息")
    return station_coords
def analyze_hourly_trends(metrics_data, output_dir='analysis_results', period_name=''):
    """
    Stage 3, task 1: plot the city-wide mean net flow per hour of day.

    Parameters
    ----------
    metrics_data : DataFrame
        Hourly station metrics with at least 'hour' and 'net_flow' columns.
    output_dir : str
        Folder the PNG chart is written to (created if missing).
    period_name : str
        Suffix appended to the chart title (e.g. a month range).

    Returns
    -------
    DataFrame with columns ['hour', 'avg_net_flow'], one row per hour.
    """
    print("\n" + "=" * 60)
    print("📈 任务1: 分析全城24小时净流量趋势")
    print("=" * 60)
    # Mean net flow per hour, averaged over all stations and all days.
    hourly_avg_netflow = metrics_data.groupby('hour')['net_flow'].mean().reset_index()
    hourly_avg_netflow.columns = ['hour', 'avg_net_flow']
    # 24-hour curve: red fill below zero = net outflow (bike shortage),
    # blue fill above zero = net inflow (bike pile-up).
    plt.figure(figsize=(14, 7))
    plt.plot(hourly_avg_netflow['hour'], hourly_avg_netflow['avg_net_flow'],
             marker='o', linewidth=2.5, markersize=8, color='#2E86AB')
    plt.axhline(y=0, color='gray', linestyle='--', linewidth=1, alpha=0.5)
    plt.fill_between(hourly_avg_netflow['hour'], 0, hourly_avg_netflow['avg_net_flow'],
                     where=(hourly_avg_netflow['avg_net_flow'] < 0),
                     color='red', alpha=0.3, label='净流出(缺车)')
    plt.fill_between(hourly_avg_netflow['hour'], 0, hourly_avg_netflow['avg_net_flow'],
                     where=(hourly_avg_netflow['avg_net_flow'] > 0),
                     color='blue', alpha=0.3, label='净流入(堆积)')
    plt.xlabel('小时', fontsize=12, fontweight='bold')
    plt.ylabel('平均净流量', fontsize=12, fontweight='bold')
    title = f'全城所有站点每小时平均净流量趋势{period_name}'
    plt.title(title, fontsize=14, fontweight='bold', pad=20)
    plt.grid(True, alpha=0.3, linestyle='--')
    plt.legend(fontsize=10)
    plt.xticks(range(24))
    plt.tight_layout()
    # Save the chart.
    os.makedirs(output_dir, exist_ok=True)
    plt.savefig(os.path.join(output_dir, '01_hourly_netflow_trend.png'), dpi=300, bbox_inches='tight')
    print(f"✓ 24小时净流量趋势图已保存到: {os.path.join(output_dir, '01_hourly_netflow_trend.png')}")
    # Peak windows: morning = hours 7-8, evening = hours 17-18 (half-open ranges).
    morning_peak = hourly_avg_netflow[(hourly_avg_netflow['hour'] >= 7) & (hourly_avg_netflow['hour'] < 9)]
    evening_peak = hourly_avg_netflow[(hourly_avg_netflow['hour'] >= 17) & (hourly_avg_netflow['hour'] < 19)]
    morning_avg = morning_peak['avg_net_flow'].mean()
    evening_avg = evening_peak['avg_net_flow'].mean()
    print(f"\n早高峰(7-9点)平均净流量: {morning_avg:.2f}")
    print(f"晚高峰(17-19点)平均净流量: {evening_avg:.2f}")
    # Bug fix: the tidal-pattern check previously tested the global min/max of
    # the whole 24-hour curve, so any pair of hours could satisfy it even
    # though the printed message claims morning outflow / evening inflow.
    # Test the actual peak-window means instead (same thresholds as
    # generate_summary_report uses).
    if morning_avg < -0.5 and evening_avg > 0.5:
        print("\n✓ 观察到典型的'早晚双峰'潮汐波形!")
        print(f" 早高峰时段(7-9点)出现大量借车(净流出),平均净流量为负")
        print(f" 晚高峰时段(17-19点)出现大量还车(净流入),平均净流量为正")
    else:
        print("\n⚠ 潮汐波形特征不明显")
    plt.close()
    return hourly_avg_netflow
def analyze_peak_hours(metrics_data, output_dir='analysis_results'):
    """
    Stage 3, task 2: per-station average flows for the two peak periods.

    Morning peak = hours 7-8, evening peak = hours 17-18 (half-open 7-9 /
    17-19 windows). Stations active in only one peak get 0 for the other
    peak's columns. The merged table is written to
    ``peak_hour_station_flow.csv``.

    Returns
    -------
    DataFrame
        One row per station with columns
        {morning,evening}_avg_{netflow,outflow,inflow}.
    """
    print("\n" + "=" * 60)
    print("⏰ 任务2: 分析高峰时段站点流量")
    print("=" * 60)

    def _peak_station_avg(peak_data, prefix):
        # Per-station mean net/out/in flow for one peak period.
        avg = peak_data.groupby(['station_id', 'station_name']).agg({
            'net_flow': 'mean',
            'outflow_count': 'mean',
            'inflow_count': 'mean'
        }).reset_index()
        avg.columns = ['station_id', 'station_name',
                       f'{prefix}_avg_netflow', f'{prefix}_avg_outflow', f'{prefix}_avg_inflow']
        return avg

    # Slice out the two peak windows.
    morning_data = metrics_data[
        (metrics_data['hour'] >= 7) & (metrics_data['hour'] < 9)
    ].copy()
    evening_data = metrics_data[
        (metrics_data['hour'] >= 17) & (metrics_data['hour'] < 19)
    ].copy()
    morning_station_flow = _peak_station_avg(morning_data, 'morning')
    evening_station_flow = _peak_station_avg(evening_data, 'evening')
    # Outer merge keeps stations that appear in only one peak period.
    peak_flow = pd.merge(morning_station_flow, evening_station_flow,
                         on=['station_id', 'station_name'], how='outer')
    # Bug fix: previously only the *netflow* columns were filled after the
    # outer merge, leaving NaN in the outflow/inflow columns of one-sided
    # stations (and in the saved CSV); fill every flow column consistently.
    flow_cols = [col for col in peak_flow.columns
                 if col not in ('station_id', 'station_name')]
    peak_flow[flow_cols] = peak_flow[flow_cols].fillna(0)
    print(f"✓ 早高峰数据: {len(morning_data):,} 条记录,涉及 {len(morning_station_flow)} 个站点")
    print(f"✓ 晚高峰数据: {len(evening_data):,} 条记录,涉及 {len(evening_station_flow)} 个站点")
    # Persist the merged peak-hour table.
    os.makedirs(output_dir, exist_ok=True)
    peak_flow.to_csv(os.path.join(output_dir, 'peak_hour_station_flow.csv'), index=False, encoding='utf-8-sig')
    print(f"✓ 高峰时段站点流量数据已保存到: {os.path.join(output_dir, 'peak_hour_station_flow.csv')}")
    return peak_flow
def merge_geographic_info(peak_flow, station_coords, output_dir='analysis_results'):
    """
    Stage 3, task 3: attach latitude/longitude to the peak-hour flow table.

    Left-joins on (station_id, station_name), drops stations without a
    known coordinate, and writes the result to
    ``peak_flow_with_geography.csv``.
    """
    print("\n" + "=" * 60)
    print("🗺️ 任务3: 关联站点地理信息")
    print("=" * 60)
    join_keys = ['station_id', 'station_name']
    peak_flow_with_geo = peak_flow.merge(station_coords, on=join_keys, how='left')
    # Stations never seen in the trip data have no coordinates; drop them.
    has_coords = peak_flow_with_geo['lat'].notna()
    peak_flow_with_geo = peak_flow_with_geo[has_coords]
    print(f"✓ 成功关联 {len(peak_flow_with_geo)} 个站点的地理信息")
    # Persist the geo-enriched table.
    os.makedirs(output_dir, exist_ok=True)
    out_path = os.path.join(output_dir, 'peak_flow_with_geography.csv')
    peak_flow_with_geo.to_csv(out_path, index=False, encoding='utf-8-sig')
    print(f"✓ 带地理信息的高峰流量数据已保存到: {out_path}")
    return peak_flow_with_geo
def create_tidal_map(peak_flow_with_geo, output_dir='analysis_results'):
    """
    Stage 4, task 1: build the interactive "tidal map" (folium HTML).

    Red circles mark stations losing bikes in the morning peak
    (morning_avg_netflow < -2); blue circles mark stations accumulating
    bikes in the evening peak (evening_avg_netflow > 2). Circle radius
    scales with flow magnitude. Also produces the static matplotlib
    version via create_static_tidal_map(). Returns the folium Map.
    """
    print("\n" + "=" * 60)
    print("🗺️ 第四阶段:可视化与分类")
    print("=" * 60)
    print("\n📍 任务1: 绘制潮汐地图")
    print("=" * 60)
    # Center the map on the mean station coordinates.
    center_lat = peak_flow_with_geo['lat'].mean()
    center_lng = peak_flow_with_geo['lng'].mean()
    # Base map.
    m = folium.Map(location=[center_lat, center_lng], zoom_start=11, tiles='OpenStreetMap')
    # Morning-peak shortage stations — large net outflow (negative values).
    morning_shortage = peak_flow_with_geo[
        peak_flow_with_geo['morning_avg_netflow'] < -2
    ].copy()
    # Evening-peak accumulation stations — large net inflow (positive values).
    evening_accumulation = peak_flow_with_geo[
        peak_flow_with_geo['evening_avg_netflow'] > 2
    ].copy()
    # Red markers: morning-peak shortage stations.
    for idx, row in morning_shortage.iterrows():
        folium.CircleMarker(
            location=[row['lat'], row['lng']],
            radius=5 + abs(row['morning_avg_netflow']) * 0.5,  # radius scales with flow magnitude
            popup=f"{row['station_name']}<br>早高峰净流量: {row['morning_avg_netflow']:.2f}",
            color='red',
            fill=True,
            fillColor='red',
            fillOpacity=0.7,
            weight=2
        ).add_to(m)
    # Blue markers: evening-peak accumulation stations.
    for idx, row in evening_accumulation.iterrows():
        folium.CircleMarker(
            location=[row['lat'], row['lng']],
            radius=5 + row['evening_avg_netflow'] * 0.5,
            popup=f"{row['station_name']}<br>晚高峰净流量: {row['evening_avg_netflow']:.2f}",
            color='blue',
            fill=True,
            fillColor='blue',
            fillOpacity=0.7,
            weight=2
        ).add_to(m)
    # Fixed-position HTML legend injected into the page.
    legend_html = '''
    <div style="position: fixed;
    bottom: 50px; left: 50px; width: 200px; height: 120px;
    background-color: white; z-index:9999; font-size:14px;
    border:2px solid grey; border-radius:5px; padding: 10px">
    <p><b>潮汐地图图例</b></p>
    <p><i class="fa fa-circle" style="color:red"></i> 早高峰缺车(红色区域)</p>
    <p><i class="fa fa-circle" style="color:blue"></i> 晚高峰淤积(蓝色区域)</p>
    <p>圆圈大小表示流量强度</p>
    </div>
    '''
    m.get_root().html.add_child(folium.Element(legend_html))
    # Save the interactive map.
    os.makedirs(output_dir, exist_ok=True)
    map_file = os.path.join(output_dir, 'tidal_map.html')
    m.save(map_file)
    print(f"✓ 潮汐地图已保存到: {map_file}")
    print(f" - 红色标记: {len(morning_shortage)} 个早高峰缺车站点")
    print(f" - 蓝色标记: {len(evening_accumulation)} 个晚高峰淤积站点")
    # Static (matplotlib) companion chart using the same station subsets.
    create_static_tidal_map(peak_flow_with_geo, morning_shortage, evening_accumulation, output_dir)
    return m
def create_static_tidal_map(peak_flow_with_geo, morning_shortage, evening_accumulation, output_dir='analysis_results'):
    """
    Render a static (matplotlib) version of the tidal map: two side-by-side
    scatter panels, one for morning-peak shortage stations (red) and one
    for evening-peak accumulation stations (blue).
    """
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
    panels = [
        (ax1, morning_shortage, 'red',
         50 + abs(morning_shortage['morning_avg_netflow']) * 5,
         '早高峰缺车', '早高峰缺车站点分布(红色)'),
        (ax2, evening_accumulation, 'blue',
         50 + evening_accumulation['evening_avg_netflow'] * 5,
         '晚高峰淤积', '晚高峰淤积站点分布(蓝色)'),
    ]
    for ax, highlight, color, sizes, label, title in panels:
        # Grey background layer with every station for spatial context.
        ax.scatter(peak_flow_with_geo['lng'], peak_flow_with_geo['lat'],
                   c='lightgray', s=10, alpha=0.3, label='其他站点')
        # Highlighted stations, marker size scaled by flow magnitude.
        ax.scatter(highlight['lng'], highlight['lat'],
                   c=color, s=sizes, alpha=0.6, label=label)
        ax.set_xlabel('经度', fontsize=12)
        ax.set_ylabel('纬度', fontsize=12)
        ax.set_title(title, fontsize=14, fontweight='bold')
        ax.legend()
        ax.grid(True, alpha=0.3)
    plt.tight_layout()
    os.makedirs(output_dir, exist_ok=True)
    out_path = os.path.join(output_dir, '02_tidal_map_static.png')
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    print(f"✓ 静态潮汐地图已保存到: {out_path}")
    plt.close()
def extract_24h_patterns(metrics_data):
    """
    Build a per-station 24-hour net-flow profile.

    Returns one row per station with columns station_id, station_name and
    the 24 hourly mean net-flow values (hour columns 0-23 in order); hours
    with no observations are filled with 0.
    """
    print("\n正在提取站点24小时流量模式...")
    # Per-station, per-hour mean flows.
    hourly_means = metrics_data.groupby(['station_id', 'station_name', 'hour']).agg({
        'net_flow': 'mean',
        'outflow_count': 'mean',
        'inflow_count': 'mean'
    }).reset_index()
    # Reshape: one row per station, one column per hour of the day.
    pattern_matrix = hourly_means.pivot_table(
        index=['station_id', 'station_name'],
        columns='hour',
        values='net_flow',
        fill_value=0
    ).reset_index()
    # Guarantee the full 0-23 range even if some hours never occur in the data.
    for hour in range(24):
        if hour not in pattern_matrix.columns:
            pattern_matrix[hour] = 0

    def _is_hour(col):
        return isinstance(col, (int, np.integer)) and 0 <= col < 24

    # Order columns: metadata first, then hours 0..23.
    hour_cols = sorted(col for col in pattern_matrix.columns if _is_hour(col))
    meta_cols = [col for col in pattern_matrix.columns if not _is_hour(col)]
    pattern_matrix = pattern_matrix[meta_cols + hour_cols]
    print(f"✓ 提取了 {len(pattern_matrix)} 个站点的24小时流量模式")
    return pattern_matrix
def classify_stations(metrics_data, station_coords, output_dir='analysis_results'):
    """
    Stage 4, task 2: classify stations by function via K-Means clustering.

    Each station is represented by its standardized 24-hour mean net-flow
    profile and clustered into 6 groups. Each cluster then receives a
    heuristic human-readable name/description derived from its peak
    behaviour (morning/evening flow, volatility, peak hours). Results are
    saved to ``station_clusters.csv`` and visualized.

    Returns
    -------
    (pattern_matrix, cluster_names)
        pattern_matrix : DataFrame with per-station hourly profile, cluster
            id, cluster_name and coordinates.
        cluster_names : dict mapping cluster id -> {'name', 'description'}.
    """
    print("\n" + "=" * 60)
    print("🔍 任务2: 站点功能分类(K-Means聚类)")
    print("=" * 60)
    # Extract each station's 24-hour net-flow profile.
    pattern_matrix = extract_24h_patterns(metrics_data)
    # Clustering features: the 24 hourly mean net-flow columns (int labels 0-23).
    feature_cols = [col for col in pattern_matrix.columns if isinstance(col, (int, np.integer)) and 0 <= col < 24]
    X = pattern_matrix[feature_cols].values
    # Standardize so no single hour dominates the distance metric.
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X)
    # K-Means with a fixed 6 clusters (an elbow analysis could refine this;
    # random_state pins the result for reproducibility).
    n_clusters = 6
    kmeans = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)
    pattern_matrix['cluster'] = kmeans.fit_predict(X_scaled)
    # Attach station coordinates for the map outputs.
    pattern_matrix = pd.merge(pattern_matrix, station_coords,
                              on=['station_id', 'station_name'],
                              how='left')
    # Characterize each cluster by its mean profile.
    print("\n聚类结果分析:")
    cluster_analysis = []
    for cluster_id in range(n_clusters):
        cluster_data = pattern_matrix[pattern_matrix['cluster'] == cluster_id]
        cluster_pattern = cluster_data[feature_cols].mean()
        # Key indicators.
        morning_flow = cluster_pattern[[7, 8]].mean()  # hours 7-8
        evening_flow = cluster_pattern[[17, 18]].mean()  # hours 17-18
        peak_hour = cluster_pattern.idxmax()
        min_hour = cluster_pattern.idxmin()
        # Additional features used by the naming heuristics below.
        peak_flow = cluster_pattern.max()  # highest hourly net flow
        min_flow = cluster_pattern.min()  # lowest hourly net flow
        flow_range = peak_flow - min_flow  # amplitude of the daily swing
        flow_std = cluster_pattern.std()  # volatility of the profile
        total_flow = cluster_pattern.sum()  # whole-day net flow
        # Flow characteristics of specific day parts.
        morning_outflow = cluster_pattern[[7, 8, 9]].min()  # strongest morning check-out (negative)
        evening_inflow = cluster_pattern[[17, 18, 19]].max()  # strongest evening return (positive)
        midday_flow = cluster_pattern[[11, 12, 13, 14]].mean()  # midday average
        night_flow = cluster_pattern[[22, 23, 0, 1]].mean()  # night average
        cluster_analysis.append({
            'cluster': cluster_id,
            'count': len(cluster_data),
            'morning_flow': morning_flow,
            'evening_flow': evening_flow,
            'peak_hour': peak_hour,
            'min_hour': min_hour,
            'peak_flow': peak_flow,
            'min_flow': min_flow,
            'flow_range': flow_range,
            'flow_std': flow_std,
            'total_flow': total_flow,
            'morning_outflow': morning_outflow,
            'evening_inflow': evening_inflow,
            'midday_flow': midday_flow,
            'night_flow': night_flow,
            'pattern': cluster_pattern
        })
        print(f"\n类别 {cluster_id}: {len(cluster_data)} 个站点")
        print(f" 早高峰(7-8点)平均净流量: {morning_flow:.2f}")
        print(f" 晚高峰(17-18点)平均净流量: {evening_flow:.2f}")
        print(f" 净流量峰值时段: {int(peak_hour)}:00 (峰值: {peak_flow:.2f})")
        print(f" 净流量最低时段: {int(min_hour)}:00 (最低: {min_flow:.2f})")
        print(f" 流量变化幅度: {flow_range:.2f}")
        print(f" 流量波动程度: {flow_std:.2f}")
    # Name clusters from their features. Clusters with the strongest
    # signatures are named first so dedup suffixes land on the weaker ones.
    cluster_analysis_sorted = sorted(cluster_analysis,
                                     key=lambda x: abs(x['flow_range']) + abs(x['morning_flow']) + abs(x['evening_flow']),
                                     reverse=True)
    cluster_names = {}
    for idx, analysis in enumerate(cluster_analysis_sorted):
        cluster_id = analysis['cluster']
        morning = analysis['morning_flow']
        evening = analysis['evening_flow']
        flow_range = analysis['flow_range']
        flow_std = analysis['flow_std']
        morning_outflow = analysis['morning_outflow']
        evening_inflow = analysis['evening_inflow']
        midday_flow = analysis['midday_flow']
        night_flow = analysis['night_flow']
        peak_hour = analysis['peak_hour']
        min_hour = analysis['min_hour']
        peak_flow = analysis['peak_flow']
        min_flow = analysis['min_flow']
        total_flow = analysis['total_flow']
        # Extra discriminating features.
        morning_peak = analysis['pattern'][[7, 8, 9]].min()  # morning trough (max check-out)
        evening_peak = analysis['pattern'][[17, 18, 19]].max()  # evening crest (max return)
        # Heuristic classification cascade — evaluated in priority order;
        # thresholds are hand-tuned magic numbers.
        name = None
        description = ""
        # 1. Special high-volatility zones — very large daily swing.
        if flow_range > 4:
            if abs(morning) > 2 or abs(evening) > 2:
                name = "特殊功能区-强潮汐波动"
                description = f"流量波动极大(幅度{flow_range:.1f}),高峰时段流量变化明显,可能是大型交通枢纽或特殊活动区域"
            else:
                name = "特殊功能区-高波动"
                description = f"流量波动很大(幅度{flow_range:.1f}),但高峰特征不明显,可能是特殊用途站点"
        # 2. Residential pattern (out in the morning, back in the evening).
        elif morning < -0.8 and evening > 0.8:
            if flow_range > 2.5 and abs(morning_outflow) > 1.5:
                name = "住宅区型-强潮汐(早出晚归)"
                description = f"早上大量借出({morning:.1f}),傍晚大量归还({evening:.1f}),流量变化明显({flow_range:.1f}),典型的居民区"
            elif flow_range > 1.5:
                name = "住宅区型-中等潮汐(早出晚归)"
                description = f"早上借出较多({morning:.1f}),傍晚归还较多({evening:.1f}),流量有一定变化({flow_range:.1f}),居民区特征"
            else:
                name = "住宅区型-弱潮汐(早出晚归)"
                description = f"早上借出({morning:.1f}),傍晚归还({evening:.1f}),但流量变化较小({flow_range:.1f}),混合居民区"
        # 3. Office pattern (in during the morning, out in the evening).
        elif morning > 0.8 and evening < -0.8:
            if flow_range > 2.5 and evening_inflow > 1.5:
                name = "办公区型-强潮汐(早入晚出)"
                description = f"早上大量归还({morning:.1f}),傍晚大量借出({evening:.1f}),流量变化明显({flow_range:.1f}),典型的办公区"
            elif flow_range > 1.5:
                name = "办公区型-中等潮汐(早入晚出)"
                description = f"早上归还较多({morning:.1f}),傍晚借出较多({evening:.1f}),流量有一定变化({flow_range:.1f}),办公区特征"
            else:
                name = "办公区型-弱潮汐(早入晚出)"
                description = f"早上归还({morning:.1f}),傍晚借出({evening:.1f}),但流量变化较小({flow_range:.1f}),混合办公区"
        # 4. Morning-peak dominated.
        elif abs(morning) > abs(evening) and morning < -0.5:
            if evening > 0.3:
                name = "通勤起点型-早高峰借出+晚高峰归还"
                description = f"早高峰借出明显({morning:.1f}),晚高峰也有归还({evening:.1f}),可能是通勤起点或交通节点"
            elif flow_range > 1.5:
                name = "通勤起点型-早高峰强借出"
                description = f"早高峰借出非常明显({morning:.1f}),其他时段流量较低,典型的早高峰通勤起点"
            else:
                name = "通勤起点型-早高峰中等借出"
                description = f"早高峰借出({morning:.1f}),但整体流量变化较小,可能是次要通勤起点"
        # 5. Evening-peak dominated.
        elif abs(evening) > abs(morning) and evening < -0.5:
            if morning > 0.3:
                name = "通勤起点型-早高峰归还+晚高峰借出"
                description = f"早高峰有归还({morning:.1f}),晚高峰借出明显({evening:.1f}),可能是反向通勤模式"
            elif flow_range > 1.5:
                name = "通勤起点型-晚高峰强借出"
                description = f"晚高峰借出非常明显({evening:.1f}),其他时段流量较低,典型的晚高峰通勤起点"
            else:
                name = "通勤起点型-晚高峰中等借出"
                description = f"晚高峰借出({evening:.1f}),但整体流量变化较小,可能是次要通勤起点"
        # 6. Midday-active (commercial/leisure) pattern.
        elif peak_hour >= 11 and peak_hour <= 14 and midday_flow > 0.3:
            if flow_range > 1.5:
                name = "商业休闲型-午间活跃"
                description = f"午间时段流量最高(峰值{peak_flow:.1f}),全天有一定波动({flow_range:.1f}),可能是商业区或餐饮区"
            else:
                name = "商业休闲型-午间平稳活跃"
                description = f"午间时段流量较高(峰值{peak_flow:.1f}),但整体波动较小,可能是休闲区或购物中心附近"
        # 7. Balanced — flat profile all day; subdivided by volume/volatility.
        elif abs(morning) < 0.6 and abs(evening) < 0.6 and flow_range < 1.8:
            if flow_std < 0.3:
                if abs(total_flow) < 2:
                    name = "平衡型-极低流量(几乎无使用)"
                    description = f"全天流量极低且非常平稳(标准差{flow_std:.2f}),可能是偏远区域或低使用率站点"
                else:
                    name = "平衡型-极平稳(全天均匀)"
                    description = f"全天流量非常平稳(标准差{flow_std:.2f}),几乎没有波动,可能是公园、学校或休闲区"
            elif midday_flow > 0.3:
                name = "平衡型-午间略活跃"
                description = f"全天流量相对平稳(标准差{flow_std:.2f}),但午间时段略活跃,可能是休闲区或混合功能区"
            elif abs(total_flow) < 3:
                name = "平衡型-低流量平稳"
                description = f"全天流量平稳且较低(标准差{flow_std:.2f}),可能是低使用率站点或次要区域"
            else:
                name = "平衡型-中等流量平稳"
                description = f"全天流量平稳(标准差{flow_std:.2f}),流量适中,可能是混合功能区或一般使用站点"
        # 8. Everything else — subdivided by when the profile peaks.
        else:
            if peak_hour >= 7 and peak_hour <= 9:
                name = "混合型-早高峰特征"
                description = f"早高峰时段流量较高(峰值{peak_flow:.1f}),但模式不完全符合典型通勤模式,可能是特殊通勤起点"
            elif peak_hour >= 17 and peak_hour <= 19:
                name = "混合型-晚高峰特征"
                description = f"晚高峰时段流量较高(峰值{peak_flow:.1f}),但模式不完全符合典型通勤模式,可能是特殊通勤起点"
            elif peak_hour >= 22 or peak_hour <= 2:
                name = "混合型-夜间活跃"
                description = f"夜间时段流量较高(峰值{peak_flow:.1f}),可能是夜生活区域或特殊用途站点"
            elif flow_range > 2:
                name = "混合型-中等波动"
                description = f"流量有一定波动(幅度{flow_range:.1f}),但不符合典型模式,可能是特殊功能区"
            else:
                name = "混合型-低流量特殊模式"
                description = f"流量模式特殊但整体较低(幅度{flow_range:.1f}),可能是特殊用途站点或低使用率区域"
        # De-duplicate names: if another cluster already got the same base
        # name, append a feature-based suffix (or a numbered variant).
        existing_names = [cluster_names[k]['name'] for k in cluster_names.keys()]
        original_name = name  # NOTE(review): kept but never read afterwards
        # Base name = text before the first '(' qualifier.
        base_name_parts = name.split('(')
        base_name = base_name_parts[0] if len(base_name_parts) > 0 else name
        # Base name already used -> pick a distinguishing suffix.
        if any(base_name in existing_name for existing_name in existing_names):
            if flow_range > 2:
                name = f"{base_name}-高波动"
            elif abs(morning) > abs(evening):
                name = f"{base_name}-早高峰主导"
            elif abs(evening) > abs(morning):
                name = f"{base_name}-晚高峰主导"
            elif flow_std < 0.3:
                name = f"{base_name}-极平稳"
            elif midday_flow > 0.3:
                name = f"{base_name}-午间活跃"
            else:
                # Still clashing -> fall back to a numbered variant.
                counter = 1
                while name in existing_names:
                    name = f"{base_name}-变体{counter}"
                    counter += 1
        cluster_names[cluster_id] = {'name': name, 'description': description}
        print(f"\n类别 {cluster_id} 命名为: {name}")
        print(f" 说明: {description}")
    # Attach the human-readable name to every station row.
    pattern_matrix['cluster_name'] = pattern_matrix['cluster'].map(lambda x: cluster_names[x]['name'])
    # Persist the classification.
    os.makedirs(output_dir, exist_ok=True)
    pattern_matrix.to_csv(os.path.join(output_dir, 'station_clusters.csv'), index=False, encoding='utf-8-sig')
    print(f"\n✓ 站点分类结果已保存到: {os.path.join(output_dir, 'station_clusters.csv')}")
    # Subplot grid of the 24-hour profile per cluster.
    visualize_clusters(pattern_matrix, cluster_analysis, cluster_names, output_dir)
    # Interactive + static maps colored by cluster.
    create_cluster_map(pattern_matrix, cluster_names, output_dir)
    return pattern_matrix, cluster_names
def visualize_clusters(pattern_matrix, cluster_analysis, cluster_names, output_dir='analysis_results'):
    """
    Plot each cluster's mean 24-hour net-flow profile on its own subplot
    (red fill below zero = net outflow, blue fill above = net inflow) and
    save the grid to ``03_station_clusters.png``.

    Parameters
    ----------
    pattern_matrix : DataFrame with a 'cluster' column (one row per station).
    cluster_analysis : list of per-cluster stat dicts from classify_stations,
        each containing 'cluster' (id) and 'pattern' (24-value Series).
    cluster_names : dict mapping cluster id -> {'name', 'description'}.
    output_dir : str, folder for the saved PNG.
    """
    print("\n正在生成聚类可视化...")
    n_clusters = len(cluster_analysis)
    # Choose a subplot grid just large enough for the cluster count.
    if n_clusters <= 4:
        rows, cols = 2, 2
    elif n_clusters <= 6:
        rows, cols = 2, 3
    elif n_clusters <= 9:
        rows, cols = 3, 3
    else:
        rows, cols = 4, 3
    fig, axes = plt.subplots(rows, cols, figsize=(cols * 5, rows * 4))
    # Bug fix: the grid is always at least 2x2, so `axes` is always a 2-D
    # ndarray. The previous `axes.flatten() if n_clusters > 1 else [axes]`
    # produced a list containing a 2-D array for a single cluster and would
    # crash on ax.plot below; flatten unconditionally.
    axes = axes.flatten()
    for i, analysis in enumerate(cluster_analysis):
        if i >= len(axes):
            break
        cluster_id = analysis['cluster']
        cluster_data = pattern_matrix[pattern_matrix['cluster'] == cluster_id]
        pattern = analysis['pattern']
        ax = axes[i]
        ax.plot(range(24), pattern.values, marker='o', linewidth=2, markersize=6)
        ax.axhline(y=0, color='gray', linestyle='--', linewidth=1, alpha=0.5)
        ax.fill_between(range(24), 0, pattern.values,
                        where=(pattern.values < 0), color='red', alpha=0.3)
        ax.fill_between(range(24), 0, pattern.values,
                        where=(pattern.values > 0), color='blue', alpha=0.3)
        cluster_name = cluster_names[cluster_id]['name']
        # Wrap long names so the subplot title stays readable.
        if len(cluster_name) > 20:
            cluster_name = cluster_name.replace('-', '-\n')
        ax.set_title(f'{cluster_name}\n({len(cluster_data)} 个站点)',
                     fontsize=11, fontweight='bold')
        ax.set_xlabel('小时', fontsize=9)
        ax.set_ylabel('平均净流量', fontsize=9)
        ax.set_xticks(range(0, 24, 2))
        ax.grid(True, alpha=0.3)
    # Hide any unused subplots in the grid.
    for i in range(n_clusters, len(axes)):
        axes[i].axis('off')
    plt.suptitle('站点功能分类:24小时流量模式', fontsize=16, fontweight='bold', y=0.995)
    plt.tight_layout()
    os.makedirs(output_dir, exist_ok=True)
    plt.savefig(os.path.join(output_dir, '03_station_clusters.png'), dpi=300, bbox_inches='tight')
    print(f"✓ 聚类可视化已保存到: {os.path.join(output_dir, '03_station_clusters.png')}")
    plt.close()
def create_cluster_map(pattern_matrix, cluster_names, output_dir='analysis_results'):
    """
    Render the station classification on maps: an interactive folium map
    (``cluster_map.html``) and a static matplotlib scatter
    (``04_cluster_map_static.png``), one color per cluster id.
    """
    print("\n正在生成分类地图...")
    # Drop stations that got no coordinates from the upstream left-join.
    pattern_matrix = pattern_matrix[pattern_matrix['lat'].notna()]
    # Center the map on the mean station position.
    center_lat = pattern_matrix['lat'].mean()
    center_lng = pattern_matrix['lng'].mean()
    # Base map.
    m = folium.Map(location=[center_lat, center_lng], zoom_start=11, tiles='OpenStreetMap')
    # Fixed palette; colors cycle if there are more clusters than entries.
    colors = ['red', 'blue', 'green', 'orange', 'purple', 'brown']
    # One marker per station, colored by cluster.
    for cluster_id in pattern_matrix['cluster'].unique():
        cluster_data = pattern_matrix[pattern_matrix['cluster'] == cluster_id]
        color = colors[cluster_id % len(colors)]
        cluster_name = cluster_names[cluster_id]['name']
        for idx, row in cluster_data.iterrows():
            folium.CircleMarker(
                location=[row['lat'], row['lng']],
                radius=5,
                popup=f"{row['station_name']}<br>类别: {cluster_name}",
                color=color,
                fill=True,
                fillColor=color,
                fillOpacity=0.6,
                weight=2
            ).add_to(m)
    # Fixed-position HTML legend listing every cluster name.
    legend_html = '<div style="position: fixed; bottom: 50px; left: 50px; width: 250px; '
    legend_html += 'background-color: white; z-index:9999; font-size:14px; '
    legend_html += 'border:2px solid grey; border-radius:5px; padding: 10px"><p><b>站点功能分类</b></p>'
    for cluster_id, info in cluster_names.items():
        color = colors[cluster_id % len(colors)]
        legend_html += f'<p><i class="fa fa-circle" style="color:{color}"></i> {info["name"]}</p>'
    legend_html += '</div>'
    m.get_root().html.add_child(folium.Element(legend_html))
    # Save the interactive map.
    os.makedirs(output_dir, exist_ok=True)
    map_file = os.path.join(output_dir, 'cluster_map.html')
    m.save(map_file)
    print(f"✓ 分类地图已保存到: {map_file}")
    # Static matplotlib version of the same classification.
    fig, ax = plt.subplots(figsize=(14, 10))
    colors_list = ['red', 'blue', 'green', 'orange', 'purple', 'brown']
    for cluster_id in sorted(pattern_matrix['cluster'].unique()):
        cluster_data = pattern_matrix[pattern_matrix['cluster'] == cluster_id]
        color = colors_list[cluster_id % len(colors_list)]
        cluster_name = cluster_names[cluster_id]['name']
        ax.scatter(cluster_data['lng'], cluster_data['lat'],
                   c=color, s=30, alpha=0.6, label=cluster_name)
    ax.set_xlabel('经度', fontsize=12)
    ax.set_ylabel('纬度', fontsize=12)
    ax.set_title('站点功能分类地图', fontsize=14, fontweight='bold')
    ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, '04_cluster_map_static.png'), dpi=300, bbox_inches='tight')
    print(f"✓ 静态分类地图已保存到: {os.path.join(output_dir, '04_cluster_map_static.png')}")
    plt.close()
def generate_summary_report(metrics_data, peak_flow_with_geo, pattern_matrix, cluster_names, output_dir='analysis_results', period_name=''):
    """
    Write the final plain-text summary report to ``analysis_report.txt``.

    Parameters
    ----------
    metrics_data : DataFrame with 'station_id', 'hour', 'net_flow'.
    peak_flow_with_geo : DataFrame with morning/evening peak net flows.
    pattern_matrix : DataFrame with a 'cluster' column (one row per station).
    cluster_names : dict mapping cluster id -> {'name', 'description'}.
    output_dir : str, folder the report is written to.
    period_name : str, label for the analyzed time range.
    """
    print("\n" + "=" * 60)
    print("📋 生成分析报告")
    print("=" * 60)
    report = []
    report.append("=" * 60)
    report.append("自行车共享系统时空规律分析报告")
    report.append("=" * 60)
    report.append(f"\n分析时间: {pd.Timestamp.now().strftime('%Y-%m-%d %H:%M:%S')}")
    report.append(f"数据时间范围: {period_name}")
    report.append(f"总站点数: {metrics_data['station_id'].nunique()}")
    report.append(f"总记录数: {len(metrics_data):,}")
    report.append("\n" + "-" * 60)
    report.append("第三阶段:时空规律发现")
    report.append("-" * 60)
    # City-wide 24-hour trend.
    hourly_avg = metrics_data.groupby('hour')['net_flow'].mean()
    # Bug fix: `hourly_avg[7:9]` slices by *position*, which only matches
    # hours 7-8 when all 24 hours are present and sorted; select by hour
    # label instead so missing hours can't shift the window.
    morning_peak_avg = hourly_avg[hourly_avg.index.isin([7, 8])].mean()
    evening_peak_avg = hourly_avg[hourly_avg.index.isin([17, 18])].mean()
    report.append(f"\n1. 全城24小时净流量趋势:")
    report.append(f" - 早高峰(7-9点)平均净流量: {morning_peak_avg:.2f}")
    report.append(f" - 晚高峰(17-19点)平均净流量: {evening_peak_avg:.2f}")
    if morning_peak_avg < -0.5 and evening_peak_avg > 0.5:
        report.append(" - ✓ 观察到典型的'早晚双峰'潮汐波形")
    # Peak-hour station counts (same thresholds as the tidal map).
    morning_shortage = len(peak_flow_with_geo[peak_flow_with_geo['morning_avg_netflow'] < -2])
    evening_accumulation = len(peak_flow_with_geo[peak_flow_with_geo['evening_avg_netflow'] > 2])
    report.append(f"\n2. 高峰时段站点统计:")
    report.append(f" - 早高峰缺车站点(净流出<-2): {morning_shortage} 个")
    report.append(f" - 晚高峰淤积站点(净流入>2): {evening_accumulation} 个")
    report.append("\n" + "-" * 60)
    report.append("第四阶段:站点功能分类")
    report.append("-" * 60)
    # One section per cluster.
    for cluster_id, info in cluster_names.items():
        count = len(pattern_matrix[pattern_matrix['cluster'] == cluster_id])
        report.append(f"\n类别 {cluster_id}: {info['name']}")
        report.append(f" - 站点数量: {count} 个")
        report.append(f" - 特征: {info['description']}")
    report.append("\n" + "=" * 60)
    report.append(f"分析完成!所有结果已保存到 {output_dir}/ 文件夹")
    report.append("=" * 60)
    report_text = "\n".join(report)
    print(report_text)
    # Persist the report.
    os.makedirs(output_dir, exist_ok=True)
    with open(os.path.join(output_dir, 'analysis_report.txt'), 'w', encoding='utf-8') as f:
        f.write(report_text)
    print(f"\n✓ 分析报告已保存到: {os.path.join(output_dir, 'analysis_report.txt')}")
def main(months=None, output_dir='analysis_results', period_name=''):
    """
    Run the full spatiotemporal analysis pipeline.

    Steps: load data -> extract station coordinates -> 24h trend chart ->
    peak-hour flows -> geographic join -> tidal maps -> K-Means station
    classification -> summary report. All artifacts land in ``output_dir``.

    Parameters
    ----------
    months : list[str] | None
        Month folder names under the data root; ``None`` loads all months.
    output_dir : str
        Folder for all generated charts/maps/CSVs/reports.
    period_name : str
        Label appended to chart titles and the report header.
    """
    # Ensure the output folder exists.
    os.makedirs(output_dir, exist_ok=True)
    # 1. Load the requested months (hourly metrics + trip records).
    metrics_data, trip_data = load_all_months_data(months=months)
    if metrics_data is None:
        return
    # 2. One (lat, lng) per station, derived from the trip records.
    station_coords = extract_station_coordinates(trip_data)
    # 3. City-wide 24-hour net-flow trend chart.
    hourly_trends = analyze_hourly_trends(metrics_data, output_dir=output_dir, period_name=period_name)
    # 4. Per-station morning/evening peak averages.
    peak_flow = analyze_peak_hours(metrics_data, output_dir=output_dir)
    # 5. Attach coordinates to the peak-flow table.
    peak_flow_with_geo = merge_geographic_info(peak_flow, station_coords, output_dir=output_dir)
    # 6. Interactive + static tidal maps.
    create_tidal_map(peak_flow_with_geo, output_dir=output_dir)
    # 7. K-Means functional classification of stations.
    pattern_matrix, cluster_names = classify_stations(metrics_data, station_coords, output_dir=output_dir)
    # 8. Plain-text summary report.
    generate_summary_report(metrics_data, peak_flow_with_geo, pattern_matrix, cluster_names,
                            output_dir=output_dir, period_name=period_name)
    print("\n" + "=" * 60)
    print("🎉 所有分析任务完成!")
    print("=" * 60)
    print(f"\n生成的文件已保存到: {output_dir}/")
    print("\n生成的文件:")
    print(" 📊 图表:")
    print(" - 01_hourly_netflow_trend.png: 24小时净流量趋势")
    print(" - 02_tidal_map_static.png: 静态潮汐地图")
    print(" - 03_station_clusters.png: 站点聚类可视化")
    print(" - 04_cluster_map_static.png: 静态分类地图")
    print(" 🗺️ 交互式地图:")
    print(" - tidal_map.html: 潮汐地图(可在浏览器中打开)")
    print(" - cluster_map.html: 分类地图(可在浏览器中打开)")
    print(" 📄 数据文件:")
    print(" - peak_hour_station_flow.csv: 高峰时段站点流量")
    print(" - peak_flow_with_geography.csv: 带地理信息的高峰流量")
    print(" - station_clusters.csv: 站点分类结果")
    print(" 📋 报告:")
    print(" - analysis_report.txt: 分析报告")
if __name__ == "__main__":
    # Originally: "analyze months 7/8/9 data by default".
    # NOTE(review): main() with no arguments loads *every* month folder found
    # under the default data root, not just months 7-9 — confirm intent.
    main()