-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathplotter.py
More file actions
1789 lines (1569 loc) · 72.5 KB
/
plotter.py
File metadata and controls
1789 lines (1569 loc) · 72.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/usr/bin/env python3
"""
Plot results from squeezing.jl simulations.
Usage:
python squeezing_plotter.py <filename1.jld2> [filename2.jld2 ...] [output.png]
python squeezing_plotter.py [-vsL] [-instexp] [filename1.jld2 ...] [output.png]
python squeezing_plotter.py # Automatically finds files in data/ directory
arguments:
-vsL: Plot sigma_inf vs L instead of sigma(t) vs t
-instexp: Plot instantaneous power-law exponent beta(t)
"""
import h5py
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import sys
import os
import glob
from pathlib import Path
try:  # Compatibility shim: some matplotlib versions removed cbook._Stack,
    # which parts of the interactive machinery may still expect.
    import matplotlib.cbook
    if not hasattr(matplotlib.cbook, "_Stack"):
        class _Stack(list):
            """Minimal stack with push/pop/current, mirroring the removed API."""
            def push(self, item):
                self.append(item)
                return item
            def pop(self):
                return super().pop() if self else None
            def current(self):
                return self[-1] if self else None
        matplotlib.cbook._Stack = _Stack
except Exception:
    # Best-effort patch only. Catch Exception (not bare `except:`) so
    # KeyboardInterrupt/SystemExit still propagate.
    pass
# Set matplotlib to use LaTeX for text rendering
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['font.family'] = 'serif'
matplotlib.rcParams['font.serif'] = ['Times New Roman', 'Times', 'DejaVu Serif']
matplotlib.rcParams['font.size'] = 22
matplotlib.rcParams['axes.labelsize'] = 16
matplotlib.rcParams['axes.titlesize'] = 16
matplotlib.rcParams['xtick.labelsize'] = 12
matplotlib.rcParams['ytick.labelsize'] = 12
matplotlib.rcParams['legend.fontsize'] = 11
matplotlib.rcParams['figure.dpi'] = 75
matplotlib.rcParams['savefig.dpi'] = 150
matplotlib.rcParams['savefig.bbox'] = 'tight'
# Marker size options
matplotlib.rcParams['lines.markersize'] = 5  # Default marker size
matplotlib.rcParams['lines.markeredgewidth'] = 0.5  # Default marker edge width
# Plotting style options (module-level boolean toggles; flip by hand to change behavior).
# Written as plain True/False instead of the original obfuscated `not False`.
plot_as_line = True  # If True, plot data as lines instead of markers
plot_instantaneous_exponent = True  # If True, plot instantaneous power-law exponent
linewidth = 2.75
b_exponent = 5  # Base for computing instantaneous exponent: beta(t) = log_b(sigma(t/b)/sigma(t))
cmap = plt.cm.coolwarm  # Colormap for plotting colors by order of input files
def read_jld2_file(filename):
    """
    Read a JLD2 file (which is an HDF5 file) and extract the data.

    Walks the HDF5 tree recursively; dataset values are collected into a
    flat dict keyed by their slash-separated HDF5 path.

    Arguments:
    - filename: Path to the JLD2/HDF5 file.

    Returns a dictionary mapping dataset paths to their values.
    """
    data = {}
    with h5py.File(filename, 'r') as f:
        def extract_data(name, obj):
            if isinstance(obj, h5py.Dataset):
                try:
                    # Try to read the dataset as a full array first
                    arr = obj[:]
                    if arr.dtype == 'object':
                        # Object dtype: 0-d arrays need scalar access
                        if len(arr.shape) == 0:
                            data[name] = arr[()]
                        else:
                            data[name] = arr
                    else:
                        data[name] = arr
                except Exception:
                    # Scalar datasets don't support slicing; read directly.
                    # (Narrowed from a bare `except:` so interrupts propagate.)
                    data[name] = obj[()]
            elif isinstance(obj, h5py.Group):
                # Recursively extract from groups, building slash-paths
                for key in obj.keys():
                    extract_data(f"{name}/{key}" if name else key, obj[key])
        # Start extraction from root
        for key in f.keys():
            extract_data(key, f[key])
    return data
def load_jld2_simple(filename):
    """
    Simplified loader for JLD2 files.

    JLD2 files are HDF5 files with a specific structure: variables may be
    stored under the '_jld2' group (parallel 'keys'/'data' entries, with
    datasets named by index) or directly at the file root. Values are
    decoded to plain Python/NumPy types where possible.

    Arguments:
    - filename: Path to the JLD2/HDF5 file.

    Returns a dict mapping variable names to decoded values.
    """
    data = {}
    with h5py.File(filename, 'r') as f:
        def extract_value(obj, path=""):
            """Recursively extract values from HDF5 structure."""
            if isinstance(obj, h5py.Dataset):
                try:
                    # Check shape first to avoid slicing errors on scalars
                    if obj.shape == ():
                        # Scalar value - use [()] to get the value directly
                        val = obj[()]
                        if isinstance(val, bytes):
                            return val.decode('utf-8', errors='ignore').strip('\x00')
                        elif isinstance(val, str):
                            return val
                        elif isinstance(val, (bool, np.bool_)):
                            return bool(val)
                        elif isinstance(val, (int, np.integer)):
                            return int(val)
                        elif isinstance(val, (float, np.floating)):
                            return float(val)
                        else:
                            return val
                    else:
                        # Array - safe to use [:]
                        arr = obj[:]
                        if arr.dtype.kind == 'S':  # Byte-string array - try to decode
                            try:
                                if arr.size == 1:
                                    return arr.tobytes().decode('utf-8', errors='ignore').strip('\x00')
                                else:
                                    return np.array([a.tobytes().decode('utf-8', errors='ignore').strip('\x00') for a in arr])
                            except Exception:
                                return np.array(arr)
                        elif arr.dtype.kind == 'U':  # Unicode
                            return np.array([str(a) for a in arr])
                        else:
                            return np.array(arr)
                except Exception:
                    # Don't print a warning for every failed extraction to reduce noise
                    return None
            elif isinstance(obj, h5py.Group):
                # Group - recurse into children, dropping unreadable entries
                result = {}
                for key in obj.keys():
                    val = extract_value(obj[key], f"{path}/{key}" if path else key)
                    if val is not None:
                        result[key] = val
                return result if result else None
            return None
        # JLD2 stores data in _jld2/keys and _jld2/data
        if '_jld2' in f:
            jld2_group = f['_jld2']
            if 'keys' in jld2_group and 'data' in jld2_group:
                keys = jld2_group['keys']
                data_group = jld2_group['data']
                # Extract keys (they're stored as byte strings)
                key_names = []
                for i in range(len(keys)):
                    try:
                        key_str = keys[i].tobytes().decode('utf-8', errors='ignore').strip('\x00')
                        key_names.append(key_str)
                    except Exception:
                        # Narrowed from a bare `except:`; skip undecodable keys
                        pass
                # Extract corresponding data (datasets named by index)
                for i, key_name in enumerate(key_names):
                    if str(i) in data_group:
                        val = extract_value(data_group[str(i)])
                        if val is not None:
                            data[key_name] = val
        # Also try direct access (JLD2 might store directly)
        for key in f.keys():
            if key != '_jld2':
                val = extract_value(f[key], key)
                if val is not None:
                    data[key] = val
    return data
def identify_rule(R_or, R_and):
    """
    Identify which pre-defined rule (R, F, or M) matches the given R_or and R_and.

    Arguments:
    - R_or, R_and: Neighborhood offset sets, as an (n, 2) numpy array, an
      object array of pairs, or a list/tuple of 2-element sequences.

    Returns the rule name ("R", "F", or "M") as a string, or "unknown" if
    no match is found or the inputs cannot be normalized.
    """
    def normalize_set(R):
        """Convert an offset set to a sorted list of int tuples, or None on failure."""
        if R is None:
            return None
        if isinstance(R, np.ndarray):
            if R.ndim == 2 and R.shape[1] == 2:
                # Array of shape (n, 2) - each row is an offset pair
                return sorted([tuple(int(R[i, j]) for j in range(2)) for i in range(len(R))])
            elif R.ndim == 1:
                # Might be a flattened array or an object array of tuples
                try:
                    if R.dtype == object:
                        return sorted([tuple(x) for x in R if x is not None])
                    # Otherwise fall through to the final `return None`
                except Exception:
                    # Narrowed from a bare `except:`; keep best-effort behavior
                    pass
        elif isinstance(R, (list, tuple)):
            # Handle list of lists or list of tuples
            result = []
            for x in R:
                if isinstance(x, (list, tuple, np.ndarray)):
                    result.append(tuple(int(y) for y in x))
                else:
                    result.append(tuple(x))
            return sorted(result)
        return None
    R_or_norm = normalize_set(R_or)
    R_and_norm = normalize_set(R_and)
    if R_or_norm is None or R_and_norm is None:
        return "unknown"
    # Known rules: (R_or, R_and) offset sets for each named rule
    known_rules = {
        "R": (sorted([(0, 1), (0, -1)]), sorted([(1, 0), (-1, 0)])),
        "F": (sorted([(1, 1), (0, -1)]), sorted([(1, 0), (-1, -1)])),
        "M": (sorted([(1, 0), (0, -1)]), sorted([(1, 0), (0, 1)])),
    }
    for rule, (ref_or, ref_and) in known_rules.items():
        if R_or_norm == ref_or and R_and_norm == ref_and:
            return rule
    return "unknown"
def get_orientation(r, rotate):
    """
    Get orientation symbol based on (r, rotate) tuple.
    (T, T): ↓, (T, F): ←, (F, T): ↑, (F, F): →
    """
    # Normalize inputs to plain booleans, then dispatch via a lookup table
    # instead of an if/elif chain.
    symbols = {
        (True, True): r'$\downarrow$',
        (True, False): r'$\leftarrow$',
        (False, True): r'$\uparrow$',
        (False, False): r'$\rightarrow$',
    }
    return symbols[(bool(r), bool(rotate))]
def plot_dw_growth(filenames, output_filename=None, allhists=False):
    """
    Plot domain wall growth sigma(t) vs time for dw_growth mode.
    Can handle single file or list of files.

    Arguments:
    - filenames: List of JLD2 filenames or single filename
    - output_filename: Optional output filename for saving the plot
    - allhists: If True, plot all individual histories. If False, plot mean ± 1 std dev.

    Fixes vs. original: file-name placeholders restored in the f-string
    messages, the reference-curve label now interpolates the exponent,
    bare `except:` clauses narrowed, and the six scattered `import re`
    statements hoisted to a single function-top import.
    """
    import re  # used repeatedly below for rule/p extraction
    # Read-only module-level configuration toggles
    global plot_as_line, plot_instantaneous_exponent, b_exponent
    # Convert single filename to list
    if isinstance(filenames, str):
        filenames = [filenames]
    # Load data from all files
    all_data = []
    rule_name = None
    for filename in filenames:
        # Try multiple methods to read JLD2 file
        data = None
        # Method 1: Try jld2 Python package (if available)
        try:
            import jld2
            with jld2.open(filename, 'r') as f:
                data = dict(f)
            print(f"Successfully read {filename} using jld2 package")
        except ImportError:
            pass  # jld2 not available, try other methods
        except Exception as e:
            print(f"jld2 package failed: {e}")
        # Method 2: Try h5py with our custom loader
        if data is None:
            try:
                data = load_jld2_simple(filename)
                if data:
                    print(f"Successfully read {filename} using h5py")
            except Exception as e:
                print(f"h5py method failed: {e}")
        # Method 3: Try direct h5py access (simpler structure)
        if data is None or len(data) == 0:
            try:
                data = {}
                with h5py.File(filename, 'r') as f:
                    # Try to access variables directly
                    for key in ['mode', 'Lx', 'Ly', 'sigma_avg', 'r', 'rotate', 'R_or', 'R_and', 'histories']:
                        if key in f:
                            try:
                                obj = f[key]
                                if isinstance(obj, h5py.Dataset):
                                    # Check if it's a scalar
                                    if obj.shape == ():
                                        # Scalar value
                                        data[key] = obj[()]
                                    else:
                                        # Array
                                        data[key] = np.array(obj[:])
                                elif isinstance(obj, h5py.Group):
                                    # It's a group, try to extract recursively
                                    pass
                            except Exception:
                                # Silently skip if we can't read it
                                pass
                    # Try to get kwargs
                    if 'kwargs' in f:
                        kwargs_group = f['kwargs']
                        if isinstance(kwargs_group, h5py.Group):
                            kwargs_dict = {}
                            for k in kwargs_group.keys():
                                try:
                                    v_obj = kwargs_group[k]
                                    if isinstance(v_obj, h5py.Dataset):
                                        if v_obj.shape == ():
                                            kwargs_dict[k] = v_obj[()]
                                        else:
                                            kwargs_dict[k] = np.array(v_obj[:])
                                except Exception:
                                    pass
                            if kwargs_dict:
                                data['kwargs'] = kwargs_dict
            except Exception as e:
                print(f"Direct h5py access failed: {e}")
        if data is None or len(data) == 0:
            print(f"Could not read {filename}")
            print("Try installing jld2 package: pip install jld2")
            print("Or ensure the file is a valid JLD2/HDF5 file")
            continue
        # Determine mode from filename
        if '_dw_growth' in filename:
            mode = 'dw_growth'
        elif '_dw_roughness' in filename:
            mode = 'dw_roughness'
        else:
            mode = 'unknown'
        if mode != 'dw_growth':
            print(f"File {filename} is not in dw_growth mode (filename doesn't contain '_dw_growth'), skipping")
            continue
        # Get n_trials and print it
        # Try to extract from sigma_trials array shape/length
        n_trials = None
        # Method 1: Extract from sigma_trials shape (should be (time_steps, n_trials))
        sigma_trials = data.get('sigma_trials', None)
        if sigma_trials is not None:
            try:
                if hasattr(sigma_trials, 'shape') and len(sigma_trials.shape) >= 2:
                    n_trials = sigma_trials.shape[1]  # Second dimension is number of trials
                elif hasattr(sigma_trials, 'shape') and len(sigma_trials.shape) == 1:
                    # If it's a 1D array, might be structured differently
                    n_trials = len(sigma_trials)
                elif hasattr(sigma_trials, '__len__'):
                    # If it's a list, use length
                    n_trials = len(sigma_trials)
            except Exception:
                pass
        # Method 2: Try using jld2 package if available (handles references properly)
        if n_trials is None:
            try:
                import jld2
                with jld2.open(filename, 'r') as f:
                    jld2_data = dict(f)
                jld2_kwargs = jld2_data.get('kwargs', {})
                if isinstance(jld2_kwargs, dict):
                    n_trials = jld2_kwargs.get('n_trials', None)
            except ImportError:
                pass
            except Exception:
                pass
        # Method 3: Try from already loaded data kwargs
        if n_trials is None:
            kwargs = data.get('kwargs', {})
            if isinstance(kwargs, dict):
                n_trials = kwargs.get('n_trials', None)
        # Convert to int
        if n_trials is not None:
            if isinstance(n_trials, np.ndarray):
                if n_trials.size == 1:
                    n_trials = int(n_trials[()])
                else:
                    n_trials = None
            elif isinstance(n_trials, (int, np.integer)):
                n_trials = int(n_trials)
        # Print for each file
        if n_trials is not None:
            print(f"File {filename}: n_trials = {n_trials}")
        else:
            print(f"File {filename}: n_trials not found")
        sigma_avg = data.get('sigma_avg', None)
        if sigma_avg is None:
            print(f"No sigma_avg data found in {filename}, skipping")
            continue
        # Convert to numpy array if needed
        if not isinstance(sigma_avg, np.ndarray):
            sigma_avg = np.array(sigma_avg)
        # Flatten if needed
        if sigma_avg.ndim > 1:
            sigma_avg = sigma_avg.flatten()
        # Extract r and rotate
        r = data.get('r', False)
        rotate = data.get('rotate', False)
        # Convert to boolean if needed
        if isinstance(r, np.ndarray):
            r = bool(r[()]) if r.size == 1 else False
        if isinstance(rotate, np.ndarray):
            rotate = bool(rotate[()]) if rotate.size == 1 else False
        # Get Lx and Ly for threshold check
        Lx = data.get('Lx', None)
        Ly = data.get('Ly', None)
        if isinstance(Lx, np.ndarray):
            Lx = int(Lx[()]) if Lx.size == 1 else None
        if isinstance(Ly, np.ndarray):
            Ly = int(Ly[()]) if Ly.size == 1 else None
        # Check if sigma exceeds threshold (half the box size along the wall normal)
        if Lx is not None and Ly is not None:
            threshold = Ly / 2.0 if not rotate else Lx / 2.0
            max_sigma = np.max(sigma_avg[1:])  # Skip first point at t=0
            if max_sigma > threshold:
                print(f"WARNING: sigma(t) exceeds threshold in {filename}")
                print(f" Maximum sigma = {max_sigma:.3f}, threshold = {threshold:.3f}")
                print(f" (rotate = {rotate}, threshold is {'Lx' if rotate else 'Ly'}/2)")
        # Get rule - first try reading directly, then fall back to identifying from R_or/R_and
        file_rule = data.get('rule', None)
        if file_rule is None:
            # Fall back to identifying from R_or and R_and
            R_or = data.get('R_or', None)
            R_and = data.get('R_and', None)
            file_rule = identify_rule(R_or, R_and)
        else:
            # Convert to string if it's an array
            if isinstance(file_rule, np.ndarray):
                if file_rule.size == 1:
                    file_rule = str(file_rule[()])
                else:
                    # Try to decode as string if it's a byte array
                    try:
                        file_rule = file_rule.tobytes().decode('utf-8').strip('\x00')
                    except Exception:
                        file_rule = str(file_rule[0]) if len(file_rule) > 0 else "unknown"
            else:
                file_rule = str(file_rule)
            # Clean up: extract just the rule letter (R, F, or M) if it's embedded in other text
            match = re.search(r'([RFM])', str(file_rule).upper())
            if match:
                file_rule = match.group(1)
            # If it's already just R, F, or M, keep it
            elif str(file_rule).upper().strip() in ['R', 'F', 'M']:
                file_rule = str(file_rule).upper().strip()
            else:
                file_rule = "unknown"
        if rule_name is None:
            rule_name = file_rule
        elif rule_name != file_rule:
            print(f"Warning: Files have different rules ({rule_name} vs {file_rule})")
        # Also get R_or and R_and for storage
        R_or = data.get('R_or', None)
        R_and = data.get('R_and', None)
        # Get p from kwargs (defaults to 0.0)
        kwargs = data.get('kwargs', {})
        if isinstance(kwargs, dict):
            p = kwargs.get('p', 0.0)
        else:
            p = 0.0
        # Convert to float if needed
        if isinstance(p, np.ndarray):
            p = float(p[()]) if p.size == 1 else 0.0
        else:
            p = float(p) if p is not None else 0.0
        # Fallback: try to extract p from filename if not found in kwargs
        # Filename format: ..._dw_growthp0.02_... or ..._dw_growth_...
        if abs(p) < 1e-10:  # Check if p is effectively zero
            # Look for pattern: dw_roughnessp0.02 or dw_growthp0.02
            match = re.search(r'_dw_(?:roughness|growth)p([\d.]+)', filename)
            if match:
                try:
                    p = float(match.group(1))
                except ValueError:
                    p = 0.0
        # Get orientation
        orientation = get_orientation(r, rotate)
        # Get histories if available
        histories = data.get('histories', None)
        if histories is not None:
            # Convert to numpy array if needed
            if not isinstance(histories, np.ndarray):
                histories = np.array(histories)
            # Ensure it's 2D: (n_trials, time_steps)
            if histories.ndim == 1:
                # If 1D, it might be a single trial - reshape
                histories = histories.reshape(1, -1)
        else:
            print(f"No histories found in {filename}")
        # Store data for plotting
        all_data.append({
            'sigma_avg': sigma_avg,
            'r': r,
            'rotate': rotate,
            'orientation': orientation,
            'filename': filename,
            'rule': file_rule,
            'R_or': R_or,
            'R_and': R_and,
            'p': p,
            'histories': histories  # Can be None if not available
        })
    if len(all_data) == 0:
        print("No valid data to plot")
        return
    # Create figure with PRL-style aesthetics - make window smaller
    if plot_instantaneous_exponent:
        # Create single plot for instantaneous exponent only
        fig, ax_exp = plt.subplots(figsize=(4., 4.), dpi=100)
        ax = None  # Don't plot sigma(t)
    else:
        fig, ax = plt.subplots(figsize=(4., 4.), dpi=100)  # Smaller window
        ax_exp = None
    # Color mapping for orientations (same as dw_roughness)
    orientation_colors = {
        r'$\rightarrow$': 'blue',
        r'$\uparrow$': 'red',
        r'$\leftarrow$': 'green',
        r'$\downarrow$': 'purple'
    }
    # Marker shapes for different p values (if any p != 0)
    marker_shapes = ['o', 's', '^', 'P']  # circle, square, triangle, plus
    p_values = sorted(set([item['p'] for item in all_data]))
    has_nonzero_p = any(p != 0.0 for p in p_values)
    # Create mapping from p to marker shape
    p_to_marker = {}
    if has_nonzero_p:
        # Only assign shapes to nonzero p values, zero p gets default 'o'
        nonzero_p = [p for p in p_values if p != 0.0]
        for i, p_val in enumerate(nonzero_p):
            if i < len(marker_shapes):
                p_to_marker[p_val] = marker_shapes[i]
        # Zero p gets circle
        if 0.0 in p_values:
            p_to_marker[0.0] = 'o'
    else:
        # All p are zero, use default circle
        p_to_marker = {0.0: 'o'}
    # Plot each curve with power law fit
    for idx, data_item in enumerate(all_data):
        sigma_avg = data_item['sigma_avg']
        orientation = data_item['orientation']
        p = data_item.get('p', 0.0)
        file_rule = data_item.get('rule', rule_name)  # Use stored rule or fallback to global
        # Get color for this orientation
        color = orientation_colors.get(orientation, 'gray')
        # Get marker shape for this p value
        marker = p_to_marker.get(p, 'o')
        # Create time array (skip first point at t=0)
        T_evolve = len(sigma_avg) - 1
        time = np.arange(1, T_evolve + 1)  # Skip t=0
        sigma = sigma_avg[1:]  # Skip first data point
        # Remove any zero or negative values for log scale
        valid = (sigma > 0) & (time > 0)
        time_plot = time[valid]
        sigma_plot = sigma[valid]
        if len(time_plot) < 2:
            continue
        # For rule M with uparrow orientation, only fit to first 100 data points
        if file_rule == "M" and orientation == r'$\uparrow$':
            n_fit_points = min(100, len(time_plot))
            time_fit = time_plot[:n_fit_points]
            sigma_fit = sigma_plot[:n_fit_points]
        else:
            time_fit = time_plot
            sigma_fit = sigma_plot
        if len(time_fit) < 2:
            continue
        # Fit power law: sigma ~ a * t^beta
        # On log-log scale: log(sigma) = log(a) + beta * log(t)
        log_t = np.log(time_fit)
        log_sigma = np.log(sigma_fit)
        # Remove any invalid values
        valid_fit = np.isfinite(log_t) & np.isfinite(log_sigma)
        if np.sum(valid_fit) < 2:
            continue
        log_t_fit = log_t[valid_fit]
        log_sigma_fit = log_sigma[valid_fit]
        # Linear regression: log(sigma) = beta * log(t) + log(a)
        # Using least squares: [log(t) ones] * [beta; log(a)] = log(sigma)
        X = np.vstack([log_t_fit, np.ones(len(log_t_fit))]).T
        coeffs = np.linalg.lstsq(X, log_sigma_fit, rcond=None)[0]
        beta = coeffs[0]  # Power law exponent
        log_a = coeffs[1]  # log of prefactor
        a = np.exp(log_a)  # Prefactor
        # Generate fit curve (note: sigma_fit is reused here for the fitted curve)
        t_fit = np.logspace(np.log10(time_plot.min()), np.log10(time_plot.max()), 100)
        sigma_fit = a * (t_fit ** beta)
        # Format beta to 2 decimal places
        beta_str = f"{beta:.2f}"
        # Plot data points (only if not plotting instantaneous exponent)
        if not plot_instantaneous_exponent and ax is not None:
            # Extract orientation symbol from LaTeX string (e.g. r'$\downarrow$' -> '\downarrow')
            orientation_symbol = orientation.replace('$', '').strip()
            # Format label - include p if any file has nonzero p
            if has_nonzero_p:
                p_str = f"{p:.2f}"
                label = rf'$\beta_{{{orientation_symbol}}} = {beta_str}$, $p = {p_str}$'
            else:
                label = rf'$\beta_{{{orientation_symbol}}} = {beta_str}$'
            if plot_as_line:
                # Plot as line instead of markers
                ax.loglog(time_plot, sigma_plot, '-', color=color,
                          linewidth=linewidth, alpha=0.7, label=label)
            else:
                # Use marker shape based on p value
                ax.loglog(time_plot, sigma_plot, marker,
                          markerfacecolor=color, markeredgecolor='k',
                          alpha=0.7, label=label, markersize=5)
            # Plot power law fit as dashed line with same color
            ax.loglog(t_fit, sigma_fit, '--', color=color, linewidth=linewidth, alpha=0.4)
            # Plot histories if available
            histories = data_item.get('histories', None)
            if histories is not None and isinstance(histories, np.ndarray):
                # Transpose if needed: histories should be (n_trials, time_steps) array
                if histories.ndim == 2:
                    # Check if we need to transpose (time_steps, n_trials) -> (n_trials, time_steps)
                    if histories.shape[0] < histories.shape[1]:
                        # Likely (n_trials, time_steps) - no transpose needed
                        histories_transposed = histories
                    else:
                        # Likely (time_steps, n_trials) - transpose
                        histories_transposed = np.transpose(histories)
                    n_trials_hist, n_time = histories_transposed.shape
                    # Extract histories, skipping first point at t=0
                    if n_time == len(sigma_avg):
                        # Histories include t=0
                        histories_plot = histories_transposed[:, 1:]
                    elif n_time == len(sigma_avg) - 1:
                        # Histories already skip t=0
                        histories_plot = histories_transposed[:, :]
                    else:
                        # Length mismatch, skip
                        histories_plot = None
                    if histories_plot is not None and histories_plot.shape[1] == len(time_plot):
                        if allhists:
                            # Plot all individual histories as faint lines
                            for trial_idx in range(n_trials_hist):
                                trial_sigma = histories_plot[trial_idx, :]
                                # Filter out invalid values
                                valid_hist = (trial_sigma > 0) & (time_plot > 0)
                                if np.sum(valid_hist) >= 2:
                                    ax.loglog(time_plot[valid_hist], trial_sigma[valid_hist],
                                              '-', color=color, linewidth=0.5, alpha=0.15)
                        else:
                            # Compute ±1 standard deviation over trials
                            # histories_plot is (n_trials, time_steps); sigma_plot is the mean.
                            # Handle NaN values in histories (from padding)
                            std_sigma = np.nanstd(histories_plot, axis=0)
                            # Compute ±1 std dev bounds around sigma_plot
                            sigma_upper = sigma_plot + std_sigma
                            sigma_lower = sigma_plot - std_sigma
                            # Filter out invalid values (positive for log scale and finite)
                            valid = (sigma_plot > 0) & (sigma_upper > 0) & (sigma_lower > 0) & (time_plot > 0) & \
                                np.isfinite(sigma_upper) & np.isfinite(sigma_lower) & np.isfinite(std_sigma)
                            if np.sum(valid) >= 2:
                                # Fill between ±1 std dev
                                ax.fill_between(time_plot[valid], sigma_lower[valid], sigma_upper[valid],
                                                color=color, alpha=0.15, linewidth=0)
        # Plot instantaneous power-law exponent if requested
        if plot_instantaneous_exponent and ax_exp is not None:
            # Label for exponent plot (no beta in label since we're plotting it)
            label_exp = orientation
            # Compute instantaneous exponent: beta(t) = log_b(sigma(t/b) / sigma(t))
            # Follow the exact approach from ca_plotter.py
            b = b_exponent
            ts = time_plot
            mt = sigma_plot  # mt is sigma in our case
            # Compress: compressed_ts[i] = ts[int(i/b)], compressed_mt[i] = mt[int(i/b)]
            compressed_ts = np.copy(ts)
            compressed_mt = np.copy(mt)
            for i in range(len(ts)):
                compressed_ts[i] = ts[int(i/b)]
                compressed_mt[i] = mt[int(i/b)]
            # Find unique times
            unique_times = len(np.unique(compressed_ts))
            avgd_comp_ts = np.zeros(unique_times)
            avgd_comp_mt = np.zeros(unique_times)
            # Average the exponent values
            for i in range(len(compressed_ts)):
                indx = i // b
                if indx < unique_times:
                    avgd_comp_ts[indx] = compressed_ts[i]
                    # log_b(compressed_mt[i]/mt[i]) averaged by dividing by b
                    if mt[i] > 0 and compressed_mt[i] > 0:
                        avgd_comp_mt[indx] += (np.log(compressed_mt[i]/mt[i]) / np.log(b)) / b
            # Plot instantaneous exponent: avgd_comp_ts * b (time scaled back by b)
            # on a linear-linear scale
            ax_exp.plot(avgd_comp_ts * b, -avgd_comp_mt, '-',
                        color=color, linewidth=linewidth, alpha=0.7, label=label_exp)
            # Plot fitted beta as horizontal line
            ax_exp.axhline(y=beta, color=color, linestyle='--',
                           linewidth=linewidth, alpha=0.4)
    # Add reference power-law with exponent 0.25 (only for sigma(t) plots)
    if not plot_instantaneous_exponent and ax is not None and len(all_data) > 0:
        # Find the overall time range from all datasets
        all_times = []
        for data_item in all_data:
            sigma_avg = data_item['sigma_avg']
            if len(sigma_avg) > 1:
                time = np.arange(1, len(sigma_avg))
                all_times.extend(time)
        reference_curve = False
        reference_exponent = 0.25
        if reference_curve:
            # Generate reference power-law curve
            if len(all_times) > 0:
                t_min = min(all_times)
                t_max = max(all_times)
                t_ref = np.logspace(np.log10(t_min), np.log10(t_max), 100)
                # Scale to match typical data values - use middle sigma value
                # Find a typical sigma value from the first dataset
                first_sigma = all_data[0]['sigma_avg']
                if len(first_sigma) > 1:
                    # Use middle sigma value to scale (skip first point at t=0)
                    sigma_data = first_sigma[1:]
                    middle_idx = len(sigma_data) // 2
                    if middle_idx < len(sigma_data) and sigma_data[middle_idx] > 0:
                        sigma_scale = sigma_data[middle_idx]
                        # Use corresponding time value
                        t_scale = middle_idx + 1  # time starts at 1
                        sigma_ref = sigma_scale * ((t_ref / t_scale) ** reference_exponent)
                        # FIX: interpolate the exponent value into the label; the
                        # original raw string printed the literal text
                        # 't^{reference_exponent}'.
                        ax.loglog(t_ref, sigma_ref, ':', color='gray', linewidth=linewidth*0.8,
                                  alpha=0.5, label=rf'$t^{{{reference_exponent}}}$')
    # Set up plots based on mode
    if plot_instantaneous_exponent and ax_exp is not None:
        # Only plot instantaneous exponent
        ax_exp.set_xlabel(r'$t$')
        ax_exp.set_ylabel(r'$\beta(t)$')
        # Title with rule in mathsf font - just the rule symbol
        if rule_name and rule_name != "unknown":
            # Ensure rule_name is just a single character (R, F, or M)
            rule_clean = str(rule_name).strip().upper()
            if rule_clean in ['R', 'F', 'M']:
                title = r'$\mathsf{' + rule_clean + '}$'
            else:
                # Try to extract just the letter
                match = re.search(r'([RFM])', rule_clean)
                if match:
                    title = r'$\mathsf{' + match.group(1) + '}$'
                else:
                    title = ""
        else:
            # Try to extract rule name from filename as fallback
            # Filename format: R_dw_growth_... or data/R_dw_growth_...
            extracted_rule = None
            if len(all_data) > 0:
                filename = all_data[0].get('filename', '')
                # Extract rule from start of filename (before first underscore)
                match = re.search(r'^([RFM])_|/([RFM])_', filename)
                if match:
                    extracted_rule = match.group(1) or match.group(2)
            if extracted_rule:
                title = r'$\mathsf{' + extracted_rule + '}$'
            else:
                title = ""
        ax_exp.set_title(title)
        # Grid
        ax_exp.grid(True, alpha=0.3, linestyle='--', linewidth=0.5)
        ax_exp.set_axisbelow(True)
        # Legend
        if len(all_data) > 0:
            ax_exp.legend(loc='best', frameon=True, fancybox=False, edgecolor='black', framealpha=0.9)
    elif ax is not None:
        # Plot sigma(t) normally
        # Labels with LaTeX math (use rcParams for fontsize)
        ax.set_xlabel(r'$t$')
        # Check if "_maxheight" is in any filename
        has_maxheight = False
        if len(all_data) > 0:
            for data_item in all_data:
                filename = data_item.get('filename', '')
                if '_maxheight' in filename:
                    has_maxheight = True
                    break
        if has_maxheight:
            ax.set_ylabel(r'$\widetilde\sigma(t)$')
        else:
            ax.set_ylabel(r'$\sigma(t)$')
        # Title with rule in mathsf font - just the rule symbol
        if rule_name and rule_name != "unknown":
            # Ensure rule_name is just a single character (R, F, or M)
            rule_clean = str(rule_name).strip().upper()
            if rule_clean in ['R', 'F', 'M']:
                title = r'$\mathsf{' + rule_clean + '}$'
            else:
                # Try to extract just the letter
                match = re.search(r'([RFM])', rule_clean)
                if match:
                    title = r'$\mathsf{' + match.group(1) + '}$'
                else:
                    title = ""
        else:
            # Try to extract rule name from filename as fallback
            # Filename format: R_dw_growth_... or data/R_dw_growth_...
            extracted_rule = None
            if len(all_data) > 0:
                filename = all_data[0].get('filename', '')
                # Extract rule from start of filename (before first underscore)
                match = re.search(r'^([RFM])_|/([RFM])_', filename)
                if match:
                    extracted_rule = match.group(1) or match.group(2)
            if extracted_rule:
                title = r'$\mathsf{' + extracted_rule + '}$'
            else:
                title = ""
        ax.set_title(title)
        # Grid (intentionally disabled)
        # ax.grid(True, which='both', alpha=0.3, linestyle='--', linewidth=0.5)
        # ax.set_axisbelow(True)
        # Legend
        if len(all_data) > 0:
            ax.legend(loc='best', frameon=True, fancybox=False, edgecolor='black', framealpha=0.9)
    # Tight layout
    plt.tight_layout()
    # Save or show
    if output_filename:
        plt.savefig(output_filename, dpi=300, bbox_inches='tight')
        print(f"Plot saved to {output_filename}")
    else:
        # Make the window smaller when displaying
        mngr = fig.canvas.manager
        if hasattr(mngr, 'window'):
            mngr.window.wm_geometry("400x400+100+100")  # width x height + x + y
        plt.show()
    plt.close()
def plot_dw_roughness(filenames, output_filename=None):
"""
Plot L vs sigma_inf for dw_roughness mode files.
Each file plots a single point where L = min(Lx, Ly) and sigma_inf is the estimated
long-time steady-state value of sigma.
"""
# Convert single filename to list
if isinstance(filenames, str):
filenames = [filenames]
# Load data and extract L and sigma_inf for each file
sigma_inf_list = []
L_list = []
orientations = []
p_list = []
rule_name = None
for filename in filenames:
# Try multiple methods to read JLD2 file
data = None
# Method 1: Try jld2 Python package (if available)
try:
import jld2
with jld2.open(filename, 'r') as f:
data = dict(f)
except ImportError:
pass
except Exception as e:
print(f"jld2 package failed: {e}")
# Method 2: Try h5py with our custom loader
if data is None:
try:
data = load_jld2_simple(filename)
except Exception as e:
print(f"h5py method failed: {e}")
# Method 3: Try direct h5py access
if data is None or len(data) == 0:
try:
data = {}
with h5py.File(filename, 'r') as f:
for key in ['mode', 'Lx', 'Ly', 'sigma_avg', 'r', 'rotate', 'R_or', 'R_and']:
if key in f:
try:
obj = f[key]