-
Notifications
You must be signed in to change notification settings - Fork 31
Expand file tree
/
Copy path: gather_report.sql
More file actions
1910 lines (1892 loc) · 121 KB
/
gather_report.sql
File metadata and controls
1910 lines (1892 loc) · 121 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
-- gather_report.sql renders pg_gather captured data as a single self-contained HTML page.
-- Suppress psql informational chatter so only report content reaches stdout.
\set QUIET 1
-- Emit the static document head and embedded stylesheet.
-- NOTE(review): every \echo line below is literal HTML/CSS output consumed by the
-- report's own JavaScript/CSS selectors — the text must not be reworded or reformatted.
\echo <!DOCTYPE html>
\echo <html><meta charset="utf-8" />
\echo <style>
\echo #finditem,#paramtune,table {box-shadow: 0px 20px 30px -10px grey; margin: 2em; caption {font:large bold; text-align:left; white-space: nowrap; span {font: italic bold 1.7em Georgia, serif}}}
\echo table, th, td { border: 1px solid #6FAEBF; border-spacing: 0; padding: 4px;position: relative; }
\echo th {background-color: #d2f5ff;cursor: pointer; position:sticky; top:1em; border-color: #4F8E9F;z-index: 1}
\echo tr:nth-child(even) {background-color: #eef8ff}
\echo c { display: block }
\echo c:hover,li:hover { background-color: #DFD; text-shadow: #800 0.5px 0 0.5px; }
\echo a:hover,tr:hover { background-color: #EBFFDA}
\echo ol { width: fit-content;}
-- Severity highlight classes used by findings/annotations in the generated report.
\echo .warn { font-weight:bold; background-color: #FBA }
\echo .high { border: 5px solid red;font-weight:bold}
\echo .lime { font-weight:bold;background-color: #FFD}
\echo .lineblk {float: left; margin:2em }
\echo .thidden tr { td:nth-child(2),th:nth-child(2) {display: none} td:first-child {color:blue}}
\echo .bar {display:inline-block; border: 7px outset brown; border-width:7px 0; margin:0 5px;box-shadow: 2px 2px grey;} /* bar for graph */
\echo #bottommenu { position: fixed; right: 0px; bottom: 0px; padding: 5px; border : 2px solid #AFAFFF; border-radius: 5px; z-index: 3}
\echo #cur { font: 5em arial; position: absolute; color:brown; animation: vanish 2s ease forwards; z-index: 3 } /*sort indicator*/
\echo #dtls,#finditem,#paramtune,#menu { font-weight:initial;line-height:1.5em;position:absolute;background-color:#FAFFEA;border: 2px solid blue; border-radius: 5px; padding: 1em;box-shadow: 0px 20px 30px -10px grey; z-index: 2}
\echo #dtls { left:100%; top: 4%; width: max-content; max-width: 60vw; color: black; z-index: 4}
\echo @keyframes vanish { from { opacity: 1;} to {opacity: 0;} }
\echo summary { padding: 1rem; font: bold 1.2em arial; cursor: pointer }
\echo footer { text-align: center; padding: 3px; background-color:#d2f2ff}
-- Vertical bar-chart layout used by the graphical sections of the report.
\echo .bar-chart { display: flex; justify-content: space-between; align-items: flex-end; height: 300px; padding-bottom: 5px; gap: 10px; }
\echo .bar-group { display: flex; flex-direction: column; align-items: center; justify-content: flex-end; height: 100%; flex: 1; position: relative;}
\echo .vbar { width: 80%; max-width: 100px; background-color: #3b82f6; border-radius: 4px 4px 0 0; position: relative;}
\echo .vbar span { display: block; position: absolute; top: -20px; left: 50%; transform: translateX(-50%);}
\echo </style>
-- Switch query output to HTML table format and drop per-query footers (row counts).
\H
\pset footer off
-- Keep report queries single-threaded for predictable execution on the analysis host.
SET max_parallel_workers_per_gather = 0;
-- Set psql variable :pg18 — true when the capture carries read_bytes data in pg_get_io
-- (presumably only populated by newer-version gathers; TODO confirm against gather.sql).
SELECT sum(read_bytes) IS NOT NULL AS pg18 FROM pg_get_io \gset
-- Set :reset_ts — the earliest stats_reset seen across the captured stat subsystems;
-- later queries use it as a fallback when a per-object stats_reset is NULL.
SELECT min(min) AS reset_ts FROM
(SELECT min(stats_reset) FROM pg_get_io
UNION
SELECT stats_reset FROM pg_stat_archiver
UNION
SELECT stats_reset FROM pg_get_wal
UNION
SELECT stats_reset FROM pg_get_bgwriter) a \gset
-- Report banner: inline SVG logo plus a "Loading..." indicator (element id "busy";
-- presumably hidden by the report's JavaScript once rendering completes — TODO confirm).
\echo <h1>
\echo <svg width="10em" viewBox="0 0 140 80">
\echo <path fill="none" stroke="#000000" stroke-linecap="round" stroke-width="2" d="m 21.2,46.7 c 1,2 0.67,4 -0.3,5.1 c -1.1,1 -2,1.5 -4,1 c -10,-3 -4,-25 -4 -25 c 0.6,-10 8,-9 8 -9 s 7,-4.5 11,0.2 c 1.2,1.4 1.7,3.3 1.7,5.17 c -0.1,3 3,7 -2,10 c-2,2 -1,5 -8,5.5 m -2 -12 c 0,0 -1,1 -0.2,0.2 m -4 12 c 0,0 0,10 -12,11"/>
\echo <text x="30" y="50" style="font:25px arial">gGather</text>
\echo <text x="60" y="62" style="fill:red; font:15px arial">Report</text>
\echo </svg>
\echo <b id="busy" class="warn"> Loading... </b>
\echo </h1>
\pset tableattr 'id="tblgather" class="lineblk"'
-- Import sanity check: more than one connstr row starting with 'You' in pg_srvr
-- indicates the schema import (gather_schema.sql) did not drop/recreate tables cleanly.
SELECT (SELECT count(*) > 1 FROM pg_srvr WHERE connstr ilike 'You%') AS conlines \gset
\if :conlines
\echo "There is serious problem with the data. Please make sure that all tables are dropped and recreated as part of importing data (gather_schema.sql) and there was no error"
-- Deliberately invalid SQL: forces a loud psql error into the output before quitting.
"SOMETHING WENT WRONG WHILE IMPORTING THE DATA. PLEASE MAKE SURE THAT ALL TABLES ARE DROPPED AND RECREATED AS PART OF IMPORTING";
\q
\endif
-- Head-information table: collection metadata (timestamps, versions, LSN/WAL info)
-- plus client connection lines, rendered as two columns (pg_gather / Report).
-- :tzone (optional PG_GATHER_TIMEZONE env var) overrides the captured log_timezone.
\set tzone `echo "$PG_GATHER_TIMEZONE"`
SELECT * FROM
(WITH conf AS (SELECT CASE WHEN :'tzone' = '' THEN (SELECT setting FROM pg_get_confs WHERE name='log_timezone') ELSE :'tzone' END AS setting),
-- Apply the chosen timezone for this session; fall back to UTC when the name is unknown.
tz AS ( SELECT set_config('timezone',COALESCE(name,'UTC'),false) AS val FROM conf LEFT JOIN pg_timezone_names ON pg_timezone_names.name = conf.setting),
connstrs AS ( SELECT ROW_NUMBER() OVER () AS row_num, COUNT(*) OVER () AS total_rows, connstr FROM pg_srvr)
-- Pair each label with the corresponding formatted value via parallel UNNESTs.
SELECT UNNEST(ARRAY ['Collected At','Collected By','Server build', 'Last Startup','In recovery?','Client','Server','Last Reload','Latest xid','Oldest xid ref','Current LSN','Time Line','WAL file','System','PG Bin Dir.']) AS pg_gather,
UNNEST(ARRAY [CONCAT(collect_ts::text,' (',TZ.val,')'),usr,ver, pg_start_ts::text ||' ('|| collect_ts-pg_start_ts || ')',recovery::text,client::text,server::text,reload_ts::text || ' ('|| collect_ts-reload_ts || ')',
pg_snapshot_xmax(snapshot)::text,pg_snapshot_xmin(snapshot)::text,current_wal::text,timeline::text || ' (Hex:' || upper(to_hex(timeline)) || ')', lpad(upper(to_hex(timeline)),8,'0')||substring(pg_walfile_name(current_wal) from 9 for 16),
'ID: ' || systemid || ' Since: ' || to_timestamp ( systemid >> 32 ) || ' ('|| collect_ts-to_timestamp ( systemid >> 32 ) || ')',bindir]) AS "Report"
FROM pg_gather LEFT JOIN tz ON TRUE
UNION ALL
-- Many connection lines (> 3): bucket them into groups of roughly six per row.
-- BUGFIX: total_rows/6 is integer division, so 4-5 rows gave NTILE(0) which raises
-- "argument of ntile must be greater than zero"; clamp the bucket count to >= 1.
(SELECT 'Client conn.' as col1 , STRING_AGG(connstr, ', ') AS col2
FROM (
SELECT connstr, NTILE(GREATEST(total_rows/6, 1)::int) OVER (ORDER BY row_num) AS group_number
FROM connstrs
WHERE row_num < total_rows AND total_rows > 3
) AS grouped_data GROUP BY group_number ORDER BY group_number)
UNION ALL
-- Few connection lines (<= 3): one row per connstr, last row reserved for the build line.
SELECT 'Client conn.' as col1 , connstr AS col2 FROM connstrs WHERE row_num < total_rows AND total_rows <= 3
UNION ALL
SELECT 'Client build' as col1, connstr AS col2 FROM connstrs WHERE row_num = total_rows AND total_rows > 2
) a WHERE "Report" IS NOT NULL ORDER BY 1;
\pset tableattr 'id="dbs" class="thidden"'
\C ''
-- Per-database statistics. The second (hidden) column is a CSV payload consumed by
-- the report's JavaScript (class "thidden" hides it; see the stylesheet rules).
-- Daily averages are computed over the days since stats_reset (fallback :reset_ts),
-- clamped to at least 1 day to avoid division by zero.
WITH cts AS (SELECT COALESCE(collect_ts,(SELECT max(state_change) FROM pg_get_activity)) AS c_ts FROM pg_gather)
SELECT datname "DB Name",concat(tup_inserted/days,',',tup_updated/days,',',tup_deleted/days,',',to_char(COALESCE(pg_get_db.stats_reset,:'reset_ts'),'YYYY-MM-DD HH24-MI-SS'),',',datid,',',mxidage,',',encod,',',colat)
,xact_commit/days "Avg.Commits",xact_rollback/days "Avg.Rollbacks",(tup_inserted+tup_updated+tup_deleted)/days "Avg.DMLs", CASE WHEN blks_fetch > 0 THEN blks_hit*100/blks_fetch ELSE NULL END "Cache hit ratio"
,temp_files/days "Avg.Temp Files",temp_bytes/days "Avg.Temp Bytes",db_size "DB size",age "Age"
FROM pg_get_db
LEFT JOIN LATERAL (SELECT GREATEST((EXTRACT(epoch FROM(c_ts-COALESCE(pg_get_db.stats_reset, :'reset_ts')))/86400)::bigint,1) as days FROM cts) AS lat1 ON TRUE;
\pset tableattr off
-- "Parameter Recommendations" widget: static HTML form (CPU/memory/storage/workload/
-- filesystem inputs) whose results are filled in client-side by getreccomendation()
-- (defined in the report's JavaScript, outside this chunk — TODO confirm).
\echo <div id="paramrecs">
\echo <details style="clear: left; border: 2px solid #b3aeae; border-radius: 5px; padding: 1em;margin: 2em;">
\echo <summary style="font: italic bold 2em Georgia">Parameter Recommendations</summary>
\echo <fieldset style="border: 2px solid blue; border-radius: 5px; padding: 1em; width: fit-content;">
\echo <legend>Inputs</legend>
\echo <label for="cpus">CPUs:
\echo <input type="number" id="cpus" name="cpus" value="4">
\echo </label>
\echo <label for="mem" style="padding-left: 3em;">Memory(GB):
\echo <input type="number" id="mem" name="mem" value="8">
\echo </label>
\echo <label for="strg" style="padding-left: 3em;"> Storage:
\echo <select id="strg" name="strg">
\echo <option value="ssd">SSD/NVMe</option>
\echo <option value="san">SAN</option>
\echo <option value="mag">Magnetic</option>
\echo </select>
\echo </label>
\echo <label for="wrkld" style="padding-left: 3em;"> Work load:
\echo <select id="wrkld" name="wrkld">
\echo <option value="oltp">OLTP</option>
\echo <option value="olap">OLAP/DSS</option>
\echo <option value="mixed">Mixed</option>
\echo </select>
\echo </label>
\echo <label for="flsys" style="padding-left: 3em;"> Filesystem:
\echo <select id="flsys" name="flsys">
\echo <option value="rglr">Regular (like: ext4/xfs)</option>
\echo <option value="cow">COW (like: zfs/btrfs)</option>
\echo </select>
\echo </label>
\echo <p>☛ Please provide the CPU and memory available on the host machine. Choose the most suitable options from the list to receive specific recommendations. If you are unsure, seek expert guidance.</p>
\echo </fieldset>
-- Empty <ol> below is populated client-side with the computed recommendations.
\echo <div id="paramtune" style="padding:2em;position:relative;width: fit-content;">
\echo <h3 style="font: italic 1.2em Georgia, serif;text-decoration: underline; margin: 0 0 0.5em;">Recommendations:</h3>
\echo <ol>
\echo </ol>
\echo <p>* Collecting pg_gather data during right utilization levels is important to tune the system for the specific workload</p>
\echo </div>
\echo <button type="button" onclick="getreccomendation()" title="Calculate / Recalculate Parameters">🔁 Calculate</button>
\echo <button type="button" onclick="copyashtml()" title="Copy as html tags">Copy as HTML tags</button>
\echo <button type="button" onclick="copyrichhtml()" title="Copy as Rich HTML">Copy as Rich HTML</button>
\echo </details>
\echo </div>
-- Static section index (anchors to the tables emitted below) plus a fixed bottom-right
-- floating menu duplicating the same links (element #menu; presumably toggled by the
-- report's JavaScript — TODO confirm).
\echo <h2 id="topics">Sections</h2>
\echo <ol>
\echo <li><a href="#tabInfo">Tables</a></li>
\echo <li><a href="#tabPart">Partitioned Tables</a></li>
\echo <li><a href="#IndInfo">Indexes</a></li>
\echo <li><a href="#params">Parameters / Settings</a></li>
\echo <li><a href="#tblextn">Extensions</a></li>
\echo <li><a href="#tblhba">Security-HBA rules</a>
\echo <li><a href="#tblcs">Connection & Users</a></li>
\echo <li><a href="#tblDBTime">Database Time</a></li>
\echo <li><a href="#tblsess">Session Details</a></li>
\echo <li><a href="#tblstmnt">Top Statements</a></li>
\echo <li><a href="#tblreplstat">Replications</a></li>
\echo <li><a href="#tblchkpnt" >BGWriter & Checkpointer</a></li>
\echo <li><a href="#finditem">Findings</a></li>
\echo </ol>
\echo <div id="bottommenu">
\echo <a href="#topics" title="Sections">☰ Section Index (Alt+I)</a>
\echo <div id="menu" style="display:none; position: relative">
\echo <ol>
\echo <li><a href="#tblgather">Head Info</a></li>
\echo <li><a href="#paramrecs">Parameter Recommendations</a></li>
\echo <li><a href="#tabInfo">Tables</a></li>
\echo <li><a href="#tabPart">Partitioned Tables</a></li>
\echo <li><a href="#IndInfo">Indexes</a></li>
\echo <li><a href="#params">Parameters / Settings</a></li>
\echo <li><a href="#tblextn">Extensions</a></li>
\echo <li><a href="#tblhba">Security-HBA rules</a>
\echo <li><a href="#tblcs">Connection & Users</a></li>
\echo <li><a href="#tblDBTime">Database Time</a></li>
\echo <li><a href="#tblsess">Session Details</a></li>
\echo <li><a href="#tblstmnt">Top Statements</a></li>
\echo <li><a href="#tblreplstat">Replications</a></li>
\echo <li><a href="#tblchkpnt" >BGWriter & Checkpointer</a></li>
\echo <li><a href="#tbliostat">IO Statistics</a></li>
\echo <li><a href="#finditem">Findings</a></li>
\echo </ol>
\echo </div>
\echo </div>
-- Container for all report sections; revealed client-side once loading finishes.
\echo <div id="sections" style="display:none">
\pset footer on
\pset tableattr 'id="tabInfo" class="thidden"'
-- Tables report (top 10000 by table+index size). The hidden second column packs
-- per-table details as CSV for the report's JavaScript. Bloat% is estimated only
-- when the table has >999 blocks and exceeds the pg_tab_bloat page estimate.
SELECT c.relname || CASE WHEN inh.inhrelid IS NOT NULL THEN ' (part)' WHEN c.relkind != 'r' THEN ' ('||c.relkind||')' ELSE '' END "Name" ,
concat(r.relid,',',r.n_tup_ins,',',r.n_tup_upd,',',r.n_tup_del,',',r.n_tup_hot_upd,',',isum.totind,',',isum.ind0scan,',',isum.pk,',',isum.uk,',',inhp.relname,',',inhp.relkind,',',c.relfilenode,',',c.reltablespace,',',c.reloptions),r.relnamespace "NS", CASE WHEN r.blks > 999 AND r.blks > tb.est_pages THEN (r.blks-tb.est_pages)*100/r.blks ELSE NULL END "Bloat%",
r.n_live_tup "Live",r.n_dead_tup "Dead", CASE WHEN r.n_live_tup <> 0 THEN ROUND((r.n_dead_tup::real/r.n_live_tup::real)::numeric,1) END "D/L",
r.rel_size "Rel size",r.tot_tab_size "Tot.Tab size",r.tab_ind_size "Tab+Ind size",r.rel_age "Rel. Age",to_char(r.last_vac,'YYYY-MM-DD HH24:MI:SS') "Last vacuum",to_char(r.last_anlyze,'YYYY-MM-DD HH24:MI:SS') "Last analyze",r.vac_nos "Vaccs",
ct.relname "Toast name",rt.tab_ind_size "Toast + Ind" ,rt.rel_age "Toast Age",GREATEST(r.rel_age,rt.rel_age) "Max age",
c.blocks_fetched "Fetch",c.blocks_hit*100/nullif(c.blocks_fetched,0) "C.Hit%",to_char(r.lastuse,'YYYY-MM-DD HH24:MI:SS') "Last Use"
FROM pg_get_rel r
-- Exclude toast ('t') and partitioned-parent ('p') relations from the main listing.
JOIN pg_get_class c ON r.relid = c.reloid AND c.relkind NOT IN ('t','p')
LEFT JOIN pg_get_toast t ON r.relid = t.relid
LEFT JOIN pg_get_class ct ON t.toastid = ct.reloid
LEFT JOIN pg_get_rel rt ON rt.relid = t.toastid
LEFT JOIN pg_tab_bloat tb ON r.relid = tb.table_oid
LEFT JOIN pg_get_inherits inh ON r.relid = inh.inhrelid
LEFT JOIN pg_get_class inhp ON inh.inhparent = inhp.reloid
-- Per-table index summary: total indexes, never-scanned indexes, PK/UK counts.
LEFT JOIN (SELECT count(indexrelid) totind,count(indexrelid)FILTER( WHERE numscans=0 ) ind0scan, count(indexrelid) FILTER (WHERE indisprimary) pk,
count(indexrelid) FILTER (WHERE indisunique) uk, indrelid FROM pg_get_index GROUP BY indrelid ) AS isum ON isum.indrelid = r.relid
ORDER BY r.tab_ind_size DESC LIMIT 10000;
\pset tableattr 'id="tabPart" class="thidden"'
-- Partitioned-tables summary: declarative (relkind 'p') parents first, then
-- inheritance-based (relkind 'r') parents. "Fetch Prune %" approximates partition
-- pruning effectiveness as the busiest partition's share of total block fetches
-- (sum(bigint) yields numeric, so the division is exact, not integer-truncated).
WITH ptables AS ( SELECT p.relname , p.relkind, i.inhparent, i.inhrelid
FROM pg_get_class p LEFT JOIN pg_get_inherits i ON i.inhparent = p.reloid
WHERE p.relkind in ('p','r'))
SELECT p.relname "Partitioned Table", CONCAT(any_value(c.relname) FILTER (WHERE dpart = 't'),',',any_value(r.n_live_tup) FILTER (WHERE dpart='t')) "Default Partition Name, Count",
'Native-Declarative' "Partitioning Type", count(r.relid) "Partitions", sum(r.tot_tab_size) "tot_tab_size" , sum(r.tab_ind_size) "tab_ind_size",
round(max(c.blocks_fetched)/sum(NULLIF(c.blocks_fetched,0))*100 ,1) "Fetch Prune %"
FROM ptables p LEFT JOIN pg_get_rel r ON p.inhrelid = r.relid
LEFT JOIN pg_get_class c ON p.inhrelid = c.reloid
WHERE p.relkind = 'p' GROUP BY 1
UNION ALL
-- Inheritance branch: no default partition concept, so the second column is just ','.
SELECT p.relname ,',', 'Inheritance' , count(r.relid) "Partitions", sum(r.tot_tab_size) ,
sum(r.tab_ind_size), max(c.blocks_fetched)/sum(NULLIF(c.blocks_fetched,0))*100
FROM ptables p JOIN pg_get_rel r ON p.inhrelid = r.relid
JOIN pg_get_class c ON p.inhrelid = c.reloid
WHERE p.relkind = 'r' GROUP BY 1;
\pset tableattr 'id="IndInfo"'
-- Indexes report (top 10000 by size): usage counts, cache-hit ratio, and last use,
-- excluding indexes on toast tables (ct.relkind != 't').
SELECT n.nsname "Schema",ct.relname AS "Table", ci.relname as "Index",indisunique as "UK?",indisprimary as "PK?",numscans as "Scans",size,ci.blocks_fetched "Fetch",ci.blocks_hit*100/nullif(ci.blocks_fetched,0) "C.Hit%", to_char(i.lastuse,'YYYY-MM-DD HH24:MI:SS') "Last Use"
FROM pg_get_index i
JOIN pg_get_class ct on i.indrelid = ct.reloid and ct.relkind != 't'
JOIN pg_get_class ci ON i.indexrelid = ci.reloid
LEFT JOIN pg_get_ns n ON n.nsoid = ci.relnamespace
ORDER BY size DESC LIMIT 10000;
\pset tableattr 'id="params"'
-- Parameters/settings report: merges three sources of configuration —
--   dset: per-database / per-role overrides (pg_get_db_role_confs),
--   fset: effective settings (pg_get_confs) joined with file-based entries
--         (pg_get_file_confs), listing alternate file locations and values.
WITH dset AS (
SELECT string_agg(setting,chr(10)) setting,a.name FROM
(SELECT btrim(CASE WHEN rolname IS NULL THEN '' ELSE 'User: '|| rolname ||' , ' END || CASE WHEN datname IS NULL THEN '' ELSE 'DB: '|| datname END ,' ,') || ' ==> ' ||setting AS setting
,split_part(setting,'=',1) AS name
FROM pg_get_db_role_confs drc
LEFT JOIN LATERAL unnest(config) AS setting ON TRUE
LEFT JOIN pg_get_db db ON drc.db = db.datid
LEFT JOIN pg_get_roles rol ON rol.oid = drc.setrole
ORDER BY 1,2 NULLS LAST
) AS a GROUP BY 2 ),
fset AS (SELECT coalesce(s.name,f.name) AS name
,s.setting,s.unit,s.source
-- Only list file entries that differ from (or are absent in) the active source.
,string_agg(f.sourcefile ||' - '|| f.setting || CASE WHEN f.applied = true THEN ' (applicable)' ELSE '' END ,chr(10)) FILTER (WHERE s.source != f.sourcefile OR s.source IS NULL ) AS loc
FROM pg_get_confs s FULL OUTER JOIN pg_get_file_confs f ON lower(s.name) = lower(f.name)
GROUP BY 1,2,3,4 ORDER BY 1)
SELECT fset.name "Name",fset.setting "Setting",fset.unit "Unit",fset.source "Current Source",
CASE WHEN dset.setting IS NULL THEN '' ELSE dset.setting ||chr(10) END || CASE WHEN fset.loc IS NULL THEN '' ELSE fset.loc END AS "Other Locations & Values"
FROM fset LEFT JOIN dset ON fset.name = dset.name;
\pset footer off
\pset tableattr 'id="tblextn"'
-- Installed extensions with owner, schema, relocatability, and version.
SELECT ext.oid,extname "Extension",rolname "Owner",nsname "Schema", extrelocatable "Relocatable?",extversion "Version"
FROM pg_get_extension ext LEFT JOIN pg_get_roles ON extowner=pg_get_roles.oid
LEFT JOIN pg_get_ns ON extnamespace = nsoid;
\pset tableattr 'id="tblhba"'
-- pg_hba.conf analysis: normalizes each rule's address/mask into a CIDR network
-- block, then flags rules shadowed by earlier, broader rules, and estimates the
-- number of IPs each rule matches.
WITH rule_data AS ( SELECT seq, typ, db, usr, addr, s.prefix AS cidr_mask, mask,
CASE WHEN addr IN ('all','samehost','samenet') OR (mask IS NULL AND addr IS NOT NULL) THEN 'IPv4,IPv6' ELSE 'IPv' || family(addr::inet)
END AS "IP", method, err, CASE WHEN addr IN ('all','samehost','samenet') THEN NULL ELSE set_masklen(addr::inet, s.prefix) END AS network_block
FROM pg_get_hba_rules
-- Recover the CIDR prefix length from a dotted-decimal netmask by brute force (0..128).
LEFT JOIN LATERAL (
SELECT i AS prefix FROM generate_series(0, 128) AS i
WHERE addr NOT IN ('all','samehost','samenet') AND mask IS NOT NULL AND netmask(set_masklen(addr::inet, i)) = mask::inet LIMIT 1 ) s ON TRUE )
SELECT
v.seq AS "Line", v.typ AS "Type", v.db AS "Database", v.usr AS "User", v.addr AS "Address", v.cidr_mask AS "CIDR Mask",
v.mask AS "DDN/Binary Mask", v."IP" AS "IP Ver.", v.method AS "Auth Method", v.err AS "Error", v.network_block AS "Network Block",
-- A rule is shadowed when an earlier rule of a compatible type covers its network,
-- databases, and users (replication entries only shadow replication entries).
( SELECT string_agg(s.seq::text, ',') FROM rule_data s
WHERE s.seq < v.seq
AND ( (v.typ = s.typ) OR (v.typ = 'hostssl' AND s.typ = 'host') OR (v.typ = 'hostnossl' AND s.typ = 'host'))
AND ( v.typ = 'local' OR (v.network_block IS NOT NULL AND s.network_block IS NOT NULL AND s.network_block >>= v.network_block) OR s.addr = 'all' )
AND ( ('replication' = ANY(v.db) AND 'replication' = ANY(s.db) AND v.db <@ s.db) OR (NOT ('replication' = ANY(v.db)) AND (s.db = '{all}' OR v.db <@ s.db)))
AND (s.usr = '{all}' OR v.usr <@ s.usr) ) AS "Shadowed By",
CASE v."IP" WHEN 'IPv4' THEN (2::numeric ^ (32 - masklen(network_block)))::numeric(38,0)
WHEN 'IPv6' THEN (2::numeric ^ (128 - masklen(network_block)))::NUMERIC(40,0) ELSE NULL END
AS "No. of IPs"
FROM rule_data v
ORDER BY v.seq;
\pset tableattr 'id="tblcs" class="lineblk thidden"'
-- Connection counts per database, broken down by state and SSL usage; the hidden
-- JSON column carries the per-role breakdown for client-side drill-down.
WITH db_role AS (SELECT
pg_get_activity.datid,rolname,count(*) FILTER (WHERE state='active') as active,
count(*) FILTER (WHERE state='idle in transaction') as idle_in_transaction,
count(*) FILTER (WHERE state='idle') as idle,
count(*) as totalcons,
count (*) FILTER (WHERE ssl = true) as sslcons,
count (*) FILTER (WHERE ssl = false) as nonsslcons
FROM pg_get_activity
LEFT JOIN pg_get_roles on usesysid=pg_get_roles.oid
LEFT JOIN pg_get_db on pg_get_activity.datid = pg_get_db.datid
GROUP BY 1,2
ORDER BY 1,2),
db AS (SELECT datid,sum(active) "Active",sum(idle_in_transaction) "IdleInTrans",sum(idle) "Idle",sum(totalcons) "Total",sum(sslcons) "SSL",sum(nonsslcons) "NonSSL"
FROM db_role GROUP BY 1)
SELECT pg_get_db.datname "Database",
(SELECT json_agg(ROW(rolname,active,idle_in_transaction,idle,totalcons,sslcons,nonsslcons)) FROM db_role WHERE db_role.datid = pg_get_db.datid),
"Active","IdleInTrans","Idle","Total","SSL","NonSSL"
FROM pg_get_db LEFT JOIN db ON pg_get_db.datid = db.datid;
\pset tableattr 'id="tblusr" class="thidden"'
-- Connection counts per role (mirror of the per-database table above), plus role
-- attributes: superuser/replication flags, connection limit, password encryption.
WITH rol_db AS (SELECT
rolname,datname,count(*) FILTER (WHERE state='active') as active,
count(*) FILTER (WHERE state='idle in transaction') as idle_in_transaction,
count(*) FILTER (WHERE state='idle') as idle,
count(*) as totalcons,
count (*) FILTER (WHERE ssl = true) as sslcons,
count (*) FILTER (WHERE ssl = false) as nonsslcons
FROM pg_get_activity
join pg_get_roles on usesysid=pg_get_roles.oid
join pg_get_db on pg_get_activity.datid = pg_get_db.datid
GROUP BY 1,2
ORDER BY 1,2),
rol AS (SELECT rolname,sum(active) "Active",sum(idle_in_transaction) "IdleInTrans",sum(idle) "Idle",sum(totalcons) "Total",sum(sslcons) "SSL",sum(nonsslcons) "NonSSL"
FROM rol_db GROUP BY 1)
SELECT pg_get_roles.rolname "User",
(SELECT json_agg(ROW(datname,active,idle_in_transaction,idle,totalcons,sslcons,nonsslcons)) FROM rol_db WHERE rol_db.rolname = pg_get_roles.rolname),
rolsuper "Super?",rolreplication "Repl?", CASE WHEN rolconnlimit > -1 THEN rolconnlimit ELSE NULL END "Limit",
CASE enc_method WHEN 'm' THEN 'MD5' WHEN 'S' THEN 'SCRAM' END "Enc",
"Active","IdleInTrans","Idle","Total","SSL","NonSSL"
FROM pg_get_roles LEFT JOIN rol ON pg_get_roles.rolname = rol.rolname;
\pset tableattr 'id="tblDBTime" name="waits" style="clear: left" class="thidden"'
\C 'WaitEvents'
-- Database-time breakdown from sampled wait events (pg_pid_wait). NULL wait_event
-- is counted as CPU; background-process main-loop waits are excluded as noise.
SELECT COALESCE(wait_event,'CPU') "Event", NULL, count(*)::text "Event Count" FROM pg_pid_wait
WHERE wait_event IS NULL OR wait_event NOT IN ('ArchiverMain','AutoVacuumMain','BgWriterHibernate','BgWriterMain','CheckpointerMain','LogicalApplyMain','LogicalLauncherMain','RecoveryWalStream','SysLoggerMain','WalReceiverMain','WalSenderMain',
'WalWriterMain','CheckpointWriteDelay','PgSleep','VacuumDelay','IoWorkerMain','AutovacuumMain','BgwriterHibernate','BgwriterMain')
GROUP BY 1 ORDER BY count(*) DESC;
\pset tableattr 'id="tblsess" class="thidden"'
\C 'Sessions'
-- Session details: per-PID wait-event percentages (the sampler presumably takes
-- 2000 iterations — the literal 2000 below; TODO confirm against gather.sql),
-- transaction/statement/state ages, and xmin age relative to the newest xid seen.
SELECT * FROM (
WITH w AS (SELECT pid, string_agg( wait_event ||': '|| cnt*100::float/2000 ||'%',', ') waits, sum(cnt) pidwcnt, max(max) itr_max, min(min) itr_min FROM
(SELECT pid,COALESCE(wait_event,'CPU') wait_event,count(*) cnt, max(itr),min(itr) FROM pg_pid_wait GROUP BY 1,2 ORDER BY cnt DESC) pw GROUP BY 1),
-- g: latest activity timestamp and highest xid, used as "now" for age arithmetic.
g AS (SELECT max(ts) ts,max(mx_xid) mx_xid FROM
(SELECT MAX(state_change) as ts,MAX(GREATEST(backend_xid::text::bigint,backend_xmin::text::bigint)) mx_xid FROM pg_get_activity
UNION
SELECT NULL, pg_snapshot_xmax(snapshot)::xid::text::bigint mx_xid FROM pg_gather) a),
wrk AS (select leader_pid, count(*) from pg_get_activity where leader_pid is not null group by 1),
itr AS (SELECT max(itr_max) gitr_max FROM w)
SELECT a.pid,to_jsonb(ROW(d.datname,application_name,client_hostname,sslversion,wrk.count)), a.state,r.rolname "User"
, CASE WHEN a.leader_pid IS NULL THEN host(client_addr) ELSE 'Worker of ' || a.leader_pid END "client"
, CASE query WHEN '' THEN '**'||backend_type||' process**' ELSE query END "Last statement"
, g.ts - backend_start "Connection Since", g.ts - xact_start "Transaction Since", g.mx_xid - backend_xmin::text::bigint "xmin age",
g.ts - query_start "Statement since",g.ts - state_change "State since", w.waits ||
-- Samples a PID "missed" (fewer observations than its iteration span implies) are
-- reported as Net/Delay*: time presumably spent outside wait instrumentation.
CASE WHEN (itr_max - itr_min)::float/itr.gitr_max*2000 - pidwcnt > 0 THEN
', Net/Delay*: ' || round(((itr_max - itr_min)::float/itr.gitr_max*2000 - pidwcnt)::numeric*100/2000,2) || '%'
ELSE '' END waits
FROM pg_get_activity a
LEFT JOIN w ON a.pid = w.pid
LEFT JOIN itr ON true
LEFT JOIN g ON true
LEFT JOIN wrk ON wrk.leader_pid = a.pid
LEFT JOIN pg_get_roles r ON a.usesysid = r.oid
LEFT JOIN pg_get_db d on a.datid = d.datid
ORDER BY "xmin age" DESC NULLS LAST) AS sess
WHERE waits IS NOT NULL OR state != 'idle';
\pset tableattr 'id="tblstmnt"'
\C 'Top Statements'
-- Top statements from pg_get_statements: ranked by the sum of two dense ranks
-- (total time and average execution time); shows any statement in the top 14 of
-- either ranking. Per-call averages for reads/dirtied/written/temp blocks.
SELECT DENSE_RANK() OVER (ORDER BY ranksum) "Rank", "Statement",time_pct "DB.time%", calls "Execs",round((total_time/calls)::numeric,2) "Avg.ExecTime","Avg.Reads","C.Hit%"
,"Avg.Dirty","Avg.Write","Avg.Temp(r)","Avg.Temp(w)" FROM
(select query "Statement",total_time
,round((100*total_time/sum(total_time) OVER ())::numeric,2) AS time_pct, DENSE_RANK() OVER (ORDER BY total_time DESC) AS tottrank,calls
,round((total_time/calls)::numeric,2), DENSE_RANK() OVER (ORDER BY round((total_time/calls)::numeric,2) DESC) as avgtrank
,DENSE_RANK() OVER (ORDER BY total_time DESC)+DENSE_RANK() OVER (ORDER BY round((total_time/calls)::numeric,2) DESC) ranksum
,shared_blks_read/calls "Avg.Reads",
shared_blks_dirtied/calls "Avg.Dirty",
shared_blks_written/calls "Avg.Write",
temp_blks_read/calls "Avg.Temp(r)",
temp_blks_written/calls "Avg.Temp(w)"
,100 * shared_blks_hit / nullif((shared_blks_read + shared_blks_hit),0) as "C.Hit%"
from pg_get_statements) AS stmnts
WHERE tottrank < 15 OR avgtrank < 15 ;
\pset tableattr 'id="tblreplstat"'
-- Replication and slot status.
-- M : highest known WAL position (server's current_wal, or the max sent_lsn
--     across walsenders, whichever is greater) — baseline for lag calculations.
-- g : highest transaction id seen in activity / the snapshot, used to turn
--     slot xmin / catalog_xmin values into ages.
-- FULL OUTER JOIN keeps both walsenders without slots and inactive slots.
WITH M AS (SELECT GREATEST((SELECT(current_wal) FROM pg_gather),(SELECT MAX(sent_lsn) FROM pg_replication_stat))),
g AS (SELECT max(mx_xid) mx_xid FROM
  (SELECT MAX(GREATEST(backend_xid::text::bigint,backend_xmin::text::bigint)) mx_xid FROM pg_get_activity
   UNION
   SELECT pg_snapshot_xmax(snapshot)::xid::text::bigint mx_xid FROM pg_gather) a)
SELECT usename AS "Replication User",client_addr AS "Replica Address",pid,state,
 pg_wal_lsn_diff(M.greatest, sent_lsn) "Transmission Lag (Bytes)",pg_wal_lsn_diff(sent_lsn,write_lsn) "Replica Write lag(Bytes)",
 pg_wal_lsn_diff(write_lsn,flush_lsn) "Replica Flush lag(Bytes)",pg_wal_lsn_diff(write_lsn,replay_lsn) "Replay at Replica lag(Bytes)",
 slot_name "Slot",plugin,slot_type "Type",datname "DB name",temporary,active,GREATEST(g.mx_xid-old_xmin::text::bigint,0) as "xmin age",
 GREATEST(g.mx_xid-catalog_xmin::text::bigint,0) as "catalog xmin age", GREATEST(pg_wal_lsn_diff(M.greatest,restart_lsn),0) as "Restart LSN lag(Bytes)",
 GREATEST(pg_wal_lsn_diff(M.greatest,confirmed_flush_lsn),0) as "Confirmed LSN lag(Bytes)"
FROM pg_replication_stat JOIN M ON TRUE
 FULL OUTER JOIN pg_get_slots s ON pid = active_pid
 LEFT JOIN g ON TRUE
 LEFT JOIN pg_get_db ON s.datoid = datid;
\pset tableattr 'id="tblchkpnt"'
-- Checkpoint / background-writer health, derived from the pg_get_bgwriter
-- snapshot normalized over minutes-since-stats-reset (min_since_reset).
-- The "bg" subquery builds the denominators:
--   * buffers_backend: on newer servers (pg_stat_bgwriter no longer tracks
--     backend writes) evictions by client backends from pg_get_io (btype='c')
--     stand in for buffers_backend; otherwise the bgwriter column is used.
--   * total_buffers is floored at 1 and tot_cp NULLIFed to avoid div-by-zero.
-- Buffer counts are converted to MB assuming 8192-byte pages.
SELECT round(checkpoints_req*100/tot_cp,1) "Forced Checkpoint %" ,
round(min_since_reset/tot_cp,2) "avg mins between CP",
round(checkpoint_write_time::numeric/(tot_cp*1000),4) "Avg CP write time (s)",
round(checkpoint_sync_time::numeric/(tot_cp*1000),4) "Avg CP sync time (s)",
round(total_buffers::numeric*8192/(1024*1024),2) "Tot MB Written",
round((buffers_checkpoint::numeric/tot_cp)*8192/(1024*1024),4) "MB per CP",
round(buffers_checkpoint::numeric*8192/(min_since_reset*60*1024*1024),4) "Checkpoint MBps",
round(buffers_clean::numeric*8192/(min_since_reset*60*1024*1024),4) "Bgwriter MBps",
round(bg.buffers_backend::numeric*8192/(min_since_reset*60*1024*1024),4) "Backend MBps",
round(total_buffers::numeric*8192/(min_since_reset*60*1024*1024),4) "Total MBps",
round(buffers_alloc::numeric/total_buffers,3) "New buffers ratio",
round(100.0*buffers_checkpoint/total_buffers,1) "Clean by checkpoints (%)",
round(100.0*buffers_clean/total_buffers,1) "Clean by bgwriter (%)",
round(100.0*bg.buffers_backend/total_buffers,1) "Clean by backends (%)",
-- Bgwriter runs ≈ minutes-since-reset converted to ms divided by bgwriter_delay
round(100.0*maxwritten_clean/(min_since_reset*60000 / delay.setting::numeric),2) "Bgwriter halts (%) per runs",
coalesce(round(100.0*maxwritten_clean/(nullif(buffers_clean,0)/ lru.setting::numeric),2),0) "Bgwriter halt (%) due to LRU hit",
round(min_since_reset/(60*24),1) "Reset days"
FROM pg_get_bgwriter
CROSS JOIN
(WITH client AS (SELECT sum(evictions) buffers_backend FROM pg_get_io WHERE btype='c')
 SELECT
 NULLIF(round(extract('epoch' from (select collect_ts from pg_gather) - stats_reset)/60)::numeric,0) min_since_reset,
 GREATEST(buffers_checkpoint + buffers_clean + COALESCE(client.buffers_backend,pg_get_bgwriter.buffers_backend),1) total_buffers,
 NULLIF(checkpoints_timed+checkpoints_req,0) tot_cp,
 COALESCE(client.buffers_backend,pg_get_bgwriter.buffers_backend) buffers_backend
 FROM pg_get_bgwriter,client) AS bg
LEFT JOIN pg_get_confs delay ON delay.name = 'bgwriter_delay'
LEFT JOIN pg_get_confs lru ON lru.name = 'bgwriter_lru_maxpages';
\pset tableattr 'id="tbliostat"'
-- Per-backend-type I/O rates from the pg_stat_io snapshot (pg_get_io),
-- normalized to per-day figures over the window since the last stats reset.
-- The :pg18 branch uses the byte columns added in newer pg_stat_io;
-- the \else branch derives bytes as operation count * block_size.
-- Common CTEs: cts = collection timestamp (fallback: latest state_change),
-- d = elapsed days since reset (fallback: :'reset_ts'), blk = block size.
\if :pg18
WITH cts AS ( SELECT COALESCE(collect_ts, (SELECT max(state_change) FROM pg_stat_activity)) AS c_ts FROM pg_gather),
rst AS ( SELECT max(stats_reset) AS max_reset FROM pg_get_io),
d AS (SELECT cts.c_ts, rst.max_reset,cts.c_ts - rst.max_reset, EXTRACT(EPOCH FROM (cts.c_ts - COALESCE(rst.max_reset,:'reset_ts')))/86400 AS dys FROM cts, rst),
blk AS (SELECT COALESCE((SELECT setting::INT FROM pg_get_confs WHERE name = 'block_size'),8192) AS blksize)
SELECT
-- btype is a one-letter code assigned at collection time; unmapped codes pass through
  CASE btype WHEN 'a' THEN 'Autovacuum' WHEN 'C' THEN 'Client Backend' WHEN 'G' THEN 'BG writer' WHEN 'b' THEN 'Background Parallel workers' WHEN 'c' THEN 'Client Backends' WHEN 'i' THEN 'I/O Worker'
  WHEN 'k' THEN 'Checkpointer' WHEN 'w' THEN 'WAL Sender' WHEN 'W' THEN 'WAL Writer' WHEN 'r' THEN 'WAL Receiver' WHEN 'l' THEN 'Slot Sync' ELSE btype END As "Backend",
  (sum(reads)/any_value(d.dys))::bigint "Reads/day",(sum(read_bytes)/any_value(d.dys))::bigint "Read Bytes/day",(sum(writes)/any_value(d.dys))::bigint "Writes/day",(sum(write_bytes)/any_value(d.dys))::bigint "Write Bytes/day",(sum(writebacks)*any_value(blksize)/any_value(d.dys))::bigint "Writebacks/day",(sum(extends)/any_value(d.dys))::bigint "Extends/day",(sum(extend_bytes)/any_value(d.dys))::bigint "Extend Bytes/day",
  (sum(hits)/any_value(d.dys))::bigint "Hits/day",(sum(evictions)/any_value(d.dys))::bigint "Evictions/day", (sum(reuses)/any_value(d.dys))::bigint "Reuse/day", (sum(fsyncs)/any_value(d.dys))::bigint "FSyncs/day"
FROM pg_get_io,d,blk
-- WHERE reads > 0 OR writes > 0 OR writebacks > 0 or extends > 0 OR hits > 0 OR evictions > 0 OR reuses > 0 OR fsyncs > 0
GROUP BY 1;
\else
WITH cts AS ( SELECT COALESCE(collect_ts, (SELECT max(state_change) FROM pg_stat_activity)) AS c_ts FROM pg_gather),
rst AS ( SELECT max(stats_reset) AS max_reset FROM pg_get_io),
d AS (SELECT cts.c_ts, rst.max_reset,cts.c_ts - rst.max_reset, EXTRACT(EPOCH FROM (cts.c_ts - COALESCE(rst.max_reset,:'reset_ts')))/86400 AS dys FROM cts, rst),
blk AS (SELECT COALESCE((SELECT setting::INT FROM pg_get_confs WHERE name = 'block_size'),8192) AS blksize)
SELECT
  CASE btype WHEN 'a' THEN 'Autovacuum' WHEN 'C' THEN 'Client Backend' WHEN 'G' THEN 'BG writer' WHEN 'b' THEN 'background Parallel workers' WHEN 'c' THEN 'Client Backends'
  WHEN 'k' THEN 'Checkpointer' WHEN 'w' THEN 'WALSender' ELSE btype END As "Backend",
  (sum(reads)*any_value(blksize)/any_value(d.dys))::bigint "Read bytes/day",(sum(writes)*any_value(blksize)/any_value(d.dys))::bigint "Write bytes/day",(sum(writebacks)*any_value(blksize)/any_value(d.dys))::bigint "Writeback bytes/day", (sum(extends)*any_value(blksize)/any_value(d.dys))::bigint "Extend bytes/day",
  (sum(hits)*any_value(blksize)/any_value(d.dys))::bigint "Avg. Cache Hit bytes/day",(sum(evictions)*any_value(blksize)/any_value(d.dys))::bigint "Evictions bytes/day", (sum(reuses)/any_value(d.dys))::bigint "Avg. Reuse", (sum(fsyncs)/any_value(d.dys))::bigint "Avg. FSyncs"
FROM pg_get_io, blk, d
-- Rows with no activity at all are suppressed in this branch
WHERE reads > 0 OR writes > 0 OR writebacks > 0 or extends > 0 OR hits > 0 OR evictions > 0 OR reuses > 0 OR fsyncs > 0
GROUP BY 1;
\endif
-- End of the tabular sections: emit the (initially empty) Findings list that
-- checkfindings() fills at load time, close the "sections" div, and open the
-- inline <script> element that the rest of this file writes into.
\echo <ol id="finditem" style="padding:2em;position:relative">
\echo <h3 style="font: italic bold 2em Georgia, serif;text-decoration: underline; margin: 0 0 0.5em;">Findings:</h3>
\echo </ol>
\echo </div> <!--End of "sections"-->
\echo <footer>End of <a href="https://github.com/jobinau/pg_gather">pgGather</a> Report</footer>
\echo <script>
-- Switch psql to raw output so the following SELECTs emit bare JavaScript
\pset format unaligned
\pset tuples_only on
-- Emit a single JS assignment:  obj={...}  consumed by the check* functions in
-- the report. Every aliased sub-select packs one metric group; anonymous ROW()
-- fields surface as f1,f2,... in the resulting JSON object.
SELECT 'obj='||to_jsonb(r)::text FROM
(SELECT
-- clsr: true when data was collected on a standby / server in recovery
  (select recovery from pg_gather) AS clsr,
-- tabs: total tables, never-vacuumed, missing bloat stats (with live tuples), never-analyzed
  (SELECT to_jsonb(ROW(count(*),COUNT(*) FILTER (WHERE last_vac IS NULL), COUNT(*) FILTER (WHERE b.table_oid IS NULL AND r.n_live_tup != 0 ),COUNT(*) FILTER (WHERE last_anlyze IS NULL)))
    FROM pg_get_rel r JOIN pg_get_class c ON r.relid = c.reloid AND c.relkind NOT IN ('t','p')
    LEFT JOIN pg_tab_bloat b ON c.reloid = b.table_oid) AS tabs,
-- cn: client backends that have wait samples; f2 counts "new" connections (<15 min old)
  (SELECT to_jsonb(ROW(COUNT(*),COUNT(*) FILTER (WHERE CONN < interval '15 minutes' ) )) FROM
     (WITH g AS (SELECT MAX(state_change) as ts FROM pg_get_activity)
      SELECT pid,g.ts - backend_start CONN
      FROM pg_get_activity
      LEFT JOIN g ON true
      WHERE EXISTS (SELECT pid FROM pg_pid_wait WHERE pid=pg_get_activity.pid)
      AND backend_type='client backend') cn) AS cn,
  (SELECT to_jsonb(ROW(count(*) FILTER (WHERE relkind='p'), count(*) FILTER (WHERE relkind='r' AND relpersistence='u'), max(reloid))) from pg_get_class) as clas,
  (SELECT to_jsonb(ROW(count(*) FILTER (WHERE state='active' AND state IS NOT NULL),
    count(*) FILTER (WHERE state='idle in transaction'), count(*) FILTER (WHERE state='idle'),
    count(*) FILTER (WHERE state IS NULL), count(*) FILTER (WHERE leader_pid IS NOT NULL) ,
    count(*), count(distinct backend_type)))
  FROM pg_get_activity) as sess,
  (WITH curdb AS (SELECT
   CASE WHEN (SELECT COUNT(*) FROM pg_srvr) > 0
    THEN (SELECT trim(both '\"' from substring(connstr from '\"\w*\"')) "curdb" FROM pg_srvr WHERE connstr like '%to database%') ELSE (SELECT 'template1' "curdb")
   END),
   cts AS (SELECT COALESCE((SELECT COALESCE(collect_ts,(SELECT max(state_change) FROM pg_get_activity)) FROM pg_gather),current_timestamp) AS c_ts)
   SELECT to_jsonb(ROW(curdb,COALESCE(pg_get_db.stats_reset,:'reset_ts'),c_ts,days))
   FROM curdb LEFT JOIN pg_get_db ON pg_get_db.datname=curdb.curdb
   LEFT JOIN LATERAL (SELECT GREATEST((EXTRACT(epoch FROM(c_ts- COALESCE(pg_get_db.stats_reset,:'reset_ts')))/86400)::bigint,1) as days FROM cts) AS lat1 ON TRUE
   LEFT JOIN cts ON true) as dbts,
  (WITH maxmxid AS (SELECT max(mxidage) FROM pg_get_db),
  topdbmx AS (SELECT array_agg(datname),maxmxid.max FROM pg_get_db JOIN maxmxid ON pg_get_db.mxidage=maxmxid.max AND pg_get_db.mxidage > 1000 GROUP BY 2)
  SELECT to_jsonb(ROW(array_agg,max)) FROM topdbmx) AS mxiddbs,
  (SELECT json_agg(pg_get_ns) FROM pg_get_ns) AS ns,
  (SELECT json_agg(pg_get_tablespace) FROM pg_get_tablespace) AS tbsp,
-- arcfail: seconds since last archive, archive byte lag (reconstructs the LSN
-- of the last archived WAL from its file name), plus display strings
  (SELECT to_jsonb((extract (EPOCH FROM (collect_ts - last_archived_time)), pg_wal_lsn_diff( current_wal,
      (coalesce(nullif(CASE WHEN length(last_archived_wal) < 24 THEN '' ELSE ltrim(substring(last_archived_wal, 9, 8), '0') END, ''), '0') || '/' || substring(last_archived_wal, 23, 2) || '000001' ) :: pg_lsn )
      , last_archived_wal, last_archived_time::text || ' (' || CASE WHEN EXTRACT(EPOCH FROM(collect_ts - last_archived_time)) < 0 THEN 'Right Now'::text ELSE (collect_ts - last_archived_time)::text END || ')'))
    FROM pg_gather, pg_archiver_stat) AS arcfail,
  (SELECT to_jsonb(ROW(max(setting) FILTER (WHERE name = 'archive_library'), max(setting) FILTER (WHERE name = 'cluster_name'),count(*) FILTER (WHERE source = 'command line'),any_value(setting) FILTER (WHERE name = 'block_size'),
    json_agg(row(name ,setting)) FILTER
      (WHERE name in ('block_size','max_identifier_length','max_function_args','max_index_keys','segment_size','wal_block_size') AND
      (name,setting) NOT IN (('block_size','8192'),('max_identifier_length','63'),('max_function_args','100'),('max_index_keys','32'),('segment_size','131072'),('wal_block_size','8192'))
      OR (name = 'wal_segment_size' AND unit ='8kB' AND setting != '2048') OR (name = 'wal_segment_size' AND unit ='B' AND setting != '16777216')),
    count(*) FILTER (WHERE source IS NOT NULL)
    )) FROM pg_get_confs ) AS params,
  (SELECT json_agg(row(error, name, setting, sourcefile)) FROM pg_get_file_confs WHERE error IS NOT NULL) AS errparams,
-- crash: heuristic crash/unclean-shutdown detection from timestamps
-- (instance init time is encoded in the high 32 bits of systemid)
  (WITH g AS (SELECT collect_ts,pg_start_ts,reload_ts,to_timestamp ( systemid >> 32 ) init_ts from pg_gather),
    r AS (SELECT LEAST(min(last_vac),min(last_anlyze)) known_ts FROM pg_get_rel)
    SELECT CASE WHEN (g.init_ts IS NULL OR g.reload_ts - g.init_ts > '80 minutes'::interval) AND ( r.known_ts > g.reload_ts OR r.known_ts IS NULL) AND g.collect_ts - g.reload_ts < '10 days'::interval
    THEN g.reload_ts END crash_ts FROM g,r) crash,
-- blkrs: pids that block others but are themselves not blocked (lock-chain roots)
  (WITH blockers AS (select array_agg(victim_pid) OVER () victim,blocking_pids blocker from pg_get_pidblock),
    ublokers as (SELECT unnest(blocker) AS blkr FROM blockers)
    SELECT json_agg(blkr) FROM ublokers
    WHERE NOT EXISTS (SELECT 1 FROM blockers WHERE ublokers.blkr = ANY(victim))) blkrs,
  (select json_agg((victim_pid,blocking_pids)) from pg_get_pidblock) victims,
  (SELECT to_jsonb(( EXTRACT(epoch FROM (end_ts - collect_ts)), pg_wal_lsn_diff(end_lsn, current_wal) * 60 * 60 / EXTRACT( epoch FROM (end_ts - collect_ts) ),
    wal_bytes/(extract (EPOCH FROM (collect_ts - stats_reset))/3600)))
    FROM pg_gather JOIN pg_gather_end ON true
    LEFT JOIN pg_get_wal ON true) sumry,
  (SELECT json_agg((relname,maint_work_mem_gb)) FROM (SELECT relname,n_live_tup*0.2*6 maint_work_mem_gb
    FROM pg_get_rel JOIN pg_get_class ON n_live_tup > 894784853 AND pg_get_rel.relid = pg_get_class.reloid
   ORDER BY 2 DESC LIMIT 3) AS wmemuse) wmemuse,
-- netdlay: estimated client/network delay from wait-sample gaps (see docs page NetDelay.html)
  (WITH w AS (SELECT pid,count(*) cnt, max(itr) itr_max,min(itr) itr_min FROM pg_pid_wait group by 1),
   g AS (SELECT max(itr_max) gmax_itr FROM w)
   SELECT to_jsonb(ROW(SUM(((itr_max - itr_min)::float/gmax_itr)*2000 - cnt),max(gmax_itr),count(pid))) FROM w,g
   WHERE ((itr_max - itr_min)::float/gmax_itr)*2000 - cnt > 0) netdlay,
-- induse: invalid and unused index counts, split regular vs TOAST
  (SELECT to_jsonb(ROW(count(*) FILTER (WHERE indisvalid=false)
    ,count(*) FILTER (WHERE numscans=0 AND tst.toastid IS NULL)  --Unused Indexes of user tables
    ,count(*) FILTER (WHERE numscans=0 AND tst.toastid > 16384)  --Unused TOAST index of user tables
    ,count(*) FILTER (WHERE tst.toastid IS NULL)  --TOTAL User/Regular indexes
    ,count(*) FILTER (WHERE tst.toastid > 16384)  --TOTAL Toast Indexes
    ,sum(size) FILTER (WHERE numscans=0)))
  FROM pg_get_index i
    JOIN pg_get_class ct ON i.indrelid = ct.reloid
    LEFT JOIN pg_get_toast tst ON ct.reloid = tst.toastid) induse,
-- nokey: tables lacking a primary key / lacking both PK and unique key
  (WITH pkuk AS (SELECT indrelid,bool_or(indisprimary) pk,bool_or(indisunique) uk FROM pg_get_index GROUP BY indrelid)
   SELECT to_jsonb(ROW(COUNT(*) FILTER (WHERE pkuk.pk IS NULL OR NOT pkuk.pk), COUNT(*) FILTER (WHERE pkuk.uk IS NULL OR NOT pkuk.uk)))
   FROM pg_get_class c LEFT JOIN pkuk ON pkuk.indrelid = c.reloid WHERE c.relkind IN ('r')) nokey,
-- meta: catalog (oid<16384) size, total table+index size, object count
  (SELECT to_jsonb(ROW(sum(tab_ind_size) FILTER (WHERE relid < 16384),sum(tab_ind_size),count(*))) FROM pg_get_rel) meta
) r;
-- Emit the JS globals used by the check* functions below.
-- ver must match the gather.sql script version; meta is a fallback that
-- doAllChecks() tries to refresh from docurl + "meta.json" at load time.
\echo ver="33";
\echo docurl="https://jobinau.github.io/pg_gather/";
\echo meta={"pgvers":["14.22","15.17","16.13","17.9","18.3"],"commonExtn":["plpgsql","pg_stat_statements","pg_repack"],"riskyExtn":["citus","tds_fdw","pglogical"]};
-- eventMaps: wait-event -> category map, fetched from waitevents.json
\echo let eventMaps;
\echo let colorMaps = new Map([["Activity","#00EE00"],["BufferPin","#8B0000"],["Client","#999999"],["CPU","#00CC00"],["Extension","#6B4226"],["IO","#0000CC"],["IPC","#FF9900"],["LWLock","#8B0000"],["Lock","#FF0000"],["Timeout","#FF00FF"]]);
\echo mgrver="";
\echo datadir="";
\echo autovacuum_freeze_max_age = 0;
-- strfind accumulates the HTML <li> items for the Findings section
\echo let strfind = "";
\echo totdb=0;
-- Sizing inputs with defaults; overwritten from the report's input controls
\echo totCPU=4;
\echo totMem=8;
\echo wrkld="";
\echo let trnsTimout=900;
\echo flsys= "";
-- Lock-chain roots and blocked sessions, derived from obj.victims on load
\echo let blokers = []
\echo let blkvictims = []
\echo let params = []
-- Off-screen canvas reused for text-measurement / drawing by the charts
\echo const canvas=document.createElement("canvas");
\echo const canvascontext=canvas.getContext("2d");
-- JS helper: fetch a URL and parse it as JSON, aborting via AbortController
-- after `timeout` ms. Rejects with "Request timed out" on abort, an HTTP
-- error for non-2xx, or the original error otherwise.
-- NOTE(review): the "?_=" cache-buster uses new Date().getDate() (day of
-- month), so it only varies daily — presumably intentional to allow daily
-- caching of the docs JSON; confirm (getTime() would bust every request).
\echo async function fetchJsonWithTimeout(url, timeout) {
\echo   const controller = new AbortController();
\echo   const timeoutId = setTimeout(() => controller.abort(), timeout);
\echo   try {
\echo     const response = await fetch(url + "?_=" + new Date().getDate(), { signal: controller.signal });
\echo     clearTimeout(timeoutId);
\echo     if (!response.ok) throw new Error("HTTP error! status:" + response.status );
\echo     else return await response.json();
\echo   } catch (error) {
\echo     clearTimeout(timeoutId);
\echo     if (error.name === "AbortError") throw new Error("Request timed out");
\echo     else throw error;
\echo   }
\echo }
-- JS helper: same as fetchJsonWithTimeout but resolves with the raw Response
-- instead of parsed JSON (used where the caller wants headers/text).
\echo async function fetchWithTimeout(url, timeout) {
\echo   const controller = new AbortController();
\echo   const timeoutId = setTimeout(() => controller.abort(), timeout);
\echo   try {
\echo     const response = await fetch(url + "?_=" + new Date().getDate(), { signal: controller.signal });
\echo     clearTimeout(timeoutId);
\echo     if (!response.ok) throw new Error("HTTP error! status:" + response.status );
\echo     else return await response;
\echo   } catch (error) {
\echo     clearTimeout(timeoutId);
\echo     if (error.name === "AbortError") throw new Error("Request timed out");
\echo     else throw error;
\echo   }
\echo }
-- JS: run `callback` after the browser has painted (double requestAnimationFrame)
\echo function afterRenderingComplete(callback) { requestAnimationFrame(() => { requestAnimationFrame(callback); }); }
-- JS driver: refresh meta/waitevents data from the docs site (best effort,
-- with timeouts), reveal the report once rendering settles, then run every
-- section-check function; each check annotates its table and appends findings.
\echo async function doAllChecks(){
\echo   await fetchJsonWithTimeout(docurl + "meta.json",500).then(data => { meta = data; })
\echo   .catch(error => { console.error("Error fetching JSON:", error); });
\echo   try {eventMaps = new Map(await fetchJsonWithTimeout(docurl + "waitevents.json", 5000));}
\echo   catch (error) { console.error("Error fetching wait events JSON:", error); eventMaps = new Map(); }
\echo   console.log("Starting all checks");
-- Swap the busy indicator for the report body only after the paint completes
\echo   afterRenderingComplete(() => { console.log("This runs after the current rendering is complete.");
\echo     document.getElementById("sections").style="display:table";
\echo     document.getElementById("busy").style="display:none";
\echo   });
\echo   checkgather();
\echo   checkpars();
\echo   checkdbs();
\echo   checkconns();
\echo   checkusers();
\echo   checkdbtime();
\echo   checksess();
\echo   checkiostat();
\echo   checkreplstat();
\echo   checktabs();
\echo   checktabPart();
\echo   checkindex();
\echo   checkextn();
\echo   checkhba();
\echo   checkstmnts();
\echo   checkchkpntbgwrtr();
\echo   checkfindings();
\echo   console.log("All checks completed");
\echo }
-- JS bootstrap: once the DOM is ready, split obj.victims (f1=victim pid,
-- f2=array of blocking pids) into blkvictims and blokers (pids that block
-- others without being blocked themselves), then run all checks.
\echo document.addEventListener("DOMContentLoaded", () => {
\echo   if (obj.victims !== null){
\echo     obj.victims.forEach(function(victim){
\echo       blkvictims.push(victim.f1);
\echo     });
\echo     obj.victims.forEach(function(victim){
\echo       victim.f2.forEach(function(blker){
-- A pid is a "bloker" only if it is not itself a victim and not already listed
\echo         if (blkvictims.indexOf(blker) == -1 && blokers.indexOf(blker) == -1) blokers.push(blker);
\echo       });
\echo     });
\echo   }
\echo   doAllChecks();
\echo });
-- JS helper: set a per-cell tooltip (title attribute) on a table row from an
-- index-aligned array of title strings.
-- Fix: declare the loop index with `let` (it previously leaked as an implicit
-- global `i`, which other functions here also use as a loop variable) and
-- correct the misspelled local identifier "tiltes".
\echo function setTitles(tr,titles){
\echo   for(let i=0;i<titles.length;i++) tr.cells[i].title=titles[i];
\echo }
-- JS check: walk the "tblgather" summary table (label in cell 0, value in
-- cell 1) and annotate/flag rows: collector version drift, recovery state,
-- unusual binary dir, xid-horizon age, and MTBF from timeline/uptime.
-- Relies on row order: "System" (uptime days) and "Latest xid" appear before
-- the rows that consume `days` and `xmax`.
\echo function checkgather(){
\echo   const trs=document.getElementById("tblgather").rows
\echo   let days,xmax=0;
\echo   for (let i = 0; i < trs.length; i++) {
-- NOTE(review): `val` is an implicit global (no let/const) — harmless here but worth confirming
\echo     val = trs[i].cells[1];
\echo     switch(trs[i].cells[0].innerText){
\echo       case "pg_gather" :
\echo        val.innerText = val.innerText + "-v" + ver;
\echo        break;
\echo       case "Collected By" :
-- The collector version is the trailing 2 chars of the value; warn if older than this report or the published meta.ver
\echo        if (val.innerText.slice(-2) < ver || val.innerText.slice(-2) < meta.ver ) { val.classList.add("warn"); val.title = "Data is collected using old/obsolete version of gather.sql file. Please use v" + ver;
\echo          strfind += "<li><b>Old/obsolete version (v"+ val.innerText.slice(-2) + ") of pg_gather script (gather.sql) is used for data collection</b>. Please use the latest relase <b>(v" + meta.ver + ")</b> <a href='"+ docurl +"versionpolicy.html'>Details</a></li>";
\echo        }
\echo        break;
\echo       case "In recovery?" :
\echo         if(val.innerText == "true") {val.classList.add("lime"); val.title="Data collected at standby"; obj.primary = false;}
\echo         else obj.primary = true;
\echo         break;
\echo       case "System" :
-- Extract uptime in days from "... (N days ...)" for the MTBF calculation below
\echo         let startIndex = val.innerText.indexOf("(") + 1;
\echo         days = parseInt(val.innerText.substring(startIndex,val.innerText.indexOf(" days", startIndex)));
\echo         break;
\echo       case "PG Bin Dir." :
-- Expect standard Debian (/usr/lib/postgresql/N) or RHEL (/usr/pgsql-N/) paths
\echo         {const pattern = new RegExp("(\\/usr\\/lib\\/postgresql\\/\\d+|\\/usr\\/pgsql-\\d+\\/)");
\echo         if(!pattern.test(val.innerText) ) {
\echo           val.classList.add("warn"); val.title = "Unusual PostgreSQL binary directory : " + val.innerText + ". Could be due to source build or portable binaries.";
\echo           strfind += "<li><b>Unusual PostgreSQL binary directory : " + val.innerText + "</b>. Could be due to custom build or portable binaries. Understand the <a href='"+ docurl +"pgbinary.html'>risk involved</a></li>";
\echo         }
\echo         }
\echo         break;
\echo       case "Latest xid" :
\echo         xmax = parseInt(val.innerText);
\echo         break;
\echo       case "Oldest xid ref" :
\echo         let diff=xmax - parseInt(val.innerText);
\echo         val.innerText += " (" + (diff).toString() + " xids old)";
\echo         if (diff > 10000) {val.classList.add("warn"); val.title = "The oldest transaction is " + diff + " xids old, as per xid horizon";
\echo           strfind += "<li>The oldest transaction is <b>" + diff + " xids old</b>. This can have serious concequnces. Refer <a href='"+ docurl +"xidhorizon.html'>Details</a></li>";
\echo         }
\echo         break;
\echo       case "Time Line" :
-- Timeline N implies N-1 promotions/failovers; flag poor MTBF (<180 days) on long-lived systems
\echo         let Failover = parseInt(val.innerText.substring(0,val.innerText.indexOf(" (")))-1;
\echo         if (days > 30 && Failover > 5){
\echo           let MTBF = days/Failover;
\echo           if (MTBF < 180){
\echo             val.classList.add("warn"); val.title = "Poor MTBF / Availability number. There were " + Failover + " failovers in " + days + " days." ;
\echo             strfind += "<li><b>Poor MTBF / Availability number: "+ Math.round(MTBF) +" days!</b>. There were " + Failover + " failovers in " + days + " days</li>";
\echo           }
\echo         }
\echo     }
\echo   }
\echo }
-- JS check: assemble the Findings list from the `obj` JSON emitted above and
-- from per-parameter warnings collected into `params`, then inject the
-- accumulated HTML into the #finditem <ol>. Field names fN correspond to the
-- ROW(...) positions of the matching sub-select in the obj= query.
\echo function checkfindings(){
\echo   let tmpstr = "";
-- obj.sess.f7 = count of distinct backend types; <4 indicates missing privilege or a mangled (copy-pasted) out.txt
\echo   if (obj.sess.f7 < 4){
\echo     strfind += "<li><b>The pg_gather data is collected by a user who don't have necessary privilege OR Content of the output file (out.txt) is copy-pasted destroying the TSV format</b><br/><b>1.</b>Please run the gather.sql as a privileged user (superuser, rds_superuser etc.) or some account with pg_monitor privilege and <b>2.</b> Please provide the output file as it is without copy-pasting</li>"
\echo     document.getElementById("tblDBTime").title="Waitevents data will be growsly incorrect because the pg_gather data is collected by a user who don't have proper privilege OR content of output file is copy-pasted. Please refer the Findings section";
\echo     document.getElementById("tblDBTime").caption.innerHTML += "<br/>" + document.getElementById("tblDBTime").title
\echo     document.getElementById("tblDBTime").classList.add("high");
\echo   }
\echo   if (obj.sess.f2 > 0) strfind += "<li><b>Found " + obj.sess.f2 + " session(s) in idle-in-transaction state</b>. This can cause poor concurrency. Details in <a href=#tblsess>Sessions</a> section. Consider improving the application code and design</li>";
-- New-connection churn: obj.cn.f2 of f1 sampled connections are younger than 15 minutes
\echo   if (obj.cn.f1 > 0){
\echo     strfind +="<li><b>" + obj.cn.f2 + " / " + obj.cn.f1 + " connections </b> in use are new. "
\echo     if (obj.cn.f2 > 9 || obj.cn.f2/obj.cn.f1 > 0.7 ){
\echo       strfind+="Please consider this for improving connection pooling"
\echo     }
\echo     strfind += "</li>";
\echo   }
\echo   if (obj.induse.f1 > 0 ) strfind += "<li><b>"+ obj.induse.f1 +" Invalid Index(es)</b> found. Recreate or drop them. Refer <a href='"+ docurl +"InvalidIndexes.html'>Details</a></li>";
\echo   if (obj.induse.f2 > 0 ) strfind += "<li><b>"+ obj.induse.f2 +" regular user indexes and " + obj.induse.f3 + " Toast Indexes are unused,</b> out of " + obj.induse.f4 + " user indexes and " + obj.induse.f5 + " Toast Indexes . Currently the unused indexes needs <b>additional "+ bytesToSize(obj.induse.f6) +" to cache</b>. <a href='"+ docurl +"unusedIndexes.html'>Details</a></li>";
\echo   if (obj.mxiddbs !== null) strfind += "<li> Multi Transaction ID age : <b>" + obj.mxiddbs.f2 + "</b> for databases <b>" + obj.mxiddbs.f1 + "</b> <a href='"+ docurl +"mxid.html'>Details</a></li>"
\echo   if (obj.clas.f1 > 0) strfind += "<li><b>"+ obj.clas.f1 +" Natively partitioned tables</b> found. Tables section could contain partitions</li>";
\echo   if (obj.clas.f2 > 0) strfind += "<li><b>"+ obj.clas.f2 +" Unlogged tables found.</b> These tables and associated indexes are ephemeral. <a href='"+ docurl +"unloggedtables.html'>Details</a></li>";
-- obj.params.f3 = number of settings from the command line: >10 suggests a Patroni/HA-managed cluster
\echo   if (obj.params.f3 > 10) strfind += "<li> Patroni/HA PG cluster :<b>" + obj.params.f2 + "</b></li>"
\echo   if (obj.params.f5 != null && obj.params.f5.length > 0) strfind += "<li><b>Non-standard compile/Initialization time parameters detected : " + obj.params.f5.map(function(item) { return item.f1 + ": " + item.f2;}).join(", ") + "</b>. Custom Compilation is prone to bugs, and problems which are difficult to find</li>";
\echo   if (obj.params.f6 == null || obj.params.f6 ==0 ) strfind += "<li><b>No Parameter values found. The data collection could be partial or corrupt Parameter file(s).</b></li>";
\echo   if (obj.errparams !== null && obj.errparams.length > 0) {
\echo     strfind += "<li> <b>Parameter file errors detected :<ul>";
\echo     obj.errparams.forEach(function(t,idx){ strfind += "<li>" + t.f1 + " : " + t.f2 + " = "+ t.f3 +" in file " + t.f4 +"</li>" });
\echo     strfind += "</ul></b></li>";
\echo   }
\echo   if (obj.crash !== null) strfind += "<li>Detected a <b>suspected crash / unclean shutdown around : " + obj.crash + ".</b> Please check the PostgreSQL logs</li>"
\echo   if (obj.nokey.f1 > 0) strfind += "<li><b>"+ obj.nokey.f1 +" Tables without Primary Key</b> and <b>"+ obj.nokey.f2 +" Tables without neither Primary key nor Unique keys</b> found. Please refer <a href='"+ docurl +"pkuk.html'>Details</a></li>";
-- Net/Delay: f1=total estimated delay, f2=max wait-sample iterations, f3=session count
\echo   if (obj.netdlay.f1 > 10) {
\echo     if (obj.netdlay.f1 / obj.netdlay.f2 * 100 > 20 ){ strfind += "<li> There are <b>"+ obj.netdlay.f3 +" Sessions with considerable Net/Delays</b>"
\echo     tmpstr = "Total <a href='"+ docurl +"NetDelay.html'>Net/Delay<a>"
\echo     if (obj.netdlay.f1 / obj.netdlay.f2 > 1){
\echo       tmpstr += " is <b>" + (obj.netdlay.f1 / obj.netdlay.f2).toFixed(1) + "Times ! </b> of overall server activity. which is huge"
\echo     }else if(obj.netdlay.f1 / obj.netdlay.f2 > 0.1){
\echo       tmpstr += " is equivalent to <b>" + (obj.netdlay.f1 * 100 / obj.netdlay.f2).toFixed(2) + "% </b> of server activity"
\echo     }
\echo     if (tmpstr.length > 100 ){
\echo       strfind += "<li>" + tmpstr + "</li>"
\echo       document.getElementById("tblDBTime").tFoot.children[0].children[0].innerHTML += "<br/>" + tmpstr
\echo     }
\echo    }
\echo   }
-- Per-parameter warnings produced by checkpars() into the global `params`
\echo   for (let item of params) {
\echo     if (typeof item.warn != "undefined"){
\echo       strfind += "<li>" + item.warn +"</li>";
\echo     }
\echo   }
-- The findings below only apply to a primary; skip them on standby/recovery
\echo   if(obj.clsr){
\echo     strfind += "<li>PostgreSQL is in Standby mode or in Recovery</li>";
\echo   }else{
\echo     if ( obj.tabs.f2 > 0 ) strfind += "<li> <b>No vacuum info for " + obj.tabs.f2 + "</b> tables/objects </li>";
\echo     if (obj.arcfail != null) {
\echo       if (obj.arcfail.f1 == null) strfind += "<li>No working WAL archiving and backup detected. PITR may not be possible</li>";
\echo       if (obj.arcfail.f1 > 300) strfind += "<li>No WAL archiving happened in last "+ Math.round(obj.arcfail.f1/60) +" minutes. <b>Archiving could be failing</b>; please check PG logs</li>";
\echo       if (obj.arcfail.f2 && obj.arcfail.f2 > 0) strfind += "<li>WAL archiving is <b>lagging by "+ bytesToSize(obj.arcfail.f2,1024) +"</b>. Last archived WAL is : <b>"+ obj.arcfail.f3 +"</b> at "+ obj.arcfail.f4 +".<a href='"+ docurl +"walarchive.html'> Details<a></li>";
\echo     }
\echo     if (obj.wmemuse !== null && obj.wmemuse.length > 0){ strfind += "<li> Biggest <code>maintenance_work_mem</code> consumers are :<b>"; obj.wmemuse.forEach(function(t,idx){ strfind += (idx+1)+". "+t.f1 + " (" + bytesToSize(t.f2) + ") " }); strfind += "</b></li>"; }
\echo     if (obj.victims !== null && obj.victims.length > 0) strfind += "<li><b>" + obj.victims.length + " session(s) blocked.</b></li>"
-- sumry: f1=collection duration (s), f2=current WAL rate, f3=long-term WAL rate
\echo     if (obj.sumry !== null){ strfind += "<li>Data collection took <b>" + obj.sumry.f1 + " seconds. </b>";
\echo       if ( obj.sumry.f1 < 23 ) strfind += "System response is good</li>";
\echo       else if ( obj.sumry.f1 < 28 ) strfind += "System response is below average</li>";
\echo       else strfind += "System response appears to be poor</li>";
\echo       strfind += "<li>Current WAL generation rate is <b>" + bytesToSize(obj.sumry.f2) + " / hour</b>";
\echo       if (obj.sumry.f3 !== null ) strfind += ", Long term average WAL generation rate is <b>" + bytesToSize(obj.sumry.f3) + "/hour</b></li>";
\echo       else strfind += "</li>" }
\echo     if ( obj.clas.f3 > 50000 ) strfind += "<li>Currently <b>OID of pg_class stands at " + Number(obj.clas.f3).toLocaleString("en-US") + "</b>. indicating the usage of temporary tables / High DDL activity </li>";
\echo     if (obj.meta.f1 > 15728640){
\echo       strfind += "<li>" + "The catalog metadata is :<b>" + bytesToSize(obj.meta.f1) + " For " + obj.meta.f3 + " objects. </b><a href='"+ docurl +"catalogbloat.html'> Details<a></li>"
\echo     }
\echo     if (obj.tbsp !== null && obj.tbsp.length > 0){
\echo       const result=obj.tbsp.map(function(item) { return item.tsname + ": " + item.location;}).join(", ");
\echo       strfind += "<li>Found additional <b>" + obj.tbsp.length + " tablespaces ("+ result +")</b> . <a href='"+ docurl +"tablespace.html'> Details<a></li>"
\echo     }
\echo   }
\echo   if ( obj.tabs.f3 > 0 ) strfind += "<li> <b>No statistics available for " + obj.tabs.f3 + " tables/objects</b>, query planning can go wrong. <a href='"+ docurl +"missingstats.html'>Learn Details</a></li>";
\echo   if ( obj.tabs.f1 > 10000) strfind += "<li> There are <b>" + obj.tabs.f1 + " tables/objects</b> in the database. Only the biggest 10000 will be displayed in the <a href=#tabInfo >Tables</a> section. Avoid too many tables/objects in single database. <a href='"+ docurl +"table_object.html'>Learn Details</a></li>";
\echo   if (obj.ns !== null){
\echo     let tempNScnt = obj.ns.filter(n => n.nsname.indexOf("pg_temp") > -1).length + obj.ns.filter(n => n.nsname.indexOf("pg_toast_temp") > -1).length ;
\echo     strfind += "<li><b>" + (obj.ns.length - tempNScnt).toString() + " Regular schema(s) and " + tempNScnt + " temporary schema(s)</b> in this database. <a href='"+ docurl +"schema.html'> Details<a></li>";
\echo   }
-- huge_pages advice when shared_buffers > 16GB (2097152 pages of 8kB) and huge_pages is not "on"
\echo   const sharedBuffers = params.find(p => p.param === "shared_buffers");
\echo   const hugePages = params.find(p => p.param === "huge_pages");
\echo   if ( sharedBuffers?.val > 2097152 && hugePages?.val != "on" ){
\echo     strfind += "<li><b>IMPORTANT : Enabling and enforcing huge_pages is essential for stability and reliability</b>. Especially when the system has shared_buffers of <b>"+ bytesToSize(sharedBuffers.val*8192) +"</b>.</b><a href='"+ docurl +"params/huge_pages.html'>Details<a></li>"
\echo   }
\echo   if (obj.tabs.bloatTabNum > 0) strfind += "<li>Found <b>"+ obj.tabs.bloatTabNum +" bloated tables</b> in this database. This could affect performance. <a href='"+ docurl +"bloat.html'>Details</a></li>";
\echo   document.getElementById("finditem").innerHTML += strfind;
\echo }
-- JS check: annotate the DB-connections table (#tblcs). Cell 7 is assumed to
-- hold the non-SSL connection count and cell 5 the total per row — TODO
-- confirm against the table-building query earlier in this file. Flags rows
-- with a high unencrypted share and appends a session-count tfoot summary.
\echo function checkconns(){
\echo   tab=document.getElementById("tblcs");
\echo   tab.caption.innerHTML="<span>DB Connections</span>";
\echo   const trs=tab.rows
\echo   let nonssl=0;
\echo   for (var i=1;i<trs.length;i++){
\echo     tr=trs[i];
\echo     if (tr.cells[7].innerText > 0) nonssl += parseInt(tr.cells[7].innerText);
-- Warn when >20 connections on the row and more than half are unencrypted
\echo     if (tr.cells[5].innerText > 20 && tr.cells[7].innerText/tr.cells[5].innerText > 0.5 ){
\echo       tr.cells[7].classList.add("warn");
\echo       tr.cells[7].title="Large precentage of unencrypted connections"
\echo     }
\echo   }
\echo   if (nonssl > 10) strfind += "<li>Number of unencrypted connections : <b>"+ nonssl +"</b></li>"
-- Session-state summary footer sourced from obj.sess (f1..f6)
\echo   el=document.createElement("tfoot");
\echo   el.innerHTML = "<th colspan='7'>Active: "+ obj.sess.f1 +", Idle-in-transaction: " + obj.sess.f2 + ", Idle: " + obj.sess.f3 + ", Background: " + obj.sess.f4 + ", Workers: " + obj.sess.f5 + ", Total: " + obj.sess.f6 + "</th>";
\echo   tab.appendChild(el);
\echo }
-- JS: re-run parameter recommendations whenever a sizing input changes.
\echo ["cpus","mem","strg","wrkld","flsys"].forEach(function(t) {document.getElementById(t).addEventListener("change", (event) => { getreccomendation(); })});
-- Reads CPU/memory/workload/filesystem inputs into the globals consumed by
-- checkpars(), picks a transaction-timeout budget per workload, re-runs the
-- parameter checks, then renders items that carry a `suggest` value into the
-- #paramtune recommendation list.
\echo function getreccomendation(){
\echo   totMem = document.getElementById("mem").value;
\echo   totCPU = document.getElementById("cpus").value;
\echo   wrkld = document.getElementById("wrkld").value;
\echo   flsys = document.getElementById("flsys").value;
\echo   trnsTimout = (wrkld === "oltp") ? 900 :
\echo              (wrkld === "olap") ? 18000 :
\echo              (wrkld === "mixed") ? 3600 : 900;
-- checkpars() repopulates `params` (and its per-item warn/suggest fields)
\echo   checkpars();
\echo   let reccomandations = document.getElementById("paramtune").children[1];
\echo   let reccos = "";
\echo   for (let item of params) {
\echo     if (typeof item.suggest != "undefined"){
\echo       reccos += "<li>" + item.param + " = " + item.suggest + " <a href='"+ docurl +"params/" + item.param +".html'>#Explanation</a></li>"
\echo     }
\echo   }
\echo   reccomandations.innerHTML = reccos;
\echo }
-- JS helper: show a transient centered message overlay for 2 seconds.
\echo function flash(msg){
\echo   var el=document.createElement("div");
\echo   el.setAttribute("id", "cur");
\echo   el.setAttribute("style", "position: fixed;top: 50%;left: 50%;transform: translate(-50%, -50%);");
\echo   el.textContent = msg;
\echo   document.body.appendChild(el);
\echo   setTimeout(() => { el.remove();},2000);
\echo }
-- JS: copy the parameter-recommendation panel to the clipboard as HTML
-- source text (a styled clone; the live element is not modified).
\echo function copyashtml(){
\echo   let elem = document.getElementById("paramtune");
\echo   let paramtune = elem.cloneNode(true);
\echo   paramtune.style="font-weight:initial;line-height:1.5em;background-color:#FAFFEA;border: 2px solid blue; border-radius: 5px; padding: 1em;box-shadow: 0px 20px 30px -10px grey";
\echo   navigator.clipboard.writeText(paramtune.outerHTML);
\echo   flash("Parameter recommendations are copied to clipboard as HTML code");
\echo }
-- copyrichhtml: copy #paramtune as a rich clipboard object carrying both
-- a text/plain and a text/html flavour, so pasting into mail/wiki editors
-- keeps the formatting.
\echo function copyrichhtml(){
\echo let elem = document.getElementById("paramtune")
\echo let paramtune = elem.cloneNode(true);
\echo paramtune.style="font-weight:initial;line-height:1.5em;background-color:#FAFFEA;border: 2px solid blue; border-radius: 5px; padding: 1em;box-shadow: 0px 20px 30px -10px grey";
\echo const clipboardItem = new ClipboardItem({ "text/plain": new Blob([paramtune.innerText], { type: "text/plain" }),
\echo "text/html": new Blob([paramtune.outerHTML],{ type: "text/html" })});
-- NOTE(review): write() returns a Promise that is not awaited; a
-- clipboard-permission failure is silently ignored.
\echo navigator.clipboard.write([clipboardItem]);
\echo flash("Parameter recommendations are copied to clipboard as HTML Rich object");
\echo }
-- bytesToSize(bytes, divisor): human-readable size with one decimal.
-- divisor defaults to 1000 (SI units); callers that deal in binary units
-- pass 1024 (see the effective_cache_size / maintenance_work_mem handlers).
\echo function bytesToSize(bytes,divisor = 1000) {
\echo const sizes = ["B","KB","MB","GB","TB"];
\echo if (bytes == 0) return "0B";
-- i = magnitude bucket; parseInt(..., 10) only re-normalises the float.
\echo const i = parseInt(Math.floor(Math.log(bytes) / Math.log(divisor)), 10);
-- Sub-divisor values are printed verbatim without a decimal part.
\echo if (i === 0) return bytes + sizes[i];
\echo return (bytes / (divisor ** i)).toFixed(1) + sizes[i];
\echo }
-- formatNumber(n): abbreviate large counts as "x.y Thousand/Million/
-- Billion"; values below 1000 are returned unchanged via toString().
\echo function formatNumber(n) {
\echo const ranges = [ {divisor: 1e9, suffix: " Billion"}, {divisor: 1e6, suffix: " Million"}, {divisor: 1e3, suffix: " Thousand"} ];
\echo const range = ranges.find(r => n >= r.divisor);
\echo return range ? (n/range.divisor).toFixed(1) + range.suffix : n.toString();
\echo };
-- setheadtip(th, tips): attach tips[i] as the tooltip of the i-th header
-- cell of row th (tips is indexed like th.cells).
-- NOTE(review): the loop variable i is an implicit global (no var/let).
\echo function setheadtip(th,tips){
\echo for (i in tips) th.cells[i].title = tips[i];
\echo }
-- updateJson(jsonString, key, value): return a new JSON string equal to
-- jsonString with the top-level property `key` set to `value`.
-- Throws SyntaxError if jsonString is not valid JSON.
\echo function updateJson(jsonString, key, value) {
\echo const jsonObject = JSON.parse(jsonString);
\echo jsonObject[key] = value;
\echo return JSON.stringify(jsonObject);
\echo }
-- DurationtoSeconds(duration): convert a PostgreSQL interval rendering
-- such as "2 days 01:02:03", "1 day 01:02:03" or "01:02:03" into seconds.
\echo function DurationtoSeconds(duration){
\echo let days=0,dayIdx
\echo dayIdx=duration.indexOf("day")
\echo if(dayIdx>0){
-- FIX: radix 10 passed explicitly - never let parseInt guess the base.
\echo days=parseInt(duration.substring(0,dayIdx), 10)
-- Skip past "day"/"days"; any leading space left on the remainder is
-- absorbed by the numeric conversion of the hours field below.
\echo if(duration[dayIdx+4] == "s") dayIdx=dayIdx+5
\echo else dayIdx=dayIdx+4
\echo duration=duration.substring(dayIdx)
\echo }
\echo const [hours, minutes, seconds] = duration.split(":");
-- FIX: Number(hours) made explicit (was an implicit string*number
-- coercion via "(hours) * 60 * 60"); result is unchanged.
\echo return days * 24 * 60 * 60 + Number(hours) * 60 * 60 + Number(minutes) * 60 + Number(seconds);
\echo };
-- paramDespatch: dispatch table of per-parameter check functions keyed by
-- GUC name.  Each handler receives the report table row (rowref) for that
-- parameter; rowref.cells[1] holds the current value.  Handlers
-- communicate through implicit globals (val, params, trnsTimout, datadir,
-- strfind, obj) rather than return values.
\echo var paramDespatch = {
-- archive_mode: warn when a primary server has WAL archiving disabled
-- (no PITR possible).
\echo archive_mode : function(rowref){
\echo val=rowref.cells[1];
\echo if(obj.primary == true && val.innerHTML == "off"){ val.classList.add("warn"); val.title="Primary server without WAL archiving configured. No PITR possible"}
\echo },
-- archive_command: sanity-check the archiving command.  Flags (1) a
-- command shadowed by archive_library, (2) barman usage (rsync risk note),
-- (3) raw cp/rsync archiving, (4) a suspiciously short/empty command.
-- FIXES vs. previous version (emitted HTML only, logic unchanged):
--   * "Details<a>" closed properly as "Details</a>" in both findings
--   * doubled "</li></li>" reduced to a single "</li>"
--   * user-visible typo "archive_commnad" -> "archive_command"
\echo archive_command : function(rowref) {
\echo val=rowref.cells[1];
-- obj.params.f1 non-empty means archive_library is set and wins.
\echo if (obj.params !== null && obj.params.f1 !== null && obj.params.f1.length > 0) { val.classList.add("warn"); val.title="archive_command won't be in-effect, because archive_library : " + obj.arclib + " is specified" }
\echo else if (val.innerText.includes("barman")){ strfind += "<li><b>Use of Barman is detected</b>. Please be aware of the possible risks, if <code>rsync</code> is used as backup_method. <a href='"+ docurl +"barman.html'> Details</a></li>"; }
\echo else if (val.innerText.includes("cp ") || val.innerText.includes("rsync ")) { val.classList.add("warn"); strfind +="<li><b>Use of 'cp'/'rsync' command is detected in archive_command</b>, which is highly discouraged. Please use reliable backup tools for WAL archiving.<a href='"+ docurl +"cp.html'> Details</a></li>" }
\echo else if (val.innerText.length < 5) {val.classList.add("warn"); val.title="A valid archive_command is expected for WAL archiving, unless archive library is used" ; }
\echo },
-- autovacuum: warn and suggest "on" when autovacuum is disabled.
-- Assumes params always contains an "autovacuum" entry (find() returning
-- undefined would throw here) - TODO confirm against where params is built.
\echo autovacuum : function(rowref) {
\echo val=rowref.cells[1];
\echo if(val.innerText != "on") {
\echo val.classList.add("warn"); val.title="Autovacuum must be on" ;
\echo let param = params.find(p => p.param === "autovacuum");
\echo param["warn"] = "<b>Autovacuum is disabled</b>. This prevents essential maintenance, and can cause bloat and performance issues. Please enable autovacuum. <a href='"+ docurl +"params/autovacuum.html'>Details</a>";
\echo param["suggest"] = "on";
\echo }
\echo },
-- autovacuum_max_workers: warn above 3 workers (the shared cost limit
-- makes each extra worker slower) and suggest 3.
-- innerText > 3 relies on JS string-vs-number numeric coercion.
\echo autovacuum_max_workers : function(rowref) {
\echo val=rowref.cells[1];
\echo if(val.innerText > 3) { val.classList.add("warn"); val.title="High number of workers causes each workers to run slower because of the cost limit" ;
\echo let param = params.find(p => p.param === "autovacuum_max_workers");
\echo param["suggest"] = "3";
\echo }
\echo },
-- autovacuum_vacuum_cost_limit: warn when above 800 or set to -1
-- (the loose == makes "-1" match via numeric coercion).
\echo autovacuum_vacuum_cost_limit: function(rowref){
\echo val=rowref.cells[1];
\echo if(val.innerText > 800 || val.innerText == -1 ) { val.classList.add("warn"); val.title="Better to specify this with a value less than 800" }
\echo },
-- autovacuum_freeze_max_age: warn above 800 million.
-- NOTE(review): the parsed value is stored in an implicit global of the
-- same name - presumably read by a later check; verify before refactoring.
\echo autovacuum_freeze_max_age: function(rowref){
\echo val=rowref.cells[1];
\echo autovacuum_freeze_max_age = Number(val.innerText);
\echo if (autovacuum_freeze_max_age > 800000000) val.classList.add("warn");
\echo },
-- bgwriter_lru_maxpages: if some earlier analysis already filled in a
-- suggested value for this parameter, mark the cell and surface the
-- suggestion in the tooltip.
-- FIX: dropped the redundant double assignment "val = val=rowref.cells[1];"
-- (same single global-val assignment, written once).
\echo bgwriter_lru_maxpages: function(rowref){
\echo let param = params.find(p => p.param === "bgwriter_lru_maxpages");
\echo if (typeof param["suggest"] != "undefined"){
\echo val=rowref.cells[1];
\echo val.classList.add("warn");
\echo val.title="bgwriter_lru_maxpages is too low. Increase this to :" + param["suggest"];
\echo }
\echo },
-- checkpoint_completion_target: warn below 0.7 (I/O spikes at checkpoint
-- time) and suggest 0.9.
\echo checkpoint_completion_target: function(rowref){
\echo val=rowref.cells[1];
\echo if(val.innerText < 0.7) { val.classList.add("warn"); val.title="Low checkpoint_completion_target can cause I/O spikes during checkpoints" ;
\echo let param = params.find(p => p.param === "checkpoint_completion_target");
\echo param["suggest"] = "0.9";
\echo }
\echo },
-- checkpoint_timeout: warn below 1200 (value is in seconds here) and
-- suggest 1800.  Statement ends rely on ASI on the title line.
\echo checkpoint_timeout: function(rowref){
\echo val=rowref.cells[1];
\echo if(val.innerText < 1200) { val.classList.add("warn"); val.title="Too small gap between checkpoints"
\echo let param = params.find(p => p.param === "checkpoint_timeout");
\echo param["suggest"] = "1800";
\echo }
\echo },
-- client_connection_check_interval: always suggest trnsTimout/60 seconds
-- (trnsTimout is the workload-derived global set in getreccomendation);
-- warn only when the parameter is 0 (disabled).
-- NOTE(review): the suggestion is set unconditionally, so it appears in
-- the recommendation list even when already configured; and for olap
-- (18000/60 = 300s) it exceeds the 10-60s range the warning text
-- advertises - confirm whether both are intended.
\echo client_connection_check_interval :function(rowref){
\echo val=rowref.cells[1];
\echo let param = params.find(p => p.param === "client_connection_check_interval");
\echo param["suggest"] = "'" + trnsTimout/60 + "s'";
\echo if(val.innerText == 0) { val.classList.add("warn"); val.title="It is highly recommended to set this to a value between 10 and 60 seconds to detect dead connections and prevent connection pool exhaustion" ;}
\echo },
-- data_directory: record the cluster data directory in the global
-- `datadir` for use by later checks.
-- FIX: `val` was never assigned from rowref in this handler, so it read
-- whatever stale global `val` the previously-dispatched handler left
-- behind and stored the wrong cell's text into datadir.  Read this row's
-- value cell explicitly, matching every other handler in paramDespatch.
\echo data_directory: function(rowref){
\echo val=rowref.cells[1];
\echo datadir=val.innerText;
\echo },
-- deadlock_timeout: informational only - highlight the value in lime.
\echo deadlock_timeout: function(rowref){ val=rowref.cells[1]; val.classList.add("lime"); },
-- default_toast_compression: warn and suggest lz4 when anything else
-- (e.g. pglz) is configured for TOAST compression.
\echo default_toast_compression: function(rowref){
\echo val=rowref.cells[1];
\echo let param = params.find(p => p.param === "default_toast_compression");
\echo if (val.innerText != "lz4") { val.classList.add("warn"); val.title="Better to use lz4 for TOAST compression";
\echo param["suggest"] = "lz4";
\echo }
\echo },
-- effective_cache_size: informational - tooltip shows the value converted
-- from 8kB pages (value*8192) to a human-readable binary-unit size.
\echo effective_cache_size: function(rowref){ val=rowref.cells[1]; val.classList.add("lime"); val.title=bytesToSize(val.innerText*8192,1024); },
-- huge_pages: warn and suggest "on" unless huge_pages is exactly "on";
-- otherwise highlight in lime.  (Note: the "try" setting also triggers
-- the warning with this comparison.)
\echo huge_pages: function(rowref){
\echo val=rowref.cells[1];
\echo if (val.innerText != "on" ) {
\echo val.classList.add("warn");
\echo val.title="Please configure TLBHugePages and set huge_pages=on. This is essential for stability and reliability";
\echo let param = params.find(p => p.param === "huge_pages");
\echo param["suggest"] = "on";
\echo } else val.classList.add("lime");
\echo },
-- huge_page_size: informational only - highlight in lime.
\echo huge_page_size: function(rowref){ val=rowref.cells[1]; val.classList.add("lime"); },
-- huge_pages_status: warn when the server reports huge pages are not
-- actually in use at runtime; otherwise highlight in lime.
\echo huge_pages_status: function(rowref){ val=rowref.cells[1]; if (val.innerText == "off") { val.classList.add("warn"); val.title="Huge pages are not used"; }
\echo else val.classList.add("lime"); },
-- hot_standby_feedback: informational only - highlight in lime.
\echo hot_standby_feedback: function(rowref){ val=rowref.cells[1]; val.classList.add("lime"); },
-- idle_session_timeout: warn whenever enabled (> 0); killing idle
-- sessions is considered dangerous here.
\echo idle_session_timeout:function(rowref){
\echo val=rowref.cells[1];
\echo if (val.innerText > 0) { val.classList.add("warn"); val.title="It is dangerous to use idle_session_timeout. Avoid using this" }
\echo },
-- idle_in_transaction_session_timeout: warn when disabled (0) and always
-- suggest '5min'.
-- FIX: tooltip wording corrected ("Highly suggestable ... atleast" ->
-- proper English); logic untouched.
-- NOTE(review): the suggestion is set unconditionally (outside the if),
-- so '5min' is recommended even when a timeout is already configured -
-- confirm whether that is intended.
\echo idle_in_transaction_session_timeout: function(rowref){
\echo val=rowref.cells[1];
\echo if (val.innerText == 0){ val.classList.add("warn"); val.title="It is highly recommended to use at least 5min to prevent application misbehaviour" }
\echo let param = params.find(p => p.param === "idle_in_transaction_session_timeout");
\echo param["suggest"] = "'5min'";
\echo },
-- jit: warn when JIT is enabled globally and suggest "off" (enable it
-- only at a narrower scope, e.g. per session/query).
\echo jit: function(rowref){ val=rowref.cells[1]; if (val.innerText=="on") {
\echo val.classList.add("warn");
\echo val.title="Avoid JIT globally (Disable), Use only at smaller scope"
\echo let param = params.find(p => p.param === "jit");
\echo param["suggest"] = "off";
\echo }},
-- log_hostname: warn when on (per-connection DNS lookup overhead) and
-- suggest "off".
\echo log_hostname: function(rowref){
\echo val=rowref.cells[1];
\echo if (val.innerText == "on"){
\echo let param = params.find(p => p.param === "log_hostname");
\echo val.classList.add("warn"); val.title="Unless you are particular about logging the hostnames, better to turn this off to avoid DNS lookup overhead";
\echo param["suggest"] = "off";
\echo }},
-- log_temp_files: if earlier analysis produced a suggested threshold
-- (heavy temp-file generation detected), warn with that suggestion;
-- otherwise, if the parameter is already enabled (> -1), mark it lime and
-- point the reader at the PostgreSQL log.
-- FIX: dropped the redundant double assignment "val = val=rowref.cells[1];"
-- (same single global-val assignment, written once).
\echo log_temp_files: function(rowref){
\echo val=rowref.cells[1];
\echo let param = params.find(p => p.param === "log_temp_files");
\echo if (typeof param["suggest"] != "undefined"){
\echo val.classList.add("warn");
\echo val.title="Heavy temporary file generation is detected. Consider setting log_temp_files=" + param["suggest"] ;
\echo } else if ((param["val"] > -1)){
\echo val.classList.add("lime");
\echo val.title="log_temp_files is already set. Analyze PostgreSQL log for problematic SQLs. Adjust parameter value if required";
\echo }
\echo },
-- log_truncate_on_rotation: silently suggest "on" when off (no cell
-- highlight, recommendation list only).
\echo log_truncate_on_rotation: function(rowref){
\echo val=rowref.cells[1];
\echo let param = params.find(p => p.param === "log_truncate_on_rotation");
\echo if (val.innerText == "off") param["suggest"] = "on";
\echo },
-- log_lock_waits: silently suggest "on" when off (recommendation list
-- only, no cell highlight).
\echo log_lock_waits: function(rowref){
\echo val=rowref.cells[1]; let param = params.find(p => p.param === "log_lock_waits");
\echo if(val.innerText == "off") param["suggest"] = "on";
\echo },
-- lock_timeout: suggest '1min' when disabled ("0"); string comparison is
-- intentional - innerText is the raw cell text.
\echo lock_timeout: function(rowref){
\echo val=rowref.cells[1]; let param = params.find(p => p.param === "lock_timeout");
\echo if(val.innerText == "0") param["suggest"] = "'1min'";
\echo },
-- maintenance_work_mem: informational - tooltip converts the kB value
-- (value*1024 bytes) to a human-readable binary-unit size.
\echo maintenance_work_mem: function(rowref){ val=rowref.cells[1]; val.classList.add("lime"); val.title=bytesToSize(val.innerText*1024,1024); },
\echo max_connections: function(rowref){
\echo val=rowref.cells[1];
\echo val.title="Avoid value exceeding 10x of the CPUs"
\echo let conns = params.find(p => p.param === "max_connections");