-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdata_walk.py
More file actions
813 lines (745 loc) · 62.9 KB
/
data_walk.py
File metadata and controls
813 lines (745 loc) · 62.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 11 08:43:07 2024
@author: ACRANMER
# code for Maynard Climate Resilience Data Walk
"""
import numpy as np
import pandas as pd
import streamlit as st
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import json
# ---------------------------------------------------------------------------
# Top-level Streamlit page configuration and header.
# st.set_page_config must be the first Streamlit call in the script.
# ---------------------------------------------------------------------------
st.set_page_config(#layout='wide',
                   page_title='Maynard Climate Resilience Data Walk'
                   )
st.title('Maynard Climate Resilience Data Walk')
# NOTE(review): the survey link is still a placeholder ("link tbd") — replace
# before publishing.
st.subheader('Welcome! This page offers data on the Town of Maynard for you \
to explore. Click on each of the titles below to expand the \
sections. Each section poses questions for you to consider. Please \
share your thoughts with us by filling out the survey (link tbd).')
# Language selector for page text. Fixed the misspelled option
# 'Portugese' -> 'Portuguese' (user-facing label).
# NOTE(review): if any later code compares st.session_state['text_lang']
# against the old spelling, update it to match — TODO confirm downstream use.
text_lang = st.radio('Choose your preferred language:',
                     ['English', 'Georgian', 'Spanish', 'Portuguese'],
                     key='text_lang')
@st.cache_resource
def load_data():
gdf = json.load(open('maynard_shapes.json'))
#print(gdf)
census_api_url = 'https://api.census.gov/data/2021/acs/acs5'
geography = '&for=county%20subdivision:39625&in=state:25%20county:017'
bgs1 = '&for=block%20group:*&in=state:25%20county:017&in=tract:364101'
bgs2 = '&for=block%20group:*&in=state:25%20county:017&in=tract:364102'
# dictionary for income columns
income_col_dict = {'[["B19001_001E"':'Total Households (Inc)',
'B19001_001M':'Total Households (Inc) Margin of Error',
'B19001_002E':'Less than $10,000',
'B19001_002M':'Less than $10,000 Margin of Error',
'B19001_003E':'$10,000 to $14,999',
'B19001_003M':'$10,000 to $14,999 Margin of Error',
'B19001_004E':'$15,000 to $19,999',
'B19001_004M':'$15,000 to $19,999 Margin of Error',
'B19001_005E':'$20,000 to $24,999',
'B19001_005M':'$20,000 to $24,999 Margin of Error',
'B19001_006E':'$25,000 to $29,999',
'B19001_006M':'$25,000 to $29,999 Margin of Error',
'B19001_007E':'$30,000 to $34,999',
'B19001_007M':'$30,000 to $34,999 Margin of Error',
'B19001_008E':'$35,000 to $39,999',
'B19001_008M':'$35,000 to $39,999 Margin of Error',
'B19001_009E':'$40,000 to $44,999',
'B19001_009M':'$40,000 to $44,999 Margin of Error',
'B19001_010E':'$45,000 to $49,999',
'B19001_010M':'$45,000 to $49,999 Margin of Error',
'B19001_011E':'$50,000 to $59,999',
'B19001_011M':'$50,000 to $59,999 Margin of Error',
'B19001_012E':'$60,000 to $74,999',
'B19001_012M':'$60,000 to $74,999 Margin of Error',
'B19001_013E':'$75,000 to $99,999',
'B19001_013M':'$75,000 to $99,999 Margin of Error',
'B19001_014E':'$100,000 to $124,999',
'B19001_014M':'$100,000 to $124,999 Margin of Error',
'B19001_015E':'$125,000 to $149,999',
'B19001_015M':'$125,000 to $149,999 Margin of Error',
'B19001_016E':'$150,000 to $199,999',
'B19001_016M':'$150,000 to $199,999 Margin of Error',
'B19001_017E':'$200,000 or more',
'B19001_017M':'$200,000 or more Margin of Error'
}
age_col_dict = {'S0101_C01_001E':'Total Population',
'S0101_C01_001M':'Total Population Margin of Error',
'S0101_C01_002E':'Under 5 years',
'S0101_C01_002M':'Under 5 years Margin of Error',
'S0101_C01_003E':'5 to 9 years',
'S0101_C01_003M':'5 to 9 years Margin of Error',
'S0101_C01_004E':'10 to 14 years',
'S0101_C01_004M':'10 to 14 years Margin of Error',
'S0101_C01_005E':'15 to 19 years',
'S0101_C01_005M':'15 to 19 years Margin of Error',
'S0101_C01_006E':'20 to 24 years',
'S0101_C01_006M':'20 to 24 years Margin of Error',
'S0101_C01_007E':'25 to 29 years',
'S0101_C01_007M':'25 to 29 years Margin of Error',
'S0101_C01_008E':'30 to 34 years',
'S0101_C01_008M':'30 to 34 years Margin of Error',
'S0101_C01_009E':'35 to 39 years',
'S0101_C01_009M':'35 to 39 years Margin of Error',
'S0101_C01_010E':'40 to 44 years',
'S0101_C01_010M':'40 to 44 years Margin of Error',
'S0101_C01_011E':'45 to 49 years',
'S0101_C01_011M':'45 to 49 years Margin of Error',
'S0101_C01_012E':'50 to 54 years',
'S0101_C01_012M':'50 to 54 years Margin of Error',
'S0101_C01_013E':'55 to 59 years',
'S0101_C01_013M':'55 to 59 years Margin of Error',
'S0101_C01_014E':'60 to 64 years',
'S0101_C01_014M':'60 to 64 years Margin of Error',
'S0101_C01_015E':'65 to 69 years',
'S0101_C01_015M':'65 to 69 years Margin of Error',
'S0101_C01_016E':'70 to 74 years',
'S0101_C01_016M':'70 to 74 years Margin of Error',
'S0101_C01_017E':'75 to 79 years',
'S0101_C01_017M':'75 to 79 years Margin of Error',
'S0101_C01_018E':'80 to 84 years',
'S0101_C01_018M':'80 to 84 years Margin of Error',
'S0101_C01_019E':'85 years and over',
'S0101_C01_019M':'85 years and over Margin of Error',
'S0101_C01_020E':'5 to 14 years',
'S0101_C01_020M':'5 to 14 years Margin of Error',
'S0101_C01_021E':'15 to 17 years',
'S0101_C01_021M':'15 to 17 years Margin of Error',
'S0101_C01_022E':'Under 18 years',
'S0101_C01_022M':'Under 18 years Margin of Error',
'S0101_C01_023E':'18 to 24 years',
'S0101_C01_023M':'18 to 24 years Margin of Error',
'S0101_C01_024E':'15 to 44 years',
'S0101_C01_024M':'15 to 44 years Margin of Error',
'S0101_C01_025E':'16 years and over',
'S0101_C01_025M':'16 years and over Margin of Error',
'S0101_C01_026E':'18 years and over',
'S0101_C01_026M':'18 years and over Margin of Error',
'S0101_C01_027E':'21 years and over',
'S0101_C01_027M':'21 years and over Margin of Error',
'S0101_C01_028E':'60 years and over',
'S0101_C01_028M':'60 years and over Margin of Error',
'S0101_C01_029E':'62 years and over',
'S0101_C01_029M':'62 years and over Margin of Error',
'S0101_C01_030E':'65 years and over',
'S0101_C01_030M':'65 years and over Margin of Error',
'S0101_C01_031E':'75 years and over',
'S0101_C01_031M':'75 years and over Margin of Error',
'S0101_C01_032E':'Median age (years)',
'S0101_C01_032M':'Median age (years) Margin of Error',
'S0101_C01_033E':'Sex ratio (males per 100 females)',
'S0101_C01_033M':'Sex ratio (males per 100 females) Margin of Error',
'S0101_C01_034E':'Age dependency ratio',
'S0101_C01_034M':'Age dependency ratio Margin of Error',
'S0101_C01_035E':'Old-age dependency ratio',
'S0101_C01_035M':'Old-age dependency ratio Margin of Error',
'S0101_C01_036E':'Child dependency ratio',
'S0101_C01_036M':'Child dependency ratio Margin of Error'
}
# import data
incomebg_url1 = census_api_url+'?get=group(B19001)'+bgs1
incomebg_url2 = census_api_url+'?get=group(B19001)'+bgs2
incomebg_data = pd.concat([pd.read_csv(incomebg_url1),pd.read_csv(incomebg_url2)])
incomebg_data = incomebg_data.drop(columns=incomebg_data.columns[incomebg_data.columns.str.contains('A')])
incomebg_data = incomebg_data.drop(columns=['state','county'])
incomebg_data = incomebg_data.rename(columns=income_col_dict)
incomebg_data['Total Households (Inc)'] = incomebg_data['Total Households (Inc)'].str[2:-1]
income_url = census_api_url+'?get=group(B19001)'+geography
income_data = pd.read_csv(income_url)
income_data = income_data.drop(columns=income_data.columns[income_data.columns.str.contains('A')])
income_data = income_data.drop(columns=['GEO_ID','state','county','county subdivision]','Unnamed: 73'])
income_data = income_data.rename(columns=income_col_dict)
income_data['Total Households (Inc)'] = income_data['Total Households (Inc)'].str[2:-1]
income_data['Less than $25,000'] = income_data.loc[0,['Less than $10,000','$10,000 to $14,999','$15,000 to $19,999','$20,000 to $24,999']].sum()
income_data['$25,000 to $49,999'] = income_data.loc[0,['$25,000 to $29,999','$30,000 to $34,999','$35,000 to $39,999','$40,000 to $44,999','$45,000 to $49,999']].sum()
income_data['$50,000 to $74,999'] = income_data.loc[0,['$50,000 to $59,999','$60,000 to $74,999']].sum()
income_cols = ['Less than $25,000','$25,000 to $49,999','$50,000 to $74,999','$75,000 to $99,999',
'$100,000 to $124,999','$125,000 to $149,999','$150,000 to $199,999','$200,000 or more']
income_fig = make_subplots(1,1,
#subplot_titles=('Number of Households by Income Bracket')
)
income_fig.add_trace(go.Bar(x=income_data[income_cols].columns,
y=income_data.loc[0,income_cols],
#error_y=income_data.loc[0,income_data.columns[income_data.columns.str.contains('Margin of Error')]],
name='Households by Income Bracket'
))
income_fig.update_layout(title=dict(text='Number of Households by Income Bracket',font=dict(size=28)),
yaxis=dict(title=dict(text='# Households',font=dict(size=18)),
tickfont=dict(size=14))
)
income_fig.update_xaxes(tickfont=dict(size=14))
income_fig.update_layout(hovermode='x',showlegend=False)
med_income_url = census_api_url+'?get=group(B19013)'+geography
med_inc_data = pd.read_csv(med_income_url)
med_inc_data = med_inc_data.drop(columns=med_inc_data.columns[med_inc_data.columns.str.contains('A')])
med_inc_data = med_inc_data.rename(columns={'[["B19013_001E"':'Median Household Income',
'B19013_001M':'MHI Margin of Error'})
med_inc_data['Median Household Income'] = med_inc_data['Median Household Income'].str[2:-1].astype('int')
med_inc_data = med_inc_data.drop(columns=['GEO_ID','state','county','county subdivision]','Unnamed: 9'])
med_incomebg1_url = census_api_url+'?get=group(B19013)'+bgs1
med_incomebg2_url = census_api_url+'?get=group(B19013)'+bgs2
med_incbg_data = pd.concat([pd.read_csv(med_incomebg1_url),
pd.read_csv(med_incomebg2_url)])
med_incbg_data = med_incbg_data.drop(columns=med_incbg_data.columns[med_incbg_data.columns.str.contains('A')])
med_incbg_data = med_incbg_data.rename(columns={'[["B19013_001E"':'Median Household Income',
'B19013_001M':'MHI Margin of Error'})
med_incbg_data['Median Household Income'] = med_incbg_data['Median Household Income'].str[2:-1].astype('int')
med_incbg_data = med_incbg_data.drop(columns=['state','county'])
med_incbg_data = med_incbg_data.rename(columns={'GEO_ID':'GEOID'})
med_incbg_data['GEOID'] = med_incbg_data['GEOID'].str[9:]
med_income_hh_url = census_api_url+'?get=group(B19019)'+geography
med_inc_hh_data = pd.read_csv(med_income_hh_url)
med_inc_hh_data = med_inc_hh_data.drop(columns=med_inc_hh_data.columns[med_inc_hh_data.columns.str.contains('A')])
med_inc_hh_data = med_inc_hh_data.rename(columns={'[["B19019_001E"':'Median Household Income by Household Size',
'B19019_001M':'MHI by Household Size Margin of Error',
'B19019_002E':'1-person households',
'B19019_002M':'1-person households Margin of Error',
'B19019_003E':'2-person households',
'B19019_003M':'2-person households Margin of Error',
'B19019_004E':'3-person households',
'B19019_004M':'3-person households Margin of Error',
'B19019_005E':'4-person households',
'B19019_005M':'4-person households Margin of Error',
'B19019_006E':'5-person households',
'B19019_006M':'5-person households Margin of Error',
'B19019_007E':'6-person households',
'B19019_007M':'6-person households Margin of Error',
'B19019_008E':'7-or-more-person households',
'B19019_008M':'7-or-more-person households'
})
med_inc_hh_data['Median Household Income by Household Size'] = med_inc_hh_data['Median Household Income by Household Size'].str[2:-1].astype('int')
med_inc_hh_data = med_inc_hh_data.drop(columns=['GEO_ID','state','county','county subdivision]','Unnamed: 37'])
print(med_incbg_data['GEOID'])
print(gdf['features'][0]['properties'])
income_map = px.choropleth(med_incbg_data,geojson=gdf,locations='GEOID',
featureidkey='properties.GEOID',
color='Median Household Income',
projection='mercator',
labels={'Median Household Income':'Income ($)'}
)
income_map.update_geos(fitbounds='locations',visible=False)
income_map.update_layout(height=300,width=600,
margin={"r":0,"t":0,"l":0,"b":0}
)
#S0101
age_url = census_api_url+'/subject?get=group(S0101)'+geography
age_data = pd.read_csv(age_url)
age_data = age_data.drop(columns=age_data.columns[age_data.columns.str.contains('A')])
age_data = age_data.drop(columns=['[["GEO_ID"','state','county','county subdivision]','Unnamed: 917'])
age_data = age_data.rename(columns=age_col_dict)
age_data = age_data.drop(columns=age_data.columns[age_data.columns.str.contains('S0101')])
age_data['25 to 34 years'] = age_data.loc[0,['25 to 29 years','30 to 34 years']].sum()
age_data['35 to 44 years'] = age_data.loc[0,['35 to 39 years','40 to 44 years']].sum()
age_data['45 to 54 years'] = age_data.loc[0,['45 to 49 years','50 to 54 years']].sum()
age_data['55 to 64 years'] = age_data.loc[0,['55 to 59 years','60 to 64 years']].sum()
age_cols = ['Under 5 years','5 to 14 years','15 to 17 years','18 to 24 years',
'25 to 34 years','35 to 44 years','45 to 54 years','55 to 64 years',
'65 years and over']
age_fig = make_subplots(1,1)
age_fig.add_trace(go.Bar(x=age_data[age_cols].columns,
y=age_data.loc[0,age_cols],
#error_y=age_data.loc[0,age_data.columns[age_data.columns.str.contains('Margin of Error')]],
name='People by Age Bracket'
))
age_fig.update_layout(title=dict(text='Number of People by Age Bracket',font=dict(size=28)),
yaxis=dict(title=dict(text='# People',font=dict(size=18)),
tickfont=dict(size=14))
)
age_fig.update_xaxes(tickfont=dict(size=14))
age_fig.update_layout(hovermode='x',showlegend=False)
race_url = census_api_url+'?get=group(B03002)'+geography
race_data = pd.read_csv(race_url)
race_data = race_data.drop(columns=race_data.columns[race_data.columns.str.contains('A')])
race_data = race_data.drop(columns=['GEO_ID','state','county','county subdivision]','Unnamed: 89'])
race_data = race_data.rename(columns={'[["B03002_001E"':'Total Population (R&E)',
'B03002_001M':'Total Population (R&E) Margin of Error',
'B03002_003E':'White',
'B03002_003M':'White Margin of Error',
'B03002_004E':'Black or African American',
'B03002_004M':'Black or African American Margin of Error',
'B03002_005E':'American Indian and Alaska Native',
'B03002_005M':'American Indian and Alaska Native Margin of Error',
'B03002_006E':'Asian',
'B03002_006M':'Asian Margin of Error',
'B03002_007E':'Native Hawaiian and Other Pacific Islander',
'B03002_007M':'Native Hawaiian and Other Pacific Islander Margin of Error',
'B03002_008E':'Some other race',
'B03002_008M':'Some other race Margin of Error',
'B03002_009E':'Two or more races',
'B03002_009M':'Two or more races Margin of Error',
'B03002_012E':'Hispanic or Latino',
'B03002_012M':'Hispanic or Latino Margin of Error'})
race_data = race_data.drop(columns=race_data.columns[race_data.columns.str.contains('B03002')])
race_cols = ['White','Hispanic or Latino','Asian',
'Black or African American',
'Two or more races','Some other race']
race_fig = make_subplots(1,1)
race_fig.add_trace(go.Bar(x=race_data[race_cols].columns,
y=race_data.loc[0,race_cols],
#error_y=race_data.loc[0,race_data.columns[race_data.columns.str.contains('Margin of Error')]],
name='People by Age Bracket'
))
race_fig.update_layout(title=dict(text='Number of People by Race and Ethnicity',font=dict(size=28)),
yaxis=dict(title=dict(text='# People',font=dict(size=18)),
tickfont=dict(size=14))
)
race_fig.update_xaxes(tickfont=dict(size=14))
race_fig.update_layout(hovermode='x',showlegend=False)
#Or S1501
education_url = census_api_url+'?get=group(B15003)'+geography
education_data = pd.read_csv(education_url)
education_data = education_data.drop(columns=education_data.columns[education_data.columns.str.contains('A')])
education_data = education_data.drop(columns=['GEO_ID','state','county','county subdivision]','Unnamed: 105'])
education_data = education_data.rename(columns={'[["B15003_001E"':'Total Population 25 Years and Over',
'B15003_001M':'Total Population 25 Years and Over Margin of Error',
'B15003_002E':'No schooling completed',
'B15003_002M':'No schooling completed Margin of Error',
'B15003_003E':'Nursery school',
'B15003_003M':'Nursery school Margin of Error',
'B15003_004E':'Kindergarten',
'B15003_004M':'Kindergarten Margin of Error',
'B15003_005E':'1st grade',
'B15003_005M':'1st grade Margin of Error',
'B15003_006E':'2nd grade',
'B15003_006M':'2nd grade Margin of Error',
'B15003_007E':'3rd grade',
'B15003_007M':'3rd grade Margin of Error',
'B15003_008E':'4th grade',
'B15003_008M':'4th grade Margin of Error',
'B15003_009E':'5th grade',
'B15003_009M':'5th grade Margin of Error',
'B15003_010E':'6th grade',
'B15003_010M':'6th grade Margin of Error',
'B15003_011E':'7th grade',
'B15003_011M':'7th grade Margin of Error',
'B15003_012E':'8th grade',
'B15003_012M':'8th grade Margin of Error',
'B15003_013E':'9th grade',
'B15003_013M':'9th grade Margin of Error',
'B15003_014E':'10th grade',
'B15003_014M':'10th grade Margin of Error',
'B15003_015E':'11th grade',
'B15003_015M':'11th grade Margin of Error',
'B15003_016E':'12th grade, no diploma',
'B15003_016M':'12th grade, no diploma Margin of Error',
'B15003_017E':'Regular high school diploma',
'B15003_017M':'Regular high school diploma Margin of Error',
'B15003_018E':'GED or alternative credential',
'B15003_018M':'GED or alternative credential Margin of Error',
'B15003_019E':'Some college, less than 1 year',
'B15003_019M':'Some college, less than 1 year Margin of Error',
'B15003_020E':'Some college, 1 or more years, no degree',
'B15003_020M':'Some college, 1 or more years, no degree Margin of Error',
'B15003_021E':"Associate's degree",
'B15003_021M':"Associate's degree Margin of Error",
'B15003_022E':"Bachelor's degree",
'B15003_022M':"Bachelor's degree Margin of Error",
'B15003_023E':"Master's degree",
'B15003_023M':"Master's degree Margin of Error",
'B15003_024E':'Professional school degree',
'B15003_024M':'Professional school degree Margin of Error',
'B15003_025E':'Doctorate degree',
'B15003_025M':'Doctorate degree Margin of Error'
})
education_data['Total Population 25 Years and Over'] = education_data['Total Population 25 Years and Over'].str[2:-1].astype('int')
education_data['No high school diploma'] = education_data.loc[0,['No schooling completed','Nursery school','Kindergarten','1st grade',
'2nd grade','3rd grade','4th grade','5th grade','6th grade','7th grade',
'8th grade','9th grade','10th grade','11th grade','12th grade, no diploma']].sum()
education_data['High school diploma'] = education_data.loc[0,['Regular high school diploma','GED or alternative credential']].sum()
education_data['Some college, no degree'] = education_data.loc[0,['Some college, less than 1 year','Some college, 1 or more years, no degree']].sum()
education_data['Graduate or Professional degree'] = education_data.loc[0,["Master's degree",'Professional school degree','Doctorate degree']].sum()
edu_cols = ['No high school diploma','High school diploma','Some college, no degree',
"Associate's degree","Bachelor's degree",'Graduate or Professional degree']
edu_fig = make_subplots(1,1)
edu_fig.add_trace(go.Bar(x=education_data[edu_cols].columns,
y=education_data.loc[0,edu_cols],
#error_y=education_data.loc[0,education_data.columns[education_data.columns.str.contains('Margin of Error')]],
name='People by Educational Attainment'
))
edu_fig.update_layout(title=dict(text='Number of People by Educational Attainment',font=dict(size=28)),
yaxis=dict(title=dict(text='# People',font=dict(size=18)),
tickfont=dict(size=14))
)
edu_fig.update_xaxes(tickfont=dict(size=14))
edu_fig.update_layout(hovermode='x',showlegend=False)
# S1810 includes race along with disability, summary tables work differently from detailed tables
disability_url = census_api_url+'/subject?get=group(S1810)'+geography
disability_data = pd.read_csv(disability_url)
disability_data = disability_data.drop(columns=disability_data.columns[disability_data.columns.str.contains('A')])
disability_data = disability_data.drop(columns=['[["GEO_ID"','state','county','county subdivision]','Unnamed: 833'])
disability_data = disability_data.rename(columns={'S1810_C02_001E':'Total civilian noninstitutionalized population with a disability',
'S1810_C02_001M':'Total civilian noninstitutionalized population with a disability Margin of Error',
'S1810_C02_019E':'With a hearing difficulty',
'S1810_C02_019M':'With a hearing difficulty Margin of Error',
'S1810_C02_029E':'With a vision difficulty',
'S1810_C02_029M':'With a vision difficulty Margin of Error',
'S1810_C02_039E':'With a cognitive difficulty',
'S1810_C02_039M':'With a cognitive difficulty Margin of Error',
'S1810_C02_047E':'With an ambulatory difficulty',
'S1810_C02_047M':'With an ambulatory difficulty Margin of Error',
'S1810_C02_055E':'With a self-care difficulty',
'S1810_C02_055M':'With a self-care difficulty Margin of Error',
'S1810_C02_063E':'With an independent living difficulty',
'S1810_C02_063M':'With an independent living difficulty Margin of Error'
})
disability_data = disability_data.drop(columns=disability_data.columns[disability_data.columns.str.contains('S1810')])
disability_cols = ['With a hearing difficulty','With a vision difficulty',
'With a cognitive difficulty','With an ambulatory difficulty',
'With a self-care difficulty','With an independent living difficulty']
disab = disability_data[disability_cols].rename(columns={'With a hearing difficulty':'Hearing difficulty',
'With a vision difficulty':'Vision difficulty',
'With a cognitive difficulty':'Cognitive difficulty',
'With an ambulatory difficulty':'Ambulatory difficulty',
'With a self-care difficulty':'Self-care difficulty',
'With an independent living difficulty':'Independent living difficulty'})
disability_fig = make_subplots(2,1,specs=[[{'type':'domain'}],
[{'type':'scatter'}]],
subplot_titles=('Disability by Percent','People by Disability Type'))
disability_fig.add_trace(go.Pie(labels=disab.columns,values=disab.loc[0,:],
textinfo='label+percent'),row=1,col=1)
disability_fig.add_trace(go.Bar(x=disab.columns,y=disab.loc[0,:]
),
row=2,col=1)
disability_fig.update_layout(title=dict(text='Types of Disabilities Experienced in Maynard',font=dict(size=28)),
yaxis=dict(title=dict(text='# People',font=dict(size=18)),
tickfont=dict(size=14))
)
disability_fig.update_xaxes(tickfont=dict(size=14))
disability_fig.update_layout(hovermode='x',showlegend=False,
height=800)
#B16004
language_url = census_api_url+'?get=group(B16004)'+geography
language_data = pd.read_csv(language_url)
language_data = language_data.drop(columns=language_data.columns[language_data.columns.str.contains('A')])
language_data = language_data.drop(columns=['GEO_ID','state','county','county subdivision]','Unnamed: 273'])
language_data = language_data.rename(columns={'[["B16004_001E"':'Total Population 5 Years and Over',
'B16004_001M':'Total Population 5 Years and Over Margin of Error',
'B16004_004E':'5 to 17 years: Speak Spanish',
'B16004_004M':'5 to 17 years: Speak Spanish Margin of Error',
'B16004_007E':'5 to 17 years: Speak Spanish, Speak English not well',
'B16004_007M':'5 to 17 years: Speak Spanish, Speak English not well Margin of Error',
'B16004_008E':'5 to 17 years: Speak Spanish, Speak English not at all',
'B16004_008M':'5 to 17 years: Speak Spanish, Speak English not at all Margin of Error',
'B16004_009E':'5 to 17 years: Speak other Indo-European languages',
'B16004_009M':'5 to 17 years: Speak other Indo-European languages Margin of Error',
'B16004_012E':'5 to 17 years: Speak other Indo-European languages, Speak English not well',
'B16004_012M':'5 to 17 years: Speak other Indo-European languages, Speak English not well Margin of Error',
'B16004_013E':'5 to 17 years: Speak other Indo-European languages, Speak English not at all',
'B16004_013M':'5 to 17 years: Speak other Indo-European languages, Speak English not at all Margin of Error',
'B16004_014E':'5 to 17 years: Speak Asian and Pacific Island Languages',
'B16004_014M':'5 to 17 years: Speak Asian and Pacific Island Languages Margin of Error',
'B16004_017E':'5 to 17 years: Speak Asian and Pacific Island Languages, Speak English not well',
'B16004_017M':'5 to 17 years: Speak Asian and Pacific Island Languages, Speak English not well Margin of Error',
'B16004_018E':'5 to 17 years: Speak Asian and Pacific Island Languages, Speak English not at all',
'B16004_018M':'5 to 17 years: Speak Asian and Pacific Island Languages, Speak English not at all Margin of Error',
'B16004_019E':'5 to 17 years: Speak other languages',
'B16004_019M':'5 to 17 years: Speak other languages Margin of Error',
'B16004_022E':'5 to 17 years: Speak other languages, Speak English not well',
'B16004_022M':'5 to 17 years: Speak other languages, Speak English not well Margin of Error',
'B16004_023E':'5 to 17 years: Speak other languages, Speak English not at all',
'B16004_023M':'5 to 17 years: Speak other languages, Speak English not at all Margin of Error',
'B16004_026E':'18 to 64 years: Speak Spanish',
'B16004_026M':'18 to 64 years: Speak Spanish Margin of Error',
'B16004_029E':'18 to 64 years: Speak Spanish, Speak English not well',
'B16004_029M':'18 to 64 years: Speak Spanish, Speak English not well Margin of Error',
'B16004_030E':'18 to 64 years: Speak Spanish, Speak English not at all',
'B16004_030M':'18 to 64 years: Speak Spanish, Speak English not at all Margin of Error',
'B16004_031E':'18 to 64 years: Speak other Indo-European languages',
'B16004_031M':'18 to 64 years: Speak other Indo-European languages Margin of Error',
'B16004_034E':'18 to 64 years: Speak other Indo-European languages, Speak English not well',
'B16004_034M':'18 to 64 years: Speak other Indo-European languages, Speak English not well Margin of Error',
'B16004_035E':'18 to 64 years: Speak other Indo-European languages, Speak English not at all',
'B16004_035M':'18 to 64 years: Speak other Indo-European languages, Speak English not at all Margin of Error',
'B16004_036E':'18 to 64 years: Speak Asian and Pacific Island Languages',
'B16004_036M':'18 to 64 years: Speak Asian and Pacific Island Languages Margin of Error',
'B16004_039E':'18 to 64 years: Speak Asian and Pacific Island Languages, Speak English not well',
'B16004_039M':'18 to 64 years: Speak Asian and Pacific Island Languages, Speak English not well Margin of Error',
'B16004_040E':'18 to 64 years: Speak Asian and Pacific Island Languages, Speak English not at all',
'B16004_040M':'18 to 64 years: Speak Asian and Pacific Island Languages, Speak English not at all Margin of Error',
'B16004_041E':'18 to 64 years: Speak other languages',
'B16004_041M':'18 to 64 years: Speak other languages Margin of Error',
'B16004_044E':'18 to 64 years: Speak other languages, Speak English not well',
'B16004_044M':'18 to 64 years: Speak other languages, Speak English not well Margin of Error',
'B16004_045E':'18 to 64 years: Speak other languages, Speak English not at all',
'B16004_045M':'18 to 64 years: Speak other languages, Speak English not at all Margin of Error',
'B16004_048E':'65 years and over: Speak Spanish',
'B16004_048M':'65 years and over: Speak Spanish Margin of Error',
'B16004_051E':'65 years and over: Speak Spanish, Speak English not well',
'B16004_051M':'65 years and over: Speak Spanish, Speak English not well Margin of Error',
'B16004_052E':'65 years and over: Speak Spanish, Speak English not at all',
'B16004_052M':'65 years and over: Speak Spanish, Speak English not at all Margin of Error',
'B16004_053E':'65 years and over: Speak other Indo-European languages',
'B16004_053M':'65 years and over: Speak other Indo-European languages Margin of Error',
'B16004_056E':'65 years and over: Speak other Indo-European languages, Speak English not well',
'B16004_056M':'65 years and over: Speak other Indo-European languages, Speak English not well Margin of Error',
'B16004_057E':'65 years and over: Speak other Indo-European languages, Speak English not at all',
'B16004_057M':'65 years and over: Speak other Indo-European languages, Speak English not at all Margin of Error',
'B16004_058E':'65 years and over: Speak Asian and Pacific Island Languages',
'B16004_058M':'65 years and over: Speak Asian and Pacific Island Languages Margin of Error',
'B16004_061E':'65 years and over: Speak Asian and Pacific Island Languages, Speak English not well',
'B16004_061M':'65 years and over: Speak Asian and Pacific Island Languages, Speak English not well Margin of Error',
'B16004_062E':'65 years and over: Speak Asian and Pacific Island Languages, Speak English not at all',
'B16004_062M':'65 years and over: Speak Asian and Pacific Island Languages, Speak English not at all Margin of Error',
'B16004_063E':'65 years and over: Speak other languages',
'B16004_063M':'65 years and over: Speak other languages Margin of Error',
'B16004_066E':'65 years and over: Speak other languages, Speak English not well',
'B16004_066M':'65 years and over: Speak other languages, Speak English not well Margin of Error',
'B16004_067E':'65 years and over: Speak other languages, Speak English not at all',
'B16004_067M':'65 years and over: Speak other languages, Speak English not at all Margin of Error'
})
language_data = language_data.drop(columns=language_data.columns[language_data.columns.str.contains('B16004')])
language_data['Total Population 5 Years and Over'] = language_data['Total Population 5 Years and Over'].str[2:-1]
# Aggregate the 24 ACS "limited English proficiency" estimate columns
# (3 age groups x 4 language groups x 2 ability levels) into one total.
# The generated strings must match the renamed columns exactly.
_lep_ages = ['5 to 17 years', '18 to 64 years', '65 years and over']
_lep_langs = ['Speak Spanish',
              'Speak other Indo-European languages',
              'Speak Asian and Pacific Island Languages',
              'Speak other languages']
_lep_levels = ['Speak English not well', 'Speak English not at all']
_lep_columns = [f'{_age}: {_lang}, {_level}'
                for _age in _lep_ages
                for _lang in _lep_langs
                for _level in _lep_levels]
language_data['Limited English Proficiency'] = language_data.loc[0, _lep_columns].sum()
language_cols = ['Limited English Proficiency']
# Bar chart of the LEP total. The second subplot slot is left unused,
# matching the original layout.
language_fig = make_subplots(1, 2)
language_fig.add_trace(
    go.Bar(x=language_data[language_cols].columns,
           y=language_data.loc[0, language_cols],
           name='People with Limited English Proficiency'))
language_fig.update_layout(
    title=dict(text='Number of People with Limited English Proficiency',
               font=dict(size=28)),
    yaxis=dict(title=dict(text='# People', font=dict(size=18)),
               tickfont=dict(size=14)))
language_fig.update_xaxes(tickfont=dict(size=14))
language_fig.update_layout(hovermode='x', showlegend=False)
#B11001 Household type
# ACS table B11001: households by type (family / nonfamily breakdown).
housing_url = census_api_url+'?get=group(B11001)'+geography
housing_data = pd.read_csv(housing_url)
# Drop every column whose name contains 'A' — this removes the Census
# annotation columns (and would also catch a 'NAME' column if present).
housing_data = housing_data.drop(columns=housing_data.columns[housing_data.columns.str.contains('A')])
# 'county subdivision]' and 'Unnamed: 41' are artifacts of reading the
# Census JSON-style response with read_csv.
housing_data = housing_data.drop(columns=['GEO_ID','state','county','county subdivision]','Unnamed: 41'])
# Rename B11001 variable codes (E = estimate, M = margin of error) to
# readable labels; the first column name keeps a '[[' parsing artifact.
housing_data = housing_data.rename(columns={'[["B11001_001E"':'Total Households',
'B11001_001M':'Total Households Margin of Error',
'B11001_002E':'Family households',
'B11001_002M':'Family households Margin of Error',
'B11001_003E':'Married-couple family',
'B11001_003M':'Married-couple family Margin of Error',
'B11001_004E':'Other family',
'B11001_004M':'Other family Margin of Error',
'B11001_005E':'Male householder, no spouse present',
'B11001_005M':'Male householder, no spouse present Margin of Error',
'B11001_006E':'Female householder, no spouse present',
'B11001_006M':'Female householder, no spouse present Margin of Error',
'B11001_007E':'Nonfamily households',
'B11001_007M':'Nonfamily households Margin of Error',
'B11001_008E':'Householder living alone',
'B11001_008M':'Householder living alone Margin of Error',
'B11001_009E':'Householder not living alone',
'B11001_009M':'Householder not living alone Margin of Error'
})
# Slice off the JSON artifact characters around the first value, then cast.
housing_data['Total Households'] = housing_data['Total Households'].str[2:-1].astype('int')
# Single-parent households = male + female householders with no spouse
# present (row 0 is the only data row, so .sum() yields a scalar).
housing_data['Single parent'] = housing_data.loc[0,['Male householder, no spouse present','Female householder, no spouse present']].sum()
# Categories shown in the household-type bar chart below.
housing_cols = ['Married-couple family','Single parent',
'Householder living alone','Householder not living alone']
# add own vs rent and cost of rent
# ACS table B25003: tenure (owner- vs renter-occupied households).
renting_url = census_api_url+'?get=group(B25003)'+geography
renting_data = pd.read_csv(renting_url)
# Same cleanup pattern as above: drop annotation/geography columns.
renting_data = renting_data.drop(columns=renting_data.columns[renting_data.columns.str.contains('A')])
renting_data = renting_data.drop(columns=['GEO_ID','state','county','county subdivision]'])
renting_data = renting_data.rename(columns={'[["B25003_001E"':'Total Households (Tenure)',
'B25003_001M':'Total Households (Tenure) Margin of Error',
'B25003_002E':'Owner occupied',
'B25003_002M':'Owner occupied Margin of Error',
'B25003_003E':'Renter occupied',
'B25003_003M':'Renter occupied Margin of Error'})
renting_data['Total Households (Tenure)'] = renting_data['Total Households (Tenure)'].str[2:-1].astype('int')
# 2x1 subplot grid; only row 1 gets a trace — renting_data is surfaced as
# a metric in the page below rather than plotted here.
housing_fig = make_subplots(2,1)
housing_fig.add_trace(go.Bar(x=housing_data[housing_cols].columns,
y=housing_data.loc[0,housing_cols],
name='Households by Type'
),
row=1,col=1)
housing_fig.update_layout(title=dict(text='Number of Households by Type',font=dict(size=28)),
yaxis=dict(title=dict(text='# Households',font=dict(size=18)),
tickfont=dict(size=14))
)
housing_fig.update_xaxes(tickfont=dict(size=14))
housing_fig.update_layout(hovermode='x',showlegend=False)
# energy data,
#B08301 Means of transportation to work
#B08303 Travel time to work
#B08014 Vehicles available
# ACS table B08301: means of transportation to work, workers 16 and over.
transportation_url = census_api_url+'?get=group(B08301)'+geography
transportation_data = pd.read_csv(transportation_url)
# Drop every column whose name contains 'A' (the Census annotation columns).
transportation_data = transportation_data.drop(columns=transportation_data.columns[transportation_data.columns.str.contains('A')])
# Geography columns plus artifacts of reading the JSON-style response as CSV.
transportation_data = transportation_data.drop(columns=['GEO_ID','state','county','county subdivision]','Unnamed: 89'])
# Rename variable codes (E = estimate, M = margin of error) to labels; the
# first column name keeps a '[[' parsing artifact.
transportation_data = transportation_data.rename(columns={'[["B08301_001E"':'Total workers 16 years and over',
'B08301_001M':'Total workers 16 years and over Margin of Error',
'B08301_003E':'Drove alone',
'B08301_003M':'Drove alone Margin of Error',
'B08301_004E':'Carpooled',
'B08301_004M':'Carpooled Margin of Error',
'B08301_010E':'Public transportation',
'B08301_010M':'Public transportation Margin of Error',
'B08301_016E':'Taxicab',
'B08301_016M':'Taxicab Margin of Error',
'B08301_017E':'Motorcycle',
'B08301_017M':'Motorcycle Margin of Error',
'B08301_018E':'Bicycle',
'B08301_018M':'Bicycle Margin of Error',
'B08301_019E':'Walked',
'B08301_019M':'Walked Margin of Error',
'B08301_020E':'Other means',
'B08301_020M':'Other means Margin of Error',
'B08301_021E':'Worked from home',
'B08301_021M':'Worked from home Margin of Error'})
# Remove the remaining un-renamed B08301 sub-category columns.
transportation_data = transportation_data.drop(columns=transportation_data.columns[transportation_data.columns.str.contains('B08301')])
# Fold the low-count modes into a single 'Other' bucket.
# BUG FIX: the original omitted .sum(), assigning a 4-element Series whose
# index (column labels) cannot align with the frame's row index, which left
# 'Other' as NaN in the chart. .sum() collapses it to the intended scalar
# total, matching the aggregation pattern used for the travel-time buckets.
transportation_data['Other'] = transportation_data.loc[0,['Taxicab','Motorcycle','Bicycle','Other means']].sum()
# Categories shown in the commute-mode bar chart.
trans_means_cols = ['Drove alone','Carpooled','Public transportation','Walked','Other','Worked from home']
# ACS table B08303: travel time to work, workers 16 and over.
transportation2_url = census_api_url + '?get=group(B08303)' + geography
transportation2_data = pd.read_csv(transportation2_url)
# Drop Census annotation columns (names containing 'A'), then the
# geography columns and read_csv artifacts of the JSON-style response.
_t2_annot = transportation2_data.columns[transportation2_data.columns.str.contains('A')]
transportation2_data = transportation2_data.drop(columns=_t2_annot)
transportation2_data = transportation2_data.drop(columns=['GEO_ID','state','county','county subdivision]','Unnamed: 57'])
# Build the code -> label rename map programmatically: each B08303 code
# has an estimate (E) and a margin-of-error (M) column, and the first
# estimate column name carries a '[[' parsing artifact.
_time_labels = [('001', 'Total workers 16 years and over (Time)'),
                ('002', 'Less than 5 minutes'),
                ('003', '5 to 9 minutes'),
                ('004', '10 to 14 minutes'),
                ('005', '15 to 19 minutes'),
                ('006', '20 to 24 minutes'),
                ('007', '25 to 29 minutes'),
                ('008', '30 to 34 minutes'),
                ('009', '35 to 39 minutes'),
                ('010', '40 to 44 minutes'),
                ('011', '45 to 59 minutes'),
                ('012', '60 to 89 minutes'),
                ('013', '90 or more minutes')]
_time_renames = {}
for _code, _label in _time_labels:
    _est_key = '[["B08303_001E"' if _code == '001' else 'B08303_' + _code + 'E'
    _time_renames[_est_key] = _label
    _time_renames['B08303_' + _code + 'M'] = _label + ' Margin of Error'
transportation2_data = transportation2_data.rename(columns=_time_renames)
# Collapse the 13 fine-grained bins into four coarser buckets for the chart
# (row 0 is the only data row, so each .sum() yields a scalar).
_time_buckets = {
    'Less than 15 minutes': ['Less than 5 minutes', '5 to 9 minutes', '10 to 14 minutes'],
    '15 to 29 minutes': ['15 to 19 minutes', '20 to 24 minutes', '25 to 29 minutes'],
    '30 to 44 minutes': ['30 to 34 minutes', '35 to 39 minutes', '40 to 44 minutes'],
    '45 minutes or more': ['45 to 59 minutes', '60 to 89 minutes', '90 or more minutes'],
}
for _bucket, _parts in _time_buckets.items():
    transportation2_data[_bucket] = transportation2_data.loc[0, _parts].sum()
trans_time_cols = list(_time_buckets)
# add # of cars available per household
# ACS table B08014: vehicles available, workers 16 and over.
transportation3_url = census_api_url+'?get=group(B08014)'+geography
transportation3_data = pd.read_csv(transportation3_url)
# Drop Census annotation columns (names containing 'A'), then the
# geography columns; 'county subdivision]' is a read_csv artifact.
transportation3_data = transportation3_data.drop(columns=transportation3_data.columns[transportation3_data.columns.str.contains('A')])
transportation3_data = transportation3_data.drop(columns=['GEO_ID','state','county','county subdivision]'])
# Rename variable codes (E = estimate, M = margin of error) to labels;
# the first column name keeps a '[[' parsing artifact.
transportation3_data = transportation3_data.rename(columns={'[["B08014_001E"':'Total workers (Vehicles)',
'B08014_001M':'Total workers (Vehicles) Margin of Error',
'B08014_002E':'No vehicles available',
'B08014_002M':'No vehicles available Margin of Error',
'B08014_003E':'1 vehicle available',
'B08014_003M':'1 vehicle available Margin of Error',
'B08014_004E':'2 vehicles available',
'B08014_004M':'2 vehicles available Margin of Error',
'B08014_005E':'3 vehicles available',
'B08014_005M':'3 vehicles available Margin of Error',
'B08014_006E':'4 vehicles available',
'B08014_006M':'4 vehicles available Margin of Error',
'B08014_007E':'5 or more vehicles available',
'B08014_007M':'5 or more vehicles available Margin of Error'})
# Drop any B08014-coded columns that were not renamed above.
transportation3_data = transportation3_data.drop(columns=transportation3_data.columns[transportation3_data.columns.str.contains('B08014')])
# NOTE(review): transportation3_data is cleaned here but never added to a
# figure in this section — confirm whether the vehicles-available data is
# still intended for display.
# 3x1 subplot grid: commute mode in row 1, commute time in row 2; row 3 is
# left empty, matching the original layout.
transportation_fig = make_subplots(3, 1)
transportation_fig.add_trace(
    go.Bar(x=transportation_data[trans_means_cols].columns,
           y=transportation_data.loc[0, trans_means_cols],
           name='Commutes by Type'),
    row=1, col=1)
transportation_fig.add_trace(
    go.Bar(x=transportation2_data[trans_time_cols].columns,
           y=transportation2_data.loc[0, trans_time_cols],
           name='Commutes by Time'),
    row=2, col=1)
# Single layout pass: title, per-row y-axis labels, shared hover, and a
# taller canvas for the stacked rows.
transportation_fig.update_layout(
    title=dict(text='Number of People by Commuting Characteristics',
               font=dict(size=28)),
    yaxis=dict(title=dict(text='# People', font=dict(size=18)),
               tickfont=dict(size=14)),
    yaxis2=dict(title=dict(text='# People', font=dict(size=18)),
                tickfont=dict(size=14)),
    hovermode='x',
    showlegend=False,
    height=1200)
transportation_fig.update_xaxes(tickfont=dict(size=14))
# Combine the per-table frames side by side (each is a single data row,
# indexed 0) into one wide row for the page below.
# NOTE(review): transportation2_data and transportation3_data are not
# concatenated — confirm whether travel-time / vehicles columns are needed
# in the returned frame.
data = pd.concat([income_data,med_inc_data,race_data,age_data,
education_data,disability_data,language_data,
housing_data,renting_data,transportation_data],axis=1)
return data, income_fig, income_map, race_fig, age_fig, edu_fig, disability_fig, language_fig, housing_fig, transportation_fig
# Load the cleaned census data and pre-built figures. The unpack is
# positional: the figure returned as edu_fig is bound here as education_fig.
data, income_fig, income_map, race_fig, age_fig, education_fig, disability_fig, language_fig, housing_fig, transportation_fig = load_data()
# --- Page sections: one collapsible expander per topic. ---
with st.expander('Maynard, Environmental Justice and Income'):
st.write('Maynard has a population of '+str('{:,}'.format(data.loc[0,'Total Population']))+' \
people living in '+str('{:,}'.format(data.loc[0,'Total Households']))+' households.')
st.write('Maynard is composed of two census tracts and seven block groups. \
One block group has been designated by the state as an environmental \
justice community based on its median household income. Households \
with lower incomes are more vulnerable to the impacts of climate change.')
st.image('images/Maynard EJ cropped.png',
caption='State-defined environmental justice census blocks in Maynard.',
width=300)
st.plotly_chart(income_map)
#st.table(income_data[income_cols])
# Two side-by-side headline metrics.
col1,col2 = st.columns(2)
with col1:
st.metric('Median Household Income','$'+str('{:,}'.format(data.loc[0,'Median Household Income'])))
with col2:
st.metric('Median Age',data.loc[0,'Median age (years)'])
st.plotly_chart(income_fig)
st.write('Note: 2021 data from the U.S. Census Bureau is used for consistency with the state GEAR tool.\
The U.S. Census Bureau uses two "tracts" and seven "block groups" \
to represent the Town of Maynard. These are developed to create \
areas across the country with similar amounts of people for \
comparison and analysis.')
with st.expander('Race and Ethnicity in Maynard'):
st.write('Racial and ethnicity data will go here.')
st.plotly_chart(race_fig)
with st.expander('Education in Maynard'):
st.image('images/Maynard DESE.png',caption='Maynard Public School data from MA Department of Elementary and Secondary Education (DESE).')
st.plotly_chart(education_fig)
with st.expander('Disabilities in Maynard'):
st.plotly_chart(disability_fig)
with st.expander('Languages in Maynard'):
st.write('What languages do we speak in Maynard?')
# NOTE(review): the denominator is total population, but the ACS language
# table universe is presumably ages 5 and over — confirm the intended base.
percent_limited_english = data.loc[0,'Limited English Proficiency']/data.loc[0,'Total Population']*100
st.metric('Percent of residents with limited English proficiency',str(percent_limited_english.round(2))+'%')
#st.plotly_chart(language_fig)
with st.expander('Housing in Maynard'):
st.write('Household types, housing cost and insecurity')
# Share of households renting, from the B25003 tenure table.
percent_renter = (data.loc[0,'Renter occupied']/data.loc[0,'Total Households (Tenure)']*100).round(2)
st.metric('Percent Households Renting',str(percent_renter)+'%')
st.plotly_chart(housing_fig)
with st.expander('Food insecurity in Maynard'):
st.write('Food insecurity data goes here')
with st.expander('Energy insecurity in Maynard'):
st.image('images/Maynard Mass Save cropped.jpg',
caption='Maynard block groups ranked by participation in the \
Mass Save program for energy efficiency. Darker colors signal \
lower rates of participation.',
width=400)
#st.write('Areas in Maynard with more rental properties have lower rates of participation.')
with st.expander('Transportation in Maynard'):
st.plotly_chart(transportation_fig)