From 32b2690f9e6f8c2170733b153902b18f080ce2ad Mon Sep 17 00:00:00 2001 From: wcurrangroome Date: Fri, 16 Jan 2026 13:57:02 -0500 Subject: [PATCH] significant documentation updates and new vignettes --- .claude/settings.local.json | 10 + DESCRIPTION | 11 +- NAMESPACE | 1 + R/get_business_patterns.R | 34 +- R/get_current_fire_perimeters.R | 24 +- R/get_emergency_managerment_performance.R | 25 +- R/get_fema_disaster_declarations.R | 25 +- R/get_government_finances.R | 23 + R/get_hazard_mitigation_assistance.R | 36 +- R/get_ihp_registrations.R | 33 +- R/get_lodes.R | 40 +- R/get_nfip_claims.R | 85 +-- R/get_nfip_policies.R | 19 +- R/get_preliminary_damage_assessments.R | 41 +- R/get_public_assistance.R | 16 +- R/get_sba_loans.R | 23 +- R/get_sheldus.R | 25 +- R/get_structures.R | 20 +- R/get_wildfire_burn_zones.R | 97 ++++ R/test.R | 0 _pkgdown.yml | 1 + man/get_business_patterns.Rd | 34 +- man/get_current_fire_perimeters.Rd | 22 +- man/get_emergency_management_performance.Rd | 26 +- man/get_fema_disaster_declarations.Rd | 22 +- man/get_government_finances.Rd | 24 +- man/get_hazard_mitigation_assistance.Rd | 37 +- man/get_ihp_registrations.Rd | 34 +- man/get_lodes.Rd | 42 +- man/get_nfip_claims.Rd | 9 +- man/get_nfip_policies.Rd | 6 +- man/get_preliminary_damage_assessments.Rd | 36 +- man/get_public_assistance.Rd | 15 +- man/get_sba_loans.Rd | 23 +- man/get_sheldus.Rd | 25 +- man/get_structures.Rd | 18 +- man/get_wildfire_burn_zones.Rd | 58 +++ renv.lock | 485 ++++++++++++++---- tests/testthat.R | 4 + tests/testthat/test-get_wildfire_burn_zones.R | 137 +++++ vignettes/get_sheldus.Rmd | 222 ++++++++ vignettes/get_structures.Rmd | 229 +++++++++ vignettes/get_wildfire_burn_zones.Rmd | 215 ++++++++ 43 files changed, 2055 insertions(+), 257 deletions(-) create mode 100644 .claude/settings.local.json create mode 100644 R/get_wildfire_burn_zones.R create mode 100644 R/test.R create mode 100644 man/get_wildfire_burn_zones.Rd create mode 100644 tests/testthat.R create mode 
100644 tests/testthat/test-get_wildfire_burn_zones.R create mode 100644 vignettes/get_sheldus.Rmd create mode 100644 vignettes/get_structures.Rmd create mode 100644 vignettes/get_wildfire_burn_zones.Rmd diff --git a/.claude/settings.local.json b/.claude/settings.local.json new file mode 100644 index 0000000..348e6d9 --- /dev/null +++ b/.claude/settings.local.json @@ -0,0 +1,10 @@ +{ + "permissions": { + "allow": [ + "Bash(dpkg:*)", + "Bash(pkg-config:*)", + "Bash(apt list:*)", + "Bash(Rscript:*)" + ] + } +} diff --git a/DESCRIPTION b/DESCRIPTION index f381b96..2c758bd 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -15,14 +15,15 @@ Authors@R: c( Description: This packages combines a set of utilities for acquiring and processing climate and climate-adjacent datasets under a consistent API. It takes opinionated stances on how to manipulate raw data in an effort to produce standard workflows - that enable project teams to devote more time to substantive analysis and inference-making. + that enable project teams to devote more time to substantive analysis. 
License: MIT + file LICENSE Encoding: UTF-8 Roxygen: list(markdown = TRUE) -RoxygenNote: 7.3.2 -Imports: +RoxygenNote: 7.3.3 +Imports: arrow, censusapi, + sfarrow, dplyr, esri2sf (>= 0.1.1), ellmer, @@ -60,9 +61,11 @@ Remotes: UI-Research/urbnindicators, UrbanInstitute/urbnthemes URL: https://ui-research.github.io/climateapi/ -Suggests: +Suggests: knitr, qualtRics, rmarkdown, + testthat (>= 3.0.0), tidyverse +Config/testthat/edition: 3 VignetteBuilder: knitr diff --git a/NAMESPACE b/NAMESPACE index 0e38021..00e50c7 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -22,6 +22,7 @@ export(get_sheldus) export(get_spatial_extent_census) export(get_structures) export(get_system_username) +export(get_wildfire_burn_zones) export(inflation_adjust) export(interpolate_demographics) export(polygons_to_linestring) diff --git a/R/get_business_patterns.R b/R/get_business_patterns.R index e8a01d8..5458c72 100644 --- a/R/get_business_patterns.R +++ b/R/get_business_patterns.R @@ -1,9 +1,35 @@ #' Obtain County Business Patterns (CBP) Estimates per County #' -#' @param year The vintage of CBP data desired. Data are available from 1986, though this function likely only supports more recent years (it it tested on 2022-vintage data only). Default is 2022. -#' @param naics_code_digits One of c(2, 3). Default is 2. NAICS codes range in specificity; 2-digit codes describe the highest groupings of industries, while six-digit codes are exceedingly detailed. There are 20 2-digit NAICS codes and 196 3-digit codes. -#' @param naics_codes A vector of NAICS codes to query. If NULL, the function will query all available codes with the specified number of digits. If not NULL, this argument overrides the `naics_code_digits` argument. 
-#' @return A tibble with data on county-level employees, employers, and aggregate annual payrolls by industry and employer size +#' @description Retrieves County Business Patterns data from the Census Bureau, +#' providing counts of establishments, employees, and payroll by industry and +#' employer size at the county level. +#' +#' @param year The vintage of CBP data desired. Data are available from 1986, though +#' this function likely only supports more recent years (it is tested on 2022-vintage +#' data only). Default is 2022. +#' @param naics_code_digits One of c(2, 3). Default is 2. NAICS codes range in specificity; +#' 2-digit codes describe the highest groupings of industries, while six-digit codes +#' are exceedingly detailed. There are 20 2-digit NAICS codes and 196 3-digit codes. +#' @param naics_codes A vector of NAICS codes to query. If NULL, the function will query +#' all available codes with the specified number of digits. If not NULL, this argument +#' overrides the `naics_code_digits` argument. +#' +#' @details Data are from the U.S. Census Bureau's County Business Patterns program. +#' See \url{https://www.census.gov/programs-surveys/cbp.html} and +#' \url{https://www.census.gov/naics/} for NAICS code definitions. +#' +#' @return A tibble with data on county-level employees, employers, and aggregate +#' annual payrolls by industry and employer size. 
Columns include: +#' \describe{ +#' \item{state}{Two-digit state FIPS code.} +#' \item{county}{Three-digit county FIPS code.} +#' \item{employees}{Number of employees mid-March of the reference year.} +#' \item{employers}{Number of establishments.} +#' \item{annual_payroll}{Annual payroll in thousands of dollars.} +#' \item{industry}{NAICS industry description (lowercase, underscored).} +#' \item{employee_size_range_label}{Human-readable employer size category.} +#' \item{employee_size_range_code}{Census code for employer size range.} +#' } #' @export #' #' @examples diff --git a/R/get_current_fire_perimeters.R b/R/get_current_fire_perimeters.R index 1d81d97..dcc0905 100644 --- a/R/get_current_fire_perimeters.R +++ b/R/get_current_fire_perimeters.R @@ -1,14 +1,30 @@ -# Author: Will Curran-Groome - #' @importFrom magrittr %>% -#' @title Acquire wildfire perimeters +#' @title Acquire current wildfire perimeters +#' +#' @description Retrieves current wildfire perimeter data from the NIFC (National +#' Interagency Fire Center) via the Wildland Fire Interagency Geospatial Services +#' (WFIGS) API. +#' #' @param geography Included only for API consistency; this must be NULL. #' @param file_path Included only for API consistency; this must be NULL. #' @param bbox Optionally, an sf::st_bbox() object, or an object that can be converted to such. #' @param api Included only for API consistency; this must be TRUE. #' -#' @returns A library(sf) enabled dataframe comprising perimeters of current wildfires. +#' @details Data are from the NIFC WFIGS service. See +#' \url{https://data-nifc.opendata.arcgis.com/datasets/nifc::wfigs-interagency-fire-perimeters/about}. +#' +#' @returns An sf dataframe comprising perimeters of current wildfires. 
Columns include: +#' \describe{ +#' \item{unique_id}{Unique identifier for each observation (generated).} +#' \item{incident_name}{Name of the fire incident (title case).} +#' \item{incident_size_acres}{Size of the fire in acres.} +#' \item{incident_short_description}{Brief description of the incident.} +#' \item{percent_contained}{Percent of fire contained (0-100).} +#' \item{identified_date}{Date/time the fire was discovered.} +#' \item{updated_date}{Date/time the record was last updated.} +#' \item{geometry}{Polygon geometry of the fire perimeter.} +#' } #' @export #' #' @examples diff --git a/R/get_emergency_managerment_performance.R b/R/get_emergency_managerment_performance.R index b176c82..9022c0b 100644 --- a/R/get_emergency_managerment_performance.R +++ b/R/get_emergency_managerment_performance.R @@ -1,9 +1,30 @@ -#' Get EMPG data +#' Get Emergency Management Performance Grant (EMPG) data +#' +#' @description Retrieves Emergency Management Performance Grant (EMPG) award data +#' from FEMA, which supports state and local emergency management agencies. #' #' @param file_path Path to the downloaded dataset on Box. -#' @param api Logical indicating whether to use the OpenFEMA API to retrieve the data. Default is TRUE. +#' @param api Logical indicating whether to use the OpenFEMA API to retrieve the data. +#' Default is TRUE. +#' +#' @details Data are from FEMA's OpenFEMA API. See +#' \url{https://www.fema.gov/openfema-data-page/emergency-management-performance-grants-v2}. #' #' @return A data frame containing emergency management performance grant (EMPG) data. 
+#' Columns include: +#' \describe{ +#' \item{id}{Unique identifier for the grant record.} +#' \item{state_name}{Full state name.} +#' \item{state_code}{Two-digit state FIPS code.} +#' \item{state_abbreviation}{Two-letter state abbreviation.} +#' \item{year_project_start}{Year the project started.} +#' \item{project_start_date}{Date the project started.} +#' \item{project_end_date}{Date the project ended.} +#' \item{grant_amount}{Total grant amount in dollars.} +#' \item{federal_share}{Federal portion of the grant in dollars.} +#' \item{non_federal_share}{Non-federal cost share in dollars.} +#' \item{program}{EMPG program type.} +#' } #' @export get_emergency_management_performance = function( diff --git a/R/get_fema_disaster_declarations.R b/R/get_fema_disaster_declarations.R index b7b3584..6e8137a 100644 --- a/R/get_fema_disaster_declarations.R +++ b/R/get_fema_disaster_declarations.R @@ -1,9 +1,28 @@ -# Author: Kameron Lloyd - #' @title Get major disaster declarations by county +#' +#' @description Retrieves FEMA Major Disaster Declarations at the county level, +#' aggregated by year and month. Tribal declarations are stored separately as +#' an attribute. +#' #' @param file_path The path (on Box) to the file containing the raw data. #' @param api If TRUE (default), access data from the API. Else, read locally from `file_path`. -#' @returns A dataframe comprising Major Disaster Declarations by month by year by county. Tribal declarations are stored as an attribute of the primary dataframe called `tribal_declarations`. +#' +#' @details Data are from FEMA's OpenFEMA API. See +#' \url{https://www.fema.gov/openfema-data-page/disaster-declarations-summaries-v2}. +#' Statewide declarations are expanded to all counties in the state. +#' +#' @returns A dataframe comprising Major Disaster Declarations by month by year by county. +#' Tribal declarations are stored as an attribute (`tribal_declarations`). 
Columns include: +#' \describe{ +#' \item{unique_id}{Unique identifier for each observation.} +#' \item{GEOID}{Five-digit county FIPS code.} +#' \item{year_declared}{Year the disaster was declared.} +#' \item{month_declared}{Month the disaster was declared (1-12).} +#' \item{declaration_title}{Title(s) of the disaster declaration(s).} +#' \item{incidents_all}{Total count of disaster declarations in the county-month.} +#' \item{incidents_natural_hazard}{Count of natural hazard declarations.} +#' \item{incidents_*}{Additional columns for other incident types, each of which reflects the count of the given incident type.} +#' } #' @export #' @examples #' \dontrun{ diff --git a/R/get_government_finances.R b/R/get_government_finances.R index e9d4f55..de9ca11 100644 --- a/R/get_government_finances.R +++ b/R/get_government_finances.R @@ -1,8 +1,31 @@ #' Get government unit-level expenses from the Census of Governments #' +#' @description Retrieves government unit-level finance data from the Census of Governments, +#' including expenses by category for state, county, city, township, special district, +#' and school district government units. +#' #' @param year A four-digit year. The default is 2022. #' +#' @details Data are from the U.S. Census Bureau's Annual Survey of State and Local +#' Government Finances and Census of Governments. See +#' \url{https://www.census.gov/programs-surveys/gov-finances.html}. +#' #' @return A dataframe containing government unit-level expenses for the specified year. +#' Columns include: +#' \describe{ +#' \item{unit_id}{Unique identifier for the government unit.} +#' \item{year_data}{Year of the financial data.} +#' \item{amount_thousands}{Total expenses in thousands of dollars.} +#' \item{government_type}{Type of government (State, County, City, Township, Special District, School District).} +#' \item{data_quality}{Proportion of expense items that were reported (vs. 
imputed).} +#' \item{state_code}{Two-digit state FIPS code.} +#' \item{county_code}{Three-digit county FIPS code.} +#' \item{unit_name}{Name of the government unit.} +#' \item{county_name}{Name of the county.} +#' \item{population}{Population served by the unit.} +#' \item{enrollment}{Student enrollment (for school districts).} +#' \item{amount_per_capita}{Expenses per capita or per enrolled student.} +#' } #' @export get_government_finances = function(year = 2022) { diff --git a/R/get_hazard_mitigation_assistance.R b/R/get_hazard_mitigation_assistance.R index 935cffb..29ad50b 100644 --- a/R/get_hazard_mitigation_assistance.R +++ b/R/get_hazard_mitigation_assistance.R @@ -26,11 +26,39 @@ clean_county = function(county) { #' Get Hazard Mitigation Assistance (HMA) Project Details #' -#' @param file_path_old_grant_system The file path to raw data for HMA applications from the older grant-reporting system. These data are typically available from: https://www.fema.gov/openfema-data-page/hazard-mitigation-assistance-projects-v4 -#' @param file_path_new_grant_system The file path to raw data for HMA applications from the newer (FEMA GO) grant-reporting system. These data are typically available from: https://www.fema.gov/openfema-data-page/hma-subapplications-v2 -#' @param state_abbreviations NULL by default, in which case data are returned for all 51 states. Provide a vector of two-character USPS state abbreviations to obtain data for a sub-selection of states. +#' @description Retrieves Hazard Mitigation Assistance project data from both the legacy +#' HMA Projects dataset and the newer FEMA GO subapplications dataset, harmonized +#' at the project-county level. #' -#' @return A dataframe of project-county HMA application data, aggregated across both old and new grant reporting systems. +#' @param file_path_old_grant_system The file path to raw data for HMA applications from +#' the older grant-reporting system. 
These data are typically available from: +#' \url{https://www.fema.gov/openfema-data-page/hazard-mitigation-assistance-projects-v4} +#' @param file_path_new_grant_system The file path to raw data for HMA applications from +#' the newer (FEMA GO) grant-reporting system. These data are typically available from: +#' \url{https://www.fema.gov/openfema-data-page/hma-subapplications-v2} +#' @param state_abbreviations NULL by default, in which case data are returned for all 51 +#' states. Provide a vector of two-character USPS state abbreviations to obtain data for +#' a sub-selection of states. +#' +#' @details Data are from FEMA's OpenFEMA API, combining two data sources: the legacy +#' Hazard Mitigation Assistance Projects (v4) and the newer HMA Subapplications (v2). +#' Multi-county projects are split across counties based on population proportions. +#' +#' @return A dataframe of project-county HMA application data. Only `project_cost_federal_split` +#' should be used for county-level aggregations. 
Columns include: +#' \describe{ +#' \item{data_source}{"hma-projects" (legacy) or "hma-subapplications" (FEMA GO).} +#' \item{project_id}{Unique project identifier.} +#' \item{disaster_number}{FEMA disaster number (if disaster-related).} +#' \item{project_program_area}{HMA program: HMGP, BRIC, FMA, or PDM.} +#' \item{project_fiscal_year}{Fiscal year of the project.} +#' \item{state_name}{Full state name.} +#' \item{county_geoid}{Five-digit county FIPS code.} +#' \item{county_population}{County population used for allocation.} +#' \item{project_status}{Current project status (e.g., "Closed", "Active").} +#' \item{project_cost_federal}{Total federal cost at project level.} +#' \item{project_cost_federal_split}{Federal cost allocated to this county.} +#' } #' @export #' #' @examples diff --git a/R/get_ihp_registrations.R b/R/get_ihp_registrations.R index a33114f..37ec2aa 100644 --- a/R/get_ihp_registrations.R +++ b/R/get_ihp_registrations.R @@ -2,12 +2,41 @@ #' @title Get Individuals and Households Program (IHP) registrations #' -#' @param state_fips A character vector of two-letter state abbreviations. If NULL (default), return data for all 51 states. Otherwise return data for the specified states. +#' @description Retrieves FEMA Individual and Households Program (IHP) registration data, +#' which captures applications for disaster assistance from individuals and households. +#' +#' @param state_fips A character vector of two-letter state abbreviations. If NULL (default), +#' return data for all 51 states. Otherwise return data for the specified states. #' @param file_name The name (not the full path) of the Box file containing the raw data. #' @param api If TRUE, query the API. If FALSE (default), read from disk. #' @param outpath The path to save the parquet-formatted datafile. Applicable only when `api = FALSE`. #' -#' @returns A dataframe comprising IHP registrations +#' @details Data are from FEMA's OpenFEMA API. 
See +#' \url{https://www.fema.gov/openfema-data-page/individuals-and-households-program-valid-registrations-v2}. +#' +#' @returns A dataframe comprising IHP registrations. Note that records are duplicated +#' due to a many-to-many join with a ZCTA-to-county crosswalk; use `allocation_factor_zcta_to_county` +#' to properly aggregate. Columns include: +#' \describe{ +#' \item{unique_id}{Unique identifier for the original registration.} +#' \item{allocation_factor_zcta_to_county}{Weight for attributing registration to county.} +#' \item{geoid_county}{Five-digit county FIPS code.} +#' \item{zcta_code}{Five-digit ZIP Code Tabulation Area.} +#' \item{geoid_tract}{11-digit census tract FIPS code.} +#' \item{geoid_block_group}{12-digit census block group FIPS code.} +#' \item{disaster_number}{FEMA disaster number.} +#' \item{amount_individual_housing_program}{Total IHP assistance amount in dollars.} +#' \item{amount_housing_assistance}{Housing assistance amount in dollars.} +#' \item{amount_other_needs_assistance}{Other needs assistance amount in dollars.} +#' \item{amount_rental_assistance}{Rental assistance amount in dollars.} +#' \item{amount_repairs}{Repair assistance amount in dollars.} +#' \item{amount_replacement}{Replacement assistance amount in dollars.} +#' \item{amount_personal_property}{Personal property assistance amount in dollars.} +#' \item{amount_flood_insurance_premium_paid_by_fema}{FEMA-paid flood insurance premium.} +#' \item{state_name}{Full state name.} +#' \item{state_abbreviation}{Two-letter state abbreviation.} +#' \item{state_code}{Two-digit state FIPS code.} +#' } #' @export #' #' @examples diff --git a/R/get_lodes.R b/R/get_lodes.R index e398069..3a57166 100644 --- a/R/get_lodes.R +++ b/R/get_lodes.R @@ -91,16 +91,46 @@ rename_lodes_variables = function(.df) { } #' Get LEHD Origin-Destination Employment Statistics (LODES) data -#' Returned data are from LODES Version 8, which is enumerated in 2020-vintage geometries. 
#' -#' @param lodes_type One of c("rac", "wac", "od"). "rac" = Residence Area Characteristics, where jobs are associated with employees' residences. "wac" = Workplace Area Characteristics, where jobs are associated with employees' workplaces. "od" = Origin-Destination data, where jobs are associated with both workers' residences and their workplaces. -#' @param jobs_type One of c("all", "primary"). Default is "all", which includes multiple jobs for workers with multiple jobs. "primary" includes only the highest-paying job per worker. +#' @description Retrieves LODES employment data at various geographic levels. +#' Returned data are from LODES Version 8, which is enumerated in 2020-vintage geometries. +#' +#' @param lodes_type One of c("rac", "wac", "od"). "rac" = Residence Area Characteristics, +#' where jobs are associated with employees' residences. "wac" = Workplace Area Characteristics, +#' where jobs are associated with employees' workplaces. "od" = Origin-Destination data, +#' where jobs are associated with both workers' residences and their workplaces. +#' @param jobs_type One of c("all", "primary"). Default is "all", which includes multiple +#' jobs for workers with multiple jobs. "primary" includes only the highest-paying job per worker. #' @param states A vector of state abbreviations. #' @param years A vector of years. #' @param geography One of c("block", "block group", "tract", "county", "state"). Default is "tract". -#' @param state_part One of c("main", "aux"). Default is "main", which includes only workers who reside inside the state where they work. "aux" returns only workers who work in the specified state but live outside of that state. +#' @param state_part One of c("main", "aux"). Default is "main", which includes only workers +#' who reside inside the state where they work. "aux" returns only workers who work in the +#' specified state but live outside of that state. 
+#' +#' @details Data are from the Longitudinal Employer-Household Dynamics (LEHD) program. +#' See \url{https://lehd.ces.census.gov/data/} and the technical documentation at +#' \url{https://lehd.ces.census.gov/data/lodes/LODES8/LODESTechDoc8.0.pdf}. #' -#' @return A tibble with one record per geography per year per job type. Attributes include total jobs and jobs by worker earnings, industry, and demographics; the origin-destination results have more limited demographics compared to the "wac" and "rac" results. +#' @return A tibble with one record per geography per year per job type. Key columns include: +#' \describe{ +#' \item{year}{Year of the data.} +#' \item{state}{Two-letter state abbreviation.} +#' \item{job_type}{Job category, per the `jobs_type` argument ("all" or "primary").} +#' \item{h_GEOID / w_GEOID}{Home (h) or work (w) GEOID depending on lodes_type.} +#' \item{jobs}{Total number of jobs.} +#' \item{jobs_workers_age_29_or_younger}{Jobs for workers age 29 or younger.} +#' \item{jobs_workers_age_30_to_54}{Jobs for workers age 30 to 54.} +#' \item{jobs_workers_age_55_or_older}{Jobs for workers age 55 or older.} +#' \item{jobs_earnings_1250_month_or_less}{Jobs with earnings $1250/month or less.} +#' \item{jobs_earnings_1251_month_to_3333_month}{Jobs with earnings $1251-$3333/month.} +#' \item{jobs_industry_*}{Jobs by NAICS industry sector (20 sectors for WAC/RAC).} +#' \item{jobs_workers_race_*}{Jobs by worker race (WAC/RAC only).} +#' \item{jobs_workers_ethnicity_*}{Jobs by worker ethnicity (WAC/RAC only).} +#' \item{jobs_workers_educational_attainment_*}{Jobs by education level (WAC/RAC only).} +#' \item{jobs_workers_sex_*}{Jobs by worker sex (WAC/RAC only).} +#' } #' @export get_lodes = function( lodes_type, diff --git a/R/get_nfip_claims.R b/R/get_nfip_claims.R index ffc1bd1..e280682 100644 --- a/R/get_nfip_claims.R +++ b/R/get_nfip_claims.R @@ -1,10 +1,15 @@ #' @title Access 
county-level data on NFIP claims -#' @param county_geoids A character vector of five-digit county codes. NULL by default; must be non-NULL if `api = TRUE`. +#' +#' @description Retrieves National Flood Insurance Program (NFIP) claims data at +#' the county level, including damage amounts, payments, and building characteristics. +#' +#' @param county_geoids A character vector of five-digit county codes. NULL by default; +#' must be non-NULL if `api = TRUE`. #' @param file_name The name (not the full path) of the Box file containing the raw data. #' @param api If TRUE, query the API. FALSE by default. #' -#' @details -#' These data are from: https://www.fema.gov/openfema-data-page/fima-nfip-redacted-claims-v2. +#' @details Data are from FEMA's OpenFEMA API. See +#' \url{https://www.fema.gov/openfema-data-page/fima-nfip-redacted-claims-v2}. #' Per FEMA: This data set represents more than 2,000,000 NFIP claims transactions. It is #' derived from the NFIP system of record, staged in the NFIP reporting platform and #' redacted to protect policy holder personally identifiable information. 
The @@ -114,7 +119,8 @@ get_nfip_claims = function( df1b = df1a |> tidytable::mutate( ### taking county_code if it exists, if not extract from census_tract - county_geoid = tidytable::if_else(!is.na(county_code), county_code, stringr::str_sub(census_tract, 1, 5))) + county_geoid = tidytable::if_else( + !is.na(county_code), county_code, stringr::str_sub(census_tract, 1, 5))) if (!is.null(county_geoids)) { df1b = df1b |> @@ -138,46 +144,49 @@ get_nfip_claims = function( #state_abbreviation = state, ## this is unreliable--other fields appear to be more consistent county_geoid, county_name, - occupancy_type = dplyr::case_when(occupancy_type %in% c(1, 11) ~ "single family", - occupancy_type %in% c(2, 3, 12, 13, 16, 15) ~ "multi-family", - occupancy_type %in% c(14) ~ "mobile/manufactured home", - occupancy_type %in% c(4, 6, 17, 18, 19) ~ "non-residential"), + occupancy_type = dplyr::case_when( + occupancy_type %in% c(1, 11) ~ "single family", + occupancy_type %in% c(2, 3, 12, 13, 16, 15) ~ "multi-family", + occupancy_type %in% c(14) ~ "mobile/manufactured home", + occupancy_type %in% c(4, 6, 17, 18, 19) ~ "non-residential"), year_loss = year_of_loss, year_construction = lubridate::year(original_construction_date), count_units_insured = policy_count, ## number of insured units associated with the claim #cause_of_damage, #flood_event_name = flood_event, #flood_zone_firm_current = flood_zone_current, - deductible_building = dplyr::case_when(building_deductible_code == "0" ~ 500, - building_deductible_code == "1" ~ 1000, - building_deductible_code == "2" ~ 2000, - building_deductible_code == "3" ~ 3000, - building_deductible_code == "4" ~ 4000, - building_deductible_code == "5" ~ 5000, - building_deductible_code == "9" ~ 750, - building_deductible_code == "A" ~ 10000, - building_deductible_code == "B" ~ 15000, - building_deductible_code == "C" ~ 20000, - building_deductible_code == "D" ~ 25000, - building_deductible_code == "E" ~ 50000, - building_deductible_code == "F" ~ 
1250, - building_deductible_code == "G" ~ 500, - building_deductible_code == "H" ~ 200), - deductible_contents = dplyr::case_when(contents_deductible_code == "0" ~ 500, - contents_deductible_code == "1" ~ 1000, - contents_deductible_code == "2" ~ 2000, - contents_deductible_code == "3" ~ 3000, - contents_deductible_code == "4" ~ 4000, - contents_deductible_code == "5" ~ 5000, - contents_deductible_code == "9" ~ 750, - contents_deductible_code == "A" ~ 10000, - contents_deductible_code == "B" ~ 15000, - contents_deductible_code == "C" ~ 20000, - contents_deductible_code == "D" ~ 25000, - contents_deductible_code == "E" ~ 50000, - contents_deductible_code == "F" ~ 1250, - contents_deductible_code == "G" ~ 500, - contents_deductible_code == "H" ~ 200), + deductible_building = dplyr::case_when( + building_deductible_code == "0" ~ 500, + building_deductible_code == "1" ~ 1000, + building_deductible_code == "2" ~ 2000, + building_deductible_code == "3" ~ 3000, + building_deductible_code == "4" ~ 4000, + building_deductible_code == "5" ~ 5000, + building_deductible_code == "9" ~ 750, + building_deductible_code == "A" ~ 10000, + building_deductible_code == "B" ~ 15000, + building_deductible_code == "C" ~ 20000, + building_deductible_code == "D" ~ 25000, + building_deductible_code == "E" ~ 50000, + building_deductible_code == "F" ~ 1250, + building_deductible_code == "G" ~ 500, + building_deductible_code == "H" ~ 200), + deductible_contents = dplyr::case_when( + contents_deductible_code == "0" ~ 500, + contents_deductible_code == "1" ~ 1000, + contents_deductible_code == "2" ~ 2000, + contents_deductible_code == "3" ~ 3000, + contents_deductible_code == "4" ~ 4000, + contents_deductible_code == "5" ~ 5000, + contents_deductible_code == "9" ~ 750, + contents_deductible_code == "A" ~ 10000, + contents_deductible_code == "B" ~ 15000, + contents_deductible_code == "C" ~ 20000, + contents_deductible_code == "D" ~ 25000, + contents_deductible_code == "E" ~ 50000, + 
contents_deductible_code == "F" ~ 1250, + contents_deductible_code == "G" ~ 500, + contents_deductible_code == "H" ~ 200), value_building = building_property_value, value_contents = contents_property_value, replacement_cost_building = building_replacement_cost, diff --git a/R/get_nfip_policies.R b/R/get_nfip_policies.R index 0770af4..aaf34ff 100644 --- a/R/get_nfip_policies.R +++ b/R/get_nfip_policies.R @@ -1,10 +1,15 @@ #' @title Access county-level data on NFIP policies +#' +#' @description Retrieves National Flood Insurance Program (NFIP) policy data at +#' the county level, including both current and historical policies. +#' #' @param state_abbreviation A 2 letter state abbreviation (e.g. TX). #' @param county_geoids A character vector of five-digit county codes. #' @param file_name The name (not the full path) of the Box file containing the raw data. #' @param api If TRUE, query the API. If FALSE (default), read from `file_name`. #' -#' @details +#' @details Data are from FEMA's OpenFEMA API. See +#' \url{https://www.fema.gov/openfema-data-page/fima-nfip-redacted-policies-v2}. #' #' The following dataset houses information on NFIP policies (both historic and current). 
#' In order to filter to current policies, the analyst will need to filter on the @@ -88,7 +93,8 @@ get_nfip_policies = function( tidytable::mutate(county_geoid = tidytable::if_else( !is.na(county_code), county_code, - stringr::str_sub(census_tract, 1, 5))) ### taking county_code if it exists, if not extract from census_tract + ### taking county_code if it exists, if not extract from census_tract + stringr::str_sub(census_tract, 1, 5))) if (!is.null(county_geoids)) { df1b = df1b |> @@ -119,10 +125,11 @@ get_nfip_policies = function( policy_premium_total_cost = total_insurance_premium_of_the_policy, policy_date_termination = policy_termination_date, policy_date_effective = policy_effective_date, - building_occupancy_type = dplyr::case_when(occupancy_type %in% c(1, 11) ~ "single family", - occupancy_type %in% c(2, 12, 3, 13, 16, 15) ~ "multi-family", - occupancy_type %in% c(14) ~ "mobile/manufactured home", - occupancy_type %in% c(4, 6, 17, 18, 19) ~ "non-residential"), + building_occupancy_type = dplyr::case_when( + occupancy_type %in% c(1, 11) ~ "single family", + occupancy_type %in% c(2, 12, 3, 13, 16, 15) ~ "multi-family", + occupancy_type %in% c(14) ~ "mobile/manufactured home", + occupancy_type %in% c(4, 6, 17, 18, 19) ~ "non-residential"), building_replacement_cost = building_replacement_cost) message(" diff --git a/R/get_preliminary_damage_assessments.R b/R/get_preliminary_damage_assessments.R index 4a12f51..a4c81b5 100644 --- a/R/get_preliminary_damage_assessments.R +++ b/R/get_preliminary_damage_assessments.R @@ -248,17 +248,40 @@ extract_pda_attributes = function(path) { #' Get Data from Preliminary Damage Assessments Submitted to FEMA for Disaster Declarations #' -#' These data reflect extracted attributes from PDF preliminary damage assessments -#' hosted on FEMA's website at: https://www.fema.gov/disaster/how-declared/preliminary-damage-assessments/reports. 
-#' Owing to the unstructured nature of the source documents, some fields may be incorrect -#' in the data returned by the function, though significant quality checks have been -#' implemented in an effort to produce a high-quality dataset. +#' @description Retrieves data extracted from PDF preliminary damage assessment (PDA) +#' reports submitted to FEMA for disaster declarations. #' -#' @param file_path The file path to the cached dataset, or if there is no cache, the path at which to cache the resulting data. -#' @param directory_path The path to the directory where PDA PDFs are stored. Use `scrape_pda_pdfs` to generate these files. -#' @param use_cache Boolean. Read the existing dataset stored at `file_path`? If FALSE, data will be generated anew. Else, if a file exists at `file_path`, this file will be returned. +#' @details Data are extracted from PDF reports hosted at +#' \url{https://www.fema.gov/disaster/how-declared/preliminary-damage-assessments/reports}. +#' Owing to the unstructured nature of the source documents, some fields may be incorrect +#' in the data returned by the function, though significant quality checks have been +#' implemented in an effort to produce a high-quality dataset. #' -#' @return A dataframe of preliminary damage assessment reports. +#' @param file_path The file path to the cached dataset, or if there is no cache, the path +#' at which to cache the resulting data. +#' @param directory_path The path to the directory where PDA PDFs are stored. Use +#' `scrape_pda_pdfs` to generate these files. +#' @param use_cache Boolean. Read the existing dataset stored at `file_path`? If FALSE, +#' data will be generated anew. Else, if a file exists at `file_path`, this file will be returned. +#' +#' @return A dataframe of preliminary damage assessment reports. 
Key columns include: +#' \describe{ +#' \item{disaster_number}{FEMA disaster number.} +#' \item{event_type}{Type of decision: "approved", "denial", "appeal_approved", or "appeal_denial".} +#' \item{event_title}{Title/description of the disaster event.} +#' \item{event_date_determined}{Date the PDA determination was made.} +#' \item{event_native_flag}{1 if tribal request, 0 otherwise.} +#' \item{ia_requested}{1 if Individual Assistance was requested, 0 otherwise.} +#' \item{ia_residences_impacted}{Total residences impacted.} +#' \item{ia_residences_destroyed}{Number of residences destroyed.} +#' \item{ia_residences_major_damage}{Number of residences with major damage.} +#' \item{ia_residences_minor_damage}{Number of residences with minor damage.} +#' \item{ia_cost_estimate_total}{Estimated total Individual Assistance cost.} +#' \item{pa_requested}{1 if Public Assistance was requested, 0 otherwise.} +#' \item{pa_cost_estimate_total}{Estimated total Public Assistance cost.} +#' \item{pa_per_capita_impact_statewide}{Statewide per capita impact amount.} +#' \item{pa_per_capita_impact_indicator_statewide}{Met/Not Met indicator for statewide threshold.} +#' } #' @export #' #' @examples diff --git a/R/get_public_assistance.R b/R/get_public_assistance.R index 2dec622..0bf8b9d 100644 --- a/R/get_public_assistance.R +++ b/R/get_public_assistance.R @@ -2,24 +2,28 @@ #' Get FEMA Public Assistance (PA) funding #' -#' Project- and county-level data on PA funding over time +#' @description Retrieves FEMA Public Assistance (PA) project funding data, +#' crosswalked to the county level for geographic analysis. #' -#' @param state_abbreviations A character vector of state abbreviations. NULL by default, which returns records for all 51 states. Only the 51 states are supported at this time. +#' @param state_abbreviations A character vector of state abbreviations. NULL by default, +#' which returns records for all 51 states. Only the 51 states are supported at this time. 
#' @param file_path The file path to the raw data contained in a .parquet file. #' -#' @details +#' @details Data are from FEMA's OpenFEMA API. See +#' \url{https://www.fema.gov/openfema-data-page/public-assistance-funded-projects-details-v2}. +#' #' These data have been crosswalked so that estimates can be aggregated at the county level. #' This is necessary (for county-level estimates) because many projects are statewide #' projects and do not have county-level observations in the data. #' #' Analysts thus have two options for working with these data: #' (1) De-select the variables suffixed with `_split` and then run `distinct(df)`. -#' This will provide unique observations for projects; projects are both county-level -#' and statewide. These data can be aggregated to the state level but cannot be +#' This will provide unique observations for projects; projects can be either county-level +#' or statewide. These data can be aggregated to the state level but cannot be #' comprehensively aggregated to the county level. #' (2) Group the data at the county level and summarize to produce county-level #' characterizations of PA projects and funding, using the `_split`-suffixed -#' variables to calculate funding totals. For example, this might look like: +#' variables to calculate funding totals. #' #' The attribution of statewide projects to the county level occurs by proportionally attributing #' project costs based on county-level populations. For example, in a fictional state with two diff --git a/R/get_sba_loans.R b/R/get_sba_loans.R index a6e470c..5a7d0a0 100644 --- a/R/get_sba_loans.R +++ b/R/get_sba_loans.R @@ -3,8 +3,27 @@ #' @importFrom magrittr %>% #' @title Access SBA data on disaster loans - -#' @returns A dataframe comprising city- and zip-level data on SBA loanmaking +#' @description Retrieves Small Business Administration (SBA) disaster loan data +#' for both home and business loans at the city and zip code level. 
+#' +#' @details Data are sourced from the SBA's disaster loan reports. See +#' \url{https://www.sba.gov/funding-programs/disaster-assistance}. +#' +#' @returns A dataframe comprising city- and zip-level data on SBA loanmaking. +#' Columns include: +#' \describe{ +#' \item{fiscal_year}{The federal fiscal year of the loan.} +#' \item{disaster_number_fema}{FEMA disaster number associated with the loan.} +#' \item{disaster_number_sba_physical}{SBA physical disaster declaration number.} +#' \item{disaster_number_sba_eidl}{SBA Economic Injury Disaster Loan (EIDL) declaration number.} +#' \item{damaged_property_zip_code}{ZIP code of the damaged property.} +#' \item{damaged_property_city_name}{City name of the damaged property.} +#' \item{damaged_property_state_code}{Two-letter state abbreviation.} +#' \item{verified_loss_total}{Total verified loss amount in dollars.} +#' \item{approved_amount_total}{Total approved loan amount in dollars.} +#' \item{approved_amount_real_estate}{Approved loan amount for real estate in dollars.} +#' \item{loan_type}{Type of loan: "residential" or "business".} +#' } #' @export #' #' @examples diff --git a/R/get_sheldus.R b/R/get_sheldus.R index 7cc1321..02a0516 100644 --- a/R/get_sheldus.R +++ b/R/get_sheldus.R @@ -1,7 +1,30 @@ -#' @title Access temporal county-level SHELDUS hazard damage data. +#' @title Access temporal county-level SHELDUS hazard damage data +#' +#' @description Retrieves county-level hazard event data from the Spatial Hazard Events +#' and Losses Database for the United States (SHELDUS), including property damage, +#' crop damage, fatalities, and injuries. +#' #' @param file_path The path to the raw SHELDUS data. #' +#' @details Data are from Arizona State University's SHELDUS database. Access requires +#' a subscription. See \url{https://cemhs.asu.edu/sheldus}. +#' #' @returns A dataframe comprising hazard x month x year x county observations of hazard events. 
+#' Columns include: +#' \describe{ +#' \item{unique_id}{Unique identifier for each observation.} +#' \item{GEOID}{Five-digit county FIPS code.} +#' \item{state_name}{Full state name (sentence case).} +#' \item{county_name}{County name.} +#' \item{year}{Year of the hazard event(s).} +#' \item{month}{Month of the hazard event(s).} +#' \item{hazard}{Type of hazard (e.g., "Flooding", "Hurricane/Tropical Storm").} +#' \item{damage_property}{Property damage in 2023 inflation-adjusted dollars.} +#' \item{damage_crop}{Crop damage in 2023 inflation-adjusted dollars.} +#' \item{fatalities}{Number of fatalities.} +#' \item{injuries}{Number of injuries.} +#' \item{records}{Number of individual events aggregated into this observation.} +#' } #' @export #' #' @examples diff --git a/R/get_structures.R b/R/get_structures.R index 9bc7e32..3f97b67 100644 --- a/R/get_structures.R +++ b/R/get_structures.R @@ -1,12 +1,26 @@ #' @importFrom magrittr %>% #' @title Estimate counts of hazard-impacted structures by structure type +#' @description Retrieves building footprint data from the USA Structures dataset and +#' summarizes structure counts by type at the tract or county level. #' @param geography The desired geography of the results. One of "tract" or "county". #' @param boundaries A POLYGON or MULTIPOLYGON object, or an sf::st_bbox()-style bbox. #' @param keep_structures Logical. If TRUE, the raw structure data will be returned alongside the summarized data. - -#' @returns A dataframe comprising estimated counts of each structure type, at the specified `geography`, for all such geographic units intersecting the `boundaries` object. If keep_structure = TRUE, returns a list with two elements: the summarized data and the raw structure data. - +#' +#' @details Data are sourced from the USA Structures dataset maintained by the +#' Department of Homeland Security. See \url{https://geoplatform.gov/metadata/9d4a3ae3-8637-4707-92a7-b7d67b769a6b}. 
+#'
+#' @returns A dataframe comprising estimated counts of each structure type, at the
+#' specified `geography`, for all such geographic units intersecting the `boundaries`
+#' object. If keep_structures = TRUE, returns a list with two elements: the summarized
+#' data and the raw structure data. Columns include:
+#' \describe{
+#' \item{GEOID}{Census tract (11-digit) or county (5-digit) FIPS code.}
+#' \item{primary_occupancy}{The primary use of the structure (e.g., "Residential", "Commercial").}
+#' \item{occupancy_class}{Broader classification of occupancy type.}
+#' \item{count}{Number of structures of this type in the geographic unit.}
+#' }
+#'
 #' @export
 #' @examples
 #' \dontrun{
diff --git a/R/get_wildfire_burn_zones.R b/R/get_wildfire_burn_zones.R
new file mode 100644
index 0000000..a4f7e0f
--- /dev/null
+++ b/R/get_wildfire_burn_zones.R
@@ -0,0 +1,97 @@
+#' @title Get wildfire burn zones
+#'
+#' @description Returns spatial data on wildfire burn zones in the US from 2000-2025.
+#' This dataset harmonizes six wildfire datasets (FIRED, MTBS, NIFC, ICS-209, RedBook, and FEMA)
+#' to identify wildfires that burned near communities and resulted in civilian fatalities,
+#' destroyed structures, or received federal disaster relief.
+#'
+#' @param file_path The path to the geojson file containing the raw data. Defaults to a
+#' path within Box.
+#'
+#' @details Data are from a harmonized wildfire burn zone disaster dataset combining
+#' FIRED, MTBS, NIFC, ICS-209, RedBook, and FEMA data sources. Geometries are in
+#' NAD83 / Conus Albers (EPSG:5070).
+#'
+#' @returns An sf dataframe comprising wildfire burn zone disasters. Each row represents a
+#' single wildfire event, with polygon geometries representing burn zones. 
+#' Columns include: +#' \describe{ +#' \item{wildfire_id}{Unique identifier for the wildfire event.} +#' \item{id_fema}{FEMA disaster identifier (if applicable).} +#' \item{year}{Year of the wildfire.} +#' \item{wildfire_name}{Name of the wildfire or fire complex.} +#' \item{county_fips}{Pipe-delimited string of five-digit county FIPS codes for all +#' counties affected by the wildfire.} +#' \item{county_name}{Pipe-delimited string of county names for all counties +#' affected by the wildfire.} +#' \item{area_sq_km}{Burned area in square kilometers.} +#' \item{wildfire_complex_binary}{Whether the fire is a complex (multiple fires).} +#' \item{date_start}{Ignition date.} +#' \item{date_containment}{Containment date.} +#' \item{fatalities_total}{Total fatalities.} +#' \item{injuries_total}{Total injuries.} +#' \item{structures_destroyed}{Number of structures destroyed.} +#' \item{structures_threatened}{Number of structures threatened.} +#' \item{evacuation_total}{Total evacuations.} +#' \item{wui_type}{Wildland-urban interface type.} +#' \item{density_people_sq_km_wildfire_buffer}{Population density in wildfire buffer area.} +#' \item{geometry}{Burn zone polygon geometry.} +#' } +#' @export +#' @examples +#' \dontrun{ +#' burn_zones <- get_wildfire_burn_zones() +#' } +get_wildfire_burn_zones <- function( + file_path = file.path( + get_box_path(), "hazards", "other-sources", "wildfire-burn-zones", + "wfbz_disasters_2000-2025.geojson")) { + + if (!file.exists(file_path)) { + stop(stringr::str_c( + "The path to the dataset does not point to a valid file. 
", + "Please ensure there is a file located at this path: ", file_path, ".")) + } + + burn_zones1 <- sf::st_read(file_path, quiet = TRUE) |> + janitor::clean_names() |> + sf::st_transform(5070) + + burn_zones2 <- burn_zones1 |> + dplyr::transmute( + wildfire_id = wildfire_id, + id_fema = fema_id, + year = as.integer(wildfire_year), + wildfire_name = wildfire_complex_names, + county_fips = wildfire_counties_fips, + county_name = wildfire_counties, + area_sq_km = wildfire_area, + wildfire_complex_binary = wildfire_complex, + date_start = lubridate::as_date(wildfire_ignition_date), + date_containment = lubridate::as_date(wildfire_containment_date), + fatalities_total = as.integer(wildfire_total_fatalities), + injuries_total = as.integer(wildfire_total_injuries), + structures_destroyed = as.integer(wildfire_struct_destroyed), + structures_threatened = as.integer(wildfire_struct_threatened), + evacuation_total = as.integer(wildfire_total_evacuation), + wui_type = wildfire_wui, + ## codebook says this is per square meter, but that must be a typo based on distribution of density values + ## other values are per square kilometer, which makes more sense + density_people_sq_km_wildfire_buffer = wildfire_buffered_avg_pop_den) + + message(stringr::str_c( + "Each observation represents a wildfire burn zone disaster. ", + "Counties affected by each wildfire are stored as pipe-delimited strings in county_fips and county_name columns. ", + "Disasters are defined as wildfires that burned near a community and resulted in ", + "at least one civilian fatality, one destroyed structure, or received federal disaster relief. 
", + "Geometries represent burn zone perimeters sourced from FIRED, MTBS, or NIFC datasets.")) + + return(burn_zones2) +} + +utils::globalVariables(c( + "wildfire_id", "fema_id", "wildfire_year", "wildfire_complex_names", "wildfire_counties", + "wildfire_counties_fips", "wildfire_area", "wildfire_complex", "wildfire_ignition_date", + "wildfire_containment_date", "wildfire_total_fatalities", "wildfire_total_injuries", + "wildfire_struct_destroyed", "wildfire_struct_threatened", "wildfire_total_evacuation", + "wildfire_wui", "wildfire_buffered_avg_pop_den")) diff --git a/R/test.R b/R/test.R new file mode 100644 index 0000000..e69de29 diff --git a/_pkgdown.yml b/_pkgdown.yml index def5c1f..44e7cb8 100644 --- a/_pkgdown.yml +++ b/_pkgdown.yml @@ -31,6 +31,7 @@ reference: - title: Event boundaries contents: - get_current_fire_perimeters + - get_wildfire_burn_zones - title: Land use contents: - estimate_zoning_envelope diff --git a/man/get_business_patterns.Rd b/man/get_business_patterns.Rd index 3c2f7a5..0083198 100644 --- a/man/get_business_patterns.Rd +++ b/man/get_business_patterns.Rd @@ -7,17 +7,41 @@ get_business_patterns(year = 2022, naics_code_digits = 2, naics_codes = NULL) } \arguments{ -\item{year}{The vintage of CBP data desired. Data are available from 1986, though this function likely only supports more recent years (it it tested on 2022-vintage data only). Default is 2022.} +\item{year}{The vintage of CBP data desired. Data are available from 1986, though +this function likely only supports more recent years (it is tested on 2022-vintage +data only). Default is 2022.} -\item{naics_code_digits}{One of c(2, 3). Default is 2. NAICS codes range in specificity; 2-digit codes describe the highest groupings of industries, while six-digit codes are exceedingly detailed. There are 20 2-digit NAICS codes and 196 3-digit codes.} +\item{naics_code_digits}{One of c(2, 3). Default is 2. 
NAICS codes range in specificity; +2-digit codes describe the highest groupings of industries, while six-digit codes +are exceedingly detailed. There are 20 2-digit NAICS codes and 196 3-digit codes.} -\item{naics_codes}{A vector of NAICS codes to query. If NULL, the function will query all available codes with the specified number of digits. If not NULL, this argument overrides the \code{naics_code_digits} argument.} +\item{naics_codes}{A vector of NAICS codes to query. If NULL, the function will query +all available codes with the specified number of digits. If not NULL, this argument +overrides the \code{naics_code_digits} argument.} } \value{ -A tibble with data on county-level employees, employers, and aggregate annual payrolls by industry and employer size +A tibble with data on county-level employees, employers, and aggregate +annual payrolls by industry and employer size. Columns include: +\describe{ +\item{state}{Two-digit state FIPS code.} +\item{county}{Three-digit county FIPS code.} +\item{employees}{Number of employees mid-March of the reference year.} +\item{employers}{Number of establishments.} +\item{annual_payroll}{Annual payroll in thousands of dollars.} +\item{industry}{NAICS industry description (lowercase, underscored).} +\item{employee_size_range_label}{Human-readable employer size category.} +\item{employee_size_range_code}{Census code for employer size range.} +} } \description{ -Obtain County Business Patterns (CBP) Estimates per County +Retrieves County Business Patterns data from the Census Bureau, +providing counts of establishments, employees, and payroll by industry and +employer size at the county level. +} +\details{ +Data are from the U.S. Census Bureau's County Business Patterns program. +See \url{https://www.census.gov/programs-surveys/cbp.html} and +\url{https://www.census.gov/naics/} for NAICS code definitions. 
} \examples{ \dontrun{ diff --git a/man/get_current_fire_perimeters.Rd b/man/get_current_fire_perimeters.Rd index 9676d71..7042505 100644 --- a/man/get_current_fire_perimeters.Rd +++ b/man/get_current_fire_perimeters.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/get_current_fire_perimeters.R \name{get_current_fire_perimeters} \alias{get_current_fire_perimeters} -\title{Acquire wildfire perimeters} +\title{Acquire current wildfire perimeters} \usage{ get_current_fire_perimeters( geography = NULL, @@ -21,10 +21,26 @@ get_current_fire_perimeters( \item{api}{Included only for API consistency; this must be TRUE.} } \value{ -A library(sf) enabled dataframe comprising perimeters of current wildfires. +An sf dataframe comprising perimeters of current wildfires. Columns include: +\describe{ +\item{unique_id}{Unique identifier for each observation (generated).} +\item{incident_name}{Name of the fire incident (title case).} +\item{incident_size_acres}{Size of the fire in acres.} +\item{incident_short_description}{Brief description of the incident.} +\item{percent_contained}{Percent of fire contained (0-100).} +\item{identified_date}{Date/time the fire was discovered.} +\item{updated_date}{Date/time the record was last updated.} +\item{geometry}{Polygon geometry of the fire perimeter.} +} } \description{ -Acquire wildfire perimeters +Retrieves current wildfire perimeter data from the NIFC (National +Interagency Fire Center) via the Wildland Fire Interagency Geospatial Services +(WFIGS) API. +} +\details{ +Data are from the NIFC WFIGS service. See +\url{https://data-nifc.opendata.arcgis.com/datasets/nifc::wfigs-interagency-fire-perimeters/about}. 
} \examples{ \dontrun{ diff --git a/man/get_emergency_management_performance.Rd b/man/get_emergency_management_performance.Rd index b2aaae7..a909600 100644 --- a/man/get_emergency_management_performance.Rd +++ b/man/get_emergency_management_performance.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/get_emergency_managerment_performance.R \name{get_emergency_management_performance} \alias{get_emergency_management_performance} -\title{Get EMPG data} +\title{Get Emergency Management Performance Grant (EMPG) data} \usage{ get_emergency_management_performance( file_path = file.path(get_box_path(), "hazards", "FEMA", @@ -14,11 +14,31 @@ get_emergency_management_performance( \arguments{ \item{file_path}{Path to the downloaded dataset on Box.} -\item{api}{Logical indicating whether to use the OpenFEMA API to retrieve the data. Default is TRUE.} +\item{api}{Logical indicating whether to use the OpenFEMA API to retrieve the data. +Default is TRUE.} } \value{ A data frame containing emergency management performance grant (EMPG) data. +Columns include: +\describe{ +\item{id}{Unique identifier for the grant record.} +\item{state_name}{Full state name.} +\item{state_code}{Two-digit state FIPS code.} +\item{state_abbreviation}{Two-letter state abbreviation.} +\item{year_project_start}{Year the project started.} +\item{project_start_date}{Date the project started.} +\item{project_end_date}{Date the project ended.} +\item{grant_amount}{Total grant amount in dollars.} +\item{federal_share}{Federal portion of the grant in dollars.} +\item{non_federal_share}{Non-federal cost share in dollars.} +\item{program}{EMPG program type.} +} } \description{ -Get EMPG data +Retrieves Emergency Management Performance Grant (EMPG) award data +from FEMA, which supports state and local emergency management agencies. +} +\details{ +Data are from FEMA's OpenFEMA API. See +\url{https://www.fema.gov/openfema-data-page/emergency-management-performance-grants-v2}. 
} diff --git a/man/get_fema_disaster_declarations.Rd b/man/get_fema_disaster_declarations.Rd index 18f3f8d..ee0e193 100644 --- a/man/get_fema_disaster_declarations.Rd +++ b/man/get_fema_disaster_declarations.Rd @@ -16,10 +16,28 @@ get_fema_disaster_declarations( \item{api}{If TRUE (default), access data from the API. Else, read locally from \code{file_path}.} } \value{ -A dataframe comprising Major Disaster Declarations by month by year by county. Tribal declarations are stored as an attribute of the primary dataframe called \code{tribal_declarations}. +A dataframe comprising Major Disaster Declarations by month by year by county. +Tribal declarations are stored as an attribute (\code{tribal_declarations}). Columns include: +\describe{ +\item{unique_id}{Unique identifier for each observation.} +\item{GEOID}{Five-digit county FIPS code.} +\item{year_declared}{Year the disaster was declared.} +\item{month_declared}{Month the disaster was declared (1-12).} +\item{declaration_title}{Title(s) of the disaster declaration(s).} +\item{incidents_all}{Total count of disaster declarations in the county-month.} +\item{incidents_natural_hazard}{Count of natural hazard declarations.} +\item{incidents_*}{Additional columns for other incident types, each of which reflects the count of the given incident type.} +} } \description{ -Get major disaster declarations by county +Retrieves FEMA Major Disaster Declarations at the county level, +aggregated by year and month. Tribal declarations are stored separately as +an attribute. +} +\details{ +Data are from FEMA's OpenFEMA API. See +\url{https://www.fema.gov/openfema-data-page/disaster-declarations-summaries-v2}. +Statewide declarations are expanded to all counties in the state. 
} \examples{ \dontrun{ diff --git a/man/get_government_finances.Rd b/man/get_government_finances.Rd index a7fc04b..4117f1a 100644 --- a/man/get_government_finances.Rd +++ b/man/get_government_finances.Rd @@ -11,7 +11,29 @@ get_government_finances(year = 2022) } \value{ A dataframe containing government unit-level expenses for the specified year. +Columns include: +\describe{ +\item{unit_id}{Unique identifier for the government unit.} +\item{year_data}{Year of the financial data.} +\item{amount_thousands}{Total expenses in thousands of dollars.} +\item{government_type}{Type of government (State, County, City, Township, Special District, School District).} +\item{data_quality}{Proportion of expense items that were reported (vs. imputed).} +\item{state_code}{Two-digit state FIPS code.} +\item{county_code}{Three-digit county FIPS code.} +\item{unit_name}{Name of the government unit.} +\item{county_name}{Name of the county.} +\item{population}{Population served by the unit.} +\item{enrollment}{Student enrollment (for school districts).} +\item{amount_per_capita}{Expenses per capita or per enrolled student.} +} } \description{ -Get government unit-level expenses from the Census of Governments +Retrieves government unit-level finance data from the Census of Governments, +including expenses by category for state, county, city, township, special district, +and school district government units. +} +\details{ +Data are from the U.S. Census Bureau's Annual Survey of State and Local +Government Finances and Census of Governments. See +\url{https://www.census.gov/programs-surveys/gov-finances.html}. 
} diff --git a/man/get_hazard_mitigation_assistance.Rd b/man/get_hazard_mitigation_assistance.Rd index ca8de19..3937063 100644 --- a/man/get_hazard_mitigation_assistance.Rd +++ b/man/get_hazard_mitigation_assistance.Rd @@ -14,17 +14,44 @@ get_hazard_mitigation_assistance( ) } \arguments{ -\item{file_path_old_grant_system}{The file path to raw data for HMA applications from the older grant-reporting system. These data are typically available from: https://www.fema.gov/openfema-data-page/hazard-mitigation-assistance-projects-v4} +\item{file_path_old_grant_system}{The file path to raw data for HMA applications from +the older grant-reporting system. These data are typically available from: +\url{https://www.fema.gov/openfema-data-page/hazard-mitigation-assistance-projects-v4}} -\item{file_path_new_grant_system}{The file path to raw data for HMA applications from the newer (FEMA GO) grant-reporting system. These data are typically available from: https://www.fema.gov/openfema-data-page/hma-subapplications-v2} +\item{file_path_new_grant_system}{The file path to raw data for HMA applications from +the newer (FEMA GO) grant-reporting system. These data are typically available from: +\url{https://www.fema.gov/openfema-data-page/hma-subapplications-v2}} -\item{state_abbreviations}{NULL by default, in which case data are returned for all 51 states. Provide a vector of two-character USPS state abbreviations to obtain data for a sub-selection of states.} +\item{state_abbreviations}{NULL by default, in which case data are returned for all 51 +states. Provide a vector of two-character USPS state abbreviations to obtain data for +a sub-selection of states.} } \value{ -A dataframe of project-county HMA application data, aggregated across both old and new grant reporting systems. +A dataframe of project-county HMA application data. Only \code{project_cost_federal_split} +should be used for county-level aggregations. 
Columns include: +\describe{ +\item{data_source}{"hma-projects" (legacy) or "hma-subapplications" (FEMA GO).} +\item{project_id}{Unique project identifier.} +\item{disaster_number}{FEMA disaster number (if disaster-related).} +\item{project_program_area}{HMA program: HMGP, BRIC, FMA, or PDM.} +\item{project_fiscal_year}{Fiscal year of the project.} +\item{state_name}{Full state name.} +\item{county_geoid}{Five-digit county FIPS code.} +\item{county_population}{County population used for allocation.} +\item{project_status}{Current project status (e.g., "Closed", "Active").} +\item{project_cost_federal}{Total federal cost at project level.} +\item{project_cost_federal_split}{Federal cost allocated to this county.} +} } \description{ -Get Hazard Mitigation Assistance (HMA) Project Details +Retrieves Hazard Mitigation Assistance project data from both the legacy +HMA Projects dataset and the newer FEMA GO subapplications dataset, harmonized +at the project-county level. +} +\details{ +Data are from FEMA's OpenFEMA API, combining two data sources: the legacy +Hazard Mitigation Assistance Projects (v4) and the newer HMA Subapplications (v2). +Multi-county projects are split across counties based on population proportions. } \examples{ \dontrun{ diff --git a/man/get_ihp_registrations.Rd b/man/get_ihp_registrations.Rd index a523847..9f859e7 100644 --- a/man/get_ihp_registrations.Rd +++ b/man/get_ihp_registrations.Rd @@ -12,7 +12,8 @@ get_ihp_registrations( ) } \arguments{ -\item{state_fips}{A character vector of two-letter state abbreviations. If NULL (default), return data for all 51 states. Otherwise return data for the specified states.} +\item{state_fips}{A character vector of two-letter state abbreviations. If NULL (default), +return data for all 51 states. 
Otherwise return data for the specified states.} \item{file_name}{The name (not the full path) of the Box file containing the raw data.} @@ -21,10 +22,37 @@ get_ihp_registrations( \item{outpath}{The path to save the parquet-formatted datafile. Applicable only when \code{api = FALSE}.} } \value{ -A dataframe comprising IHP registrations +A dataframe comprising IHP registrations. Note that records are duplicated +due to a many-to-many join with a ZCTA-to-county crosswalk; use \code{allocation_factor_zcta_to_county} +to properly aggregate. Columns include: +\describe{ +\item{unique_id}{Unique identifier for the original registration.} +\item{allocation_factor_zcta_to_county}{Weight for attributing registration to county.} +\item{geoid_county}{Five-digit county FIPS code.} +\item{zcta_code}{Five-digit ZIP Code Tabulation Area.} +\item{geoid_tract}{11-digit census tract FIPS code.} +\item{geoid_block_group}{12-digit census block group FIPS code.} +\item{disaster_number}{FEMA disaster number.} +\item{amount_individual_housing_program}{Total IHP assistance amount in dollars.} +\item{amount_housing_assistance}{Housing assistance amount in dollars.} +\item{amount_other_needs_assistance}{Other needs assistance amount in dollars.} +\item{amount_rental_assistance}{Rental assistance amount in dollars.} +\item{amount_repairs}{Repair assistance amount in dollars.} +\item{amount_replacement}{Replacement assistance amount in dollars.} +\item{amount_personal_property}{Personal property assistance amount in dollars.} +\item{amount_flood_insurance_premium_paid_by_fema}{FEMA-paid flood insurance premium.} +\item{state_name}{Full state name.} +\item{state_abbreviation}{Two-letter state abbreviation.} +\item{state_code}{Two-digit state FIPS code.} +} } \description{ -Get Individuals and Households Program (IHP) registrations +Retrieves FEMA Individual and Households Program (IHP) registration data, +which captures applications for disaster assistance from individuals and households. 
+} +\details{ +Data are from FEMA's OpenFEMA API. See +\url{https://www.fema.gov/openfema-data-page/individuals-and-households-program-valid-registrations-v2}. } \examples{ \dontrun{ diff --git a/man/get_lodes.Rd b/man/get_lodes.Rd index a0427a8..c44b7c8 100644 --- a/man/get_lodes.Rd +++ b/man/get_lodes.Rd @@ -2,8 +2,7 @@ % Please edit documentation in R/get_lodes.R \name{get_lodes} \alias{get_lodes} -\title{Get LEHD Origin-Destination Employment Statistics (LODES) data -Returned data are from LODES Version 8, which is enumerated in 2020-vintage geometries.} +\title{Get LEHD Origin-Destination Employment Statistics (LODES) data} \usage{ get_lodes( lodes_type, @@ -15,9 +14,13 @@ get_lodes( ) } \arguments{ -\item{lodes_type}{One of c("rac", "wac", "od"). "rac" = Residence Area Characteristics, where jobs are associated with employees' residences. "wac" = Workplace Area Characteristics, where jobs are associated with employees' workplaces. "od" = Origin-Destination data, where jobs are associated with both workers' residences and their workplaces.} +\item{lodes_type}{One of c("rac", "wac", "od"). "rac" = Residence Area Characteristics, +where jobs are associated with employees' residences. "wac" = Workplace Area Characteristics, +where jobs are associated with employees' workplaces. "od" = Origin-Destination data, +where jobs are associated with both workers' residences and their workplaces.} -\item{jobs_type}{One of c("all", "primary"). Default is "all", which includes multiple jobs for workers with multiple jobs. "primary" includes only the highest-paying job per worker.} +\item{jobs_type}{One of c("all", "primary"). Default is "all", which includes multiple +jobs for workers with multiple jobs. "primary" includes only the highest-paying job per worker.} \item{states}{A vector of state abbreviations.} @@ -25,12 +28,37 @@ get_lodes( \item{geography}{One of c("block", "block group", "tract", "county", "state"). 
Default is "tract".} -\item{state_part}{One of c("main", "aux"). Default is "main", which includes only workers who reside inside the state where they work. "aux" returns only workers who work in the specified state but live outside of that state.} +\item{state_part}{One of c("main", "aux"). Default is "main", which includes only workers +who reside inside the state where they work. "aux" returns only workers who work in the +specified state but live outside of that state.} } \value{ -A tibble with one record per geography per year per job type. Attributes include total jobs and jobs by worker earnings, industry, and demographics; the origin-destination results have more limited demographics compared to the "wac" and "rac" results. +A tibble with one record per geography per year per job type. Key columns include: +\describe{ +\item{year}{Year of the data.} +\item{state}{Two-letter state abbreviation.} +\item{job_type}{"all" or "federal" indicating the job category.} +\item{h_GEOID / w_GEOID}{Home (h) or work (w) GEOID depending on lodes_type.} +\item{jobs}{Total number of jobs.} +\item{jobs_workers_age_29_or_younger}{Jobs for workers age 29 or younger.} +\item{jobs_workers_age_30_to_54}{Jobs for workers age 30 to 54.} +\item{jobs_workers_age_55_or_older}{Jobs for workers age 55 or older.} +\item{jobs_earnings_1250_month_or_less}{Jobs with earnings $1250/month or less.} +\item{jobs_earnings_1251_month_to_3333_month}{Jobs with earnings $1251-$3333/month.} +\item{jobs_earnings_greater_than_3333_month}{Jobs with earnings >$3333/month.} +\item{jobs_industry_*}{Jobs by NAICS industry sector (20 sectors for WAC/RAC).} +\item{jobs_workers_race_*}{Jobs by worker race (WAC/RAC only).} +\item{jobs_workers_ethnicity_*}{Jobs by worker ethnicity (WAC/RAC only).} +\item{jobs_workers_educational_attainment_*}{Jobs by education level (WAC/RAC only).} +\item{jobs_workers_sex_*}{Jobs by worker sex (WAC/RAC only).} +} } \description{ -Get LEHD Origin-Destination Employment
Statistics (LODES) data +Retrieves LODES employment data at various geographic levels. Returned data are from LODES Version 8, which is enumerated in 2020-vintage geometries. } +\details{ +Data are from the Longitudinal Employer-Household Dynamics (LEHD) program. +See \url{https://lehd.ces.census.gov/data/} and the technical documentation at +\url{https://lehd.ces.census.gov/data/lodes/LODES8/LODESTechDoc8.0.pdf}. +} diff --git a/man/get_nfip_claims.Rd b/man/get_nfip_claims.Rd index d480d0f..69672b2 100644 --- a/man/get_nfip_claims.Rd +++ b/man/get_nfip_claims.Rd @@ -11,7 +11,8 @@ get_nfip_claims( ) } \arguments{ -\item{county_geoids}{A character vector of five-digit county codes. NULL by default; must be non-NULL if \code{api = TRUE}.} +\item{county_geoids}{A character vector of five-digit county codes. NULL by default; +must be non-NULL if \code{api = TRUE}.} \item{file_name}{The name (not the full path) of the Box file containing the raw data.} @@ -44,10 +45,12 @@ A data frame comprising county-level data on current NFIP policies } } \description{ -Access county-level data on NFIP claims +Retrieves National Flood Insurance Program (NFIP) claims data at +the county level, including damage amounts, payments, and building characteristics. } \details{ -These data are from: https://www.fema.gov/openfema-data-page/fima-nfip-redacted-claims-v2. +Data are from FEMA's OpenFEMA API. See +\url{https://www.fema.gov/openfema-data-page/fima-nfip-redacted-claims-v2}. Per FEMA: This data set represents more than 2,000,000 NFIP claims transactions. It is derived from the NFIP system of record, staged in the NFIP reporting platform and redacted to protect policy holder personally identifiable information. 
The diff --git a/man/get_nfip_policies.Rd b/man/get_nfip_policies.Rd index bf86511..8ed550e 100644 --- a/man/get_nfip_policies.Rd +++ b/man/get_nfip_policies.Rd @@ -41,9 +41,13 @@ either because it was cancelled or lapsed.} } } \description{ -Access county-level data on NFIP policies +Retrieves National Flood Insurance Program (NFIP) policy data at +the county level, including both current and historical policies. } \details{ +Data are from FEMA's OpenFEMA API. See +\url{https://www.fema.gov/openfema-data-page/fima-nfip-redacted-policies-v2}. + The following dataset houses information on NFIP policies (both historic and current). In order to filter to current policies, the analyst will need to filter on the policy_date_termination and policy_date_effective columns. diff --git a/man/get_preliminary_damage_assessments.Rd b/man/get_preliminary_damage_assessments.Rd index 0dbb189..563e5f3 100644 --- a/man/get_preliminary_damage_assessments.Rd +++ b/man/get_preliminary_damage_assessments.Rd @@ -13,18 +13,42 @@ get_preliminary_damage_assessments( ) } \arguments{ -\item{file_path}{The file path to the cached dataset, or if there is no cache, the path at which to cache the resulting data.} +\item{file_path}{The file path to the cached dataset, or if there is no cache, the path +at which to cache the resulting data.} -\item{directory_path}{The path to the directory where PDA PDFs are stored. Use \code{scrape_pda_pdfs} to generate these files.} +\item{directory_path}{The path to the directory where PDA PDFs are stored. Use +\code{scrape_pda_pdfs} to generate these files.} -\item{use_cache}{Boolean. Read the existing dataset stored at \code{file_path}? If FALSE, data will be generated anew. Else, if a file exists at \code{file_path}, this file will be returned.} +\item{use_cache}{Boolean. Read the existing dataset stored at \code{file_path}? If FALSE, +data will be generated anew. 
Else, if a file exists at \code{file_path}, this file will be returned.} } \value{ -A dataframe of preliminary damage assessment reports. +A dataframe of preliminary damage assessment reports. Key columns include: +\describe{ +\item{disaster_number}{FEMA disaster number.} +\item{event_type}{Type of decision: "approved", "denial", "appeal_approved", or "appeal_denial".} +\item{event_title}{Title/description of the disaster event.} +\item{event_date_determined}{Date the PDA determination was made.} +\item{event_native_flag}{1 if tribal request, 0 otherwise.} +\item{ia_requested}{1 if Individual Assistance was requested, 0 otherwise.} +\item{ia_residences_impacted}{Total residences impacted.} +\item{ia_residences_destroyed}{Number of residences destroyed.} +\item{ia_residences_major_damage}{Number of residences with major damage.} +\item{ia_residences_minor_damage}{Number of residences with minor damage.} +\item{ia_cost_estimate_total}{Estimated total Individual Assistance cost.} +\item{pa_requested}{1 if Public Assistance was requested, 0 otherwise.} +\item{pa_cost_estimate_total}{Estimated total Public Assistance cost.} +\item{pa_per_capita_impact_statewide}{Statewide per capita impact amount.} +\item{pa_per_capita_impact_indicator_statewide}{Met/Not Met indicator for statewide threshold.} +} } \description{ -These data reflect extracted attributes from PDF preliminary damage assessments -hosted on FEMA's website at: https://www.fema.gov/disaster/how-declared/preliminary-damage-assessments/reports. +Retrieves data extracted from PDF preliminary damage assessment (PDA) +reports submitted to FEMA for disaster declarations. +} +\details{ +Data are extracted from PDF reports hosted at +\url{https://www.fema.gov/disaster/how-declared/preliminary-damage-assessments/reports}. 
Owing to the unstructured nature of the source documents, some fields may be incorrect in the data returned by the function, though significant quality checks have been implemented in an effort to produce a high-quality dataset. diff --git a/man/get_public_assistance.Rd b/man/get_public_assistance.Rd index a5f5c8c..311a5b5 100644 --- a/man/get_public_assistance.Rd +++ b/man/get_public_assistance.Rd @@ -13,7 +13,8 @@ get_public_assistance( \arguments{ \item{file_path}{The file path to the raw data contained in a .parquet file.} -\item{state_abbreviations}{A character vector of state abbreviations. NULL by default, which returns records for all 51 states. Only the 51 states are supported at this time.} +\item{state_abbreviations}{A character vector of state abbreviations. NULL by default, +which returns records for all 51 states. Only the 51 states are supported at this time.} } \value{ A dataframe of project-level funding requests and awards, along with variables that can be aggregated to the county level. @@ -40,21 +41,25 @@ to the \code{id}-by-county level. Refer to the details for additional informatio } } \description{ -Project- and county-level data on PA funding over time +Retrieves FEMA Public Assistance (PA) project funding data, +crosswalked to the county level for geographic analysis. } \details{ +Data are from FEMA's OpenFEMA API. See +\url{https://www.fema.gov/openfema-data-page/public-assistance-funded-projects-details-v2}. + These data have been crosswalked so that estimates can be aggregated at the county level. This is necessary (for county-level estimates) because many projects are statewide projects and do not have county-level observations in the data. Analysts thus have two options for working with these data: (1) De-select the variables suffixed with \verb{_split} and then run \code{distinct(df)}. -This will provide unique observations for projects; projects are both county-level -and statewide. 
These data can be aggregated to the state level but cannot be +This will provide unique observations for projects; projects can be either county-level +or statewide. These data can be aggregated to the state level but cannot be comprehensively aggregated to the county level. (2) Group the data at the county level and summarize to produce county-level characterizations of PA projects and funding, using the \verb{_split}-suffixed -variables to calculate funding totals. For example, this might look like: +variables to calculate funding totals. The attribution of statewide projects to the county level occurs by proportionally attributing project costs based on county-level populations. For example, in a fictional state with two diff --git a/man/get_sba_loans.Rd b/man/get_sba_loans.Rd index e70fb7c..51531e6 100644 --- a/man/get_sba_loans.Rd +++ b/man/get_sba_loans.Rd @@ -7,10 +7,29 @@ get_sba_loans() } \value{ -A dataframe comprising city- and zip-level data on SBA loanmaking +A dataframe comprising city- and zip-level data on SBA loanmaking. 
+Columns include: +\describe{ +\item{fiscal_year}{The federal fiscal year of the loan.} +\item{disaster_number_fema}{FEMA disaster number associated with the loan.} +\item{disaster_number_sba_physical}{SBA physical disaster declaration number.} +\item{disaster_number_sba_eidl}{SBA Economic Injury Disaster Loan (EIDL) declaration number.} +\item{damaged_property_zip_code}{ZIP code of the damaged property.} +\item{damaged_property_city_name}{City name of the damaged property.} +\item{damaged_property_state_code}{Two-letter state abbreviation.} +\item{verified_loss_total}{Total verified loss amount in dollars.} +\item{approved_amount_total}{Total approved loan amount in dollars.} +\item{approved_amount_real_estate}{Approved loan amount for real estate in dollars.} +\item{loan_type}{Type of loan: "residential" or "business".} +} } \description{ -Access SBA data on disaster loans +Retrieves Small Business Administration (SBA) disaster loan data +for both home and business loans at the city and zip code level. +} +\details{ +Data are sourced from the SBA's disaster loan reports. See +\url{https://www.sba.gov/funding-programs/disaster-assistance}. } \examples{ \dontrun{ diff --git a/man/get_sheldus.Rd b/man/get_sheldus.Rd index dddfdc9..76caf3f 100644 --- a/man/get_sheldus.Rd +++ b/man/get_sheldus.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/get_sheldus.R \name{get_sheldus} \alias{get_sheldus} -\title{Access temporal county-level SHELDUS hazard damage data.} +\title{Access temporal county-level SHELDUS hazard damage data} \usage{ get_sheldus( file_path = file.path(get_box_path(), "hazards", "sheldus", @@ -15,9 +15,30 @@ get_sheldus( } \value{ A dataframe comprising hazard x month x year x county observations of hazard events. 
+Columns include: +\describe{ +\item{unique_id}{Unique identifier for each observation.} +\item{GEOID}{Five-digit county FIPS code.} +\item{state_name}{Full state name (sentence case).} +\item{county_name}{County name.} +\item{year}{Year of the hazard event(s).} +\item{month}{Month of the hazard event(s).} +\item{hazard}{Type of hazard (e.g., "Flooding", "Hurricane/Tropical Storm").} +\item{damage_property}{Property damage in 2023 inflation-adjusted dollars.} +\item{damage_crop}{Crop damage in 2023 inflation-adjusted dollars.} +\item{fatalities}{Number of fatalities.} +\item{injuries}{Number of injuries.} +\item{records}{Number of individual events aggregated into this observation.} +} } \description{ -Access temporal county-level SHELDUS hazard damage data. +Retrieves county-level hazard event data from the Spatial Hazard Events +and Losses Database for the United States (SHELDUS), including property damage, +crop damage, fatalities, and injuries. +} +\details{ +Data are from Arizona State University's SHELDUS database. Access requires +a subscription. See \url{https://cemhs.asu.edu/sheldus}. } \examples{ \dontrun{ diff --git a/man/get_structures.Rd b/man/get_structures.Rd index 9d81a15..729fda8 100644 --- a/man/get_structures.Rd +++ b/man/get_structures.Rd @@ -14,10 +14,24 @@ get_structures(boundaries, geography = "county", keep_structures = FALSE) \item{keep_structures}{Logical. If TRUE, the raw structure data will be returned alongside the summarized data.} } \value{ -A dataframe comprising estimated counts of each structure type, at the specified \code{geography}, for all such geographic units intersecting the \code{boundaries} object. If keep_structure = TRUE, returns a list with two elements: the summarized data and the raw structure data. +A dataframe comprising estimated counts of each structure type, at the +specified \code{geography}, for all such geographic units intersecting the \code{boundaries} +object. 
If keep_structures = TRUE, returns a list with two elements: the summarized +data and the raw structure data. Columns include: +\describe{ +\item{GEOID}{Census tract (11-digit) or county (5-digit) FIPS code.} +\item{primary_occupancy}{The primary use of the structure (e.g., "Residential", "Commercial").} +\item{occupancy_class}{Broader classification of occupancy type.} +\item{count}{Number of structures of this type in the geographic unit.} +} } \description{ -Estimate counts of hazard-impacted structures by structure type +Retrieves building footprint data from the USA Structures dataset and +summarizes structure counts by type at the tract or county level. +} +\details{ +Data are sourced from the USA Structures dataset maintained by the +Department of Homeland Security. See \url{https://geoplatform.gov/metadata/9d4a3ae3-8637-4707-92a7-b7d67b769a6b}. } \examples{ \dontrun{ diff --git a/man/get_wildfire_burn_zones.Rd b/man/get_wildfire_burn_zones.Rd new file mode 100644 index 0000000..29c119f --- /dev/null +++ b/man/get_wildfire_burn_zones.Rd @@ -0,0 +1,58 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/get_wildfire_burn_zones.R +\name{get_wildfire_burn_zones} +\alias{get_wildfire_burn_zones} +\title{Get wildfire burn zones} +\usage{ +get_wildfire_burn_zones( + file_path = file.path(get_box_path(), "hazards", "other-sources", + "wildfire-burn-zones", "wfbz_disasters_2000-2025.geojson") +) +} +\arguments{ +\item{file_path}{The path to the geojson file containing the raw data. Defaults to a +path within Box.} +} +\value{ +An sf dataframe comprising wildfire burn zone disasters. Each row represents a +single wildfire event, with polygon geometries representing burn zones.
+Columns include: +\describe{ +\item{wildfire_id}{Unique identifier for the wildfire event.} +\item{id_fema}{FEMA disaster identifier (if applicable).} +\item{year}{Year of the wildfire.} +\item{wildfire_name}{Name of the wildfire or fire complex.} +\item{county_fips}{Pipe-delimited string of five-digit county FIPS codes for all +counties affected by the wildfire.} +\item{county_name}{Pipe-delimited string of county names for all counties +affected by the wildfire.} +\item{area_sq_km}{Burned area in square kilometers.} +\item{wildfire_complex_binary}{Whether the fire is a complex (multiple fires).} +\item{date_start}{Ignition date.} +\item{date_containment}{Containment date.} +\item{fatalities_total}{Total fatalities.} +\item{injuries_total}{Total injuries.} +\item{structures_destroyed}{Number of structures destroyed.} +\item{structures_threatened}{Number of structures threatened.} +\item{evacuation_total}{Total evacuations.} +\item{wui_type}{Wildland-urban interface type.} +\item{density_people_sq_km_wildfire_buffer}{Population density in wildfire buffer area.} +\item{geometry}{Burn zone polygon geometry.} +} +} +\description{ +Returns spatial data on wildfire burn zones in the US from 2000-2025. +This dataset harmonizes six wildfire datasets (FIRED, MTBS, NIFC, ICS-209, RedBook, and FEMA) +to identify wildfires that burned near communities and resulted in civilian fatalities, +destroyed structures, or received federal disaster relief. +} +\details{ +Data are from a harmonized wildfire burn zone disaster dataset combining +FIRED, MTBS, NIFC, ICS-209, RedBook, and FEMA data sources. Geometries are in +NAD83 / Conus Albers (EPSG:5070). 
+} +\examples{ +\dontrun{ +burn_zones <- get_wildfire_burn_zones() +} +} diff --git a/renv.lock b/renv.lock index ee3ab54..ff8b217 100644 --- a/renv.lock +++ b/renv.lock @@ -1,10 +1,10 @@ { "R": { - "Version": "4.5.1", + "Version": "4.4.0", "Repositories": [ { "Name": "CRAN", - "URL": "https://repo.miserver.it.umich.edu/cran" + "URL": "https://cloud.r-project.org" } ] }, @@ -24,7 +24,7 @@ "NeedsCompilation": "no", "Author": "Dirk Eddelbuettel [aut, cre] (), John W. Emerson [aut], Michael J. Kane [aut] ()", "Maintainer": "Dirk Eddelbuettel ", - "Repository": "RSPM", + "Repository": "https://packagemanager.posit.co/cran/latest", "Encoding": "UTF-8" }, "DBI": { @@ -74,7 +74,7 @@ "NeedsCompilation": "no", "Author": "R Special Interest Group on Databases (R-SIG-DB) [aut], Hadley Wickham [aut], Kirill Müller [aut, cre] (), R Consortium [fnd]", "Maintainer": "Kirill Müller ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "KernSmooth": { "Package": "KernSmooth", @@ -99,7 +99,8 @@ "NeedsCompilation": "yes", "Author": "Matt Wand [aut], Cleve Moler [ctb] (LINPACK routines in src/d*), Brian Ripley [trl, cre, ctb] (R port and updates)", "Maintainer": "Brian Ripley ", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest", + "Encoding": "UTF-8" }, "MASS": { "Package": "MASS", @@ -206,7 +207,7 @@ "NeedsCompilation": "no", "Author": "Winston Chang [aut, cre], Posit Software, PBC [cph, fnd]", "Maintainer": "Winston Chang ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "RColorBrewer": { "Package": "RColorBrewer", @@ -223,8 +224,7 @@ "Description": "Provides color schemes for maps (and other graphics) designed by Cynthia Brewer as described at http://colorbrewer2.org.", "License": "Apache License 2.0", "NeedsCompilation": "no", - "Repository": "https://packagemanager.posit.co/cran/latest", - "Encoding": "UTF-8" + "Repository": "CRAN" }, "RSQLite": { "Package": "RSQLite", 
@@ -284,10 +284,10 @@ }, "Rcpp": { "Package": "Rcpp", - "Version": "1.1.0", + "Version": "1.0.14", "Source": "Repository", "Title": "Seamless R and C++ Integration", - "Date": "2025-07-01", + "Date": "2025-01-11", "Authors@R": "c(person(\"Dirk\", \"Eddelbuettel\", role = c(\"aut\", \"cre\"), email = \"edd@debian.org\", comment = c(ORCID = \"0000-0001-6419-907X\")), person(\"Romain\", \"Francois\", role = \"aut\", comment = c(ORCID = \"0000-0002-2444-4226\")), person(\"JJ\", \"Allaire\", role = \"aut\", comment = c(ORCID = \"0000-0003-0174-9868\")), person(\"Kevin\", \"Ushey\", role = \"aut\", comment = c(ORCID = \"0000-0003-2880-7407\")), person(\"Qiang\", \"Kou\", role = \"aut\", comment = c(ORCID = \"0000-0001-6786-5453\")), person(\"Nathan\", \"Russell\", role = \"aut\"), person(\"Iñaki\", \"Ucar\", role = \"aut\", comment = c(ORCID = \"0000-0001-6403-5550\")), person(\"Doug\", \"Bates\", role = \"aut\", comment = c(ORCID = \"0000-0001-8316-9503\")), person(\"John\", \"Chambers\", role = \"aut\"))", "Description": "The 'Rcpp' package provides R functions as well as C++ classes which offer a seamless integration of R and C++. Many R data types and objects can be mapped back and forth to C++ equivalents which facilitates both writing of new code as well as easier integration of third-party libraries. 
Documentation about 'Rcpp' is provided by several vignettes included in this package, via the 'Rcpp Gallery' site at , the paper by Eddelbuettel and Francois (2011, ), the book by Eddelbuettel (2013, ) and the paper by Eddelbuettel and Balamuta (2018, ); see 'citation(\"Rcpp\")' for details.", "Imports": [ @@ -307,9 +307,9 @@ "RoxygenNote": "6.1.1", "Encoding": "UTF-8", "NeedsCompilation": "yes", - "Author": "Dirk Eddelbuettel [aut, cre] (ORCID: ), Romain Francois [aut] (ORCID: ), JJ Allaire [aut] (ORCID: ), Kevin Ushey [aut] (ORCID: ), Qiang Kou [aut] (ORCID: ), Nathan Russell [aut], Iñaki Ucar [aut] (ORCID: ), Doug Bates [aut] (ORCID: ), John Chambers [aut]", + "Author": "Dirk Eddelbuettel [aut, cre] (), Romain Francois [aut] (), JJ Allaire [aut] (), Kevin Ushey [aut] (), Qiang Kou [aut] (), Nathan Russell [aut], Iñaki Ucar [aut] (), Doug Bates [aut] (), John Chambers [aut]", "Maintainer": "Dirk Eddelbuettel ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "RcppProgress": { "Package": "RcppProgress", @@ -392,7 +392,7 @@ "NeedsCompilation": "yes", "Author": "Object-Oriented Programming Working Group [cph], Davis Vaughan [aut], Jim Hester [aut] (), Tomasz Kalinowski [aut], Will Landau [aut], Michael Lawrence [aut], Martin Maechler [aut] (), Luke Tierney [aut], Hadley Wickham [aut, cre] ()", "Maintainer": "Hadley Wickham ", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest" }, "arrow": { "Package": "arrow", @@ -460,7 +460,7 @@ "NeedsCompilation": "yes", "Author": "Neal Richardson [aut], Ian Cook [aut], Nic Crane [aut], Dewey Dunnington [aut] (ORCID: ), Romain François [aut] (ORCID: ), Jonathan Keane [aut, cre], Dragoș Moldovan-Grünfeld [aut], Jeroen Ooms [aut], Jacob Wujciak-Jens [aut], Javier Luraschi [ctb], Karl Dunkle Werner [ctb] (ORCID: ), Jeffrey Wong [ctb], Apache Arrow [aut, cph]", "Maintainer": "Jonathan Keane ", - "Repository": "https://packagemanager.posit.co/cran/latest" + 
"Repository": "CRAN" }, "askpass": { "Package": "askpass", @@ -507,7 +507,7 @@ "NeedsCompilation": "no", "Author": "Hadley Wickham [aut, cre]", "Maintainer": "Hadley Wickham ", - "Repository": "RSPM", + "Repository": "https://packagemanager.posit.co/cran/latest", "Encoding": "UTF-8" }, "backports": { @@ -794,7 +794,7 @@ "NeedsCompilation": "no", "Author": "David Robinson [aut], Alex Hayes [aut] (), Simon Couch [aut, cre] (), Posit Software, PBC [cph, fnd], Indrajeet Patil [ctb] (), Derek Chiu [ctb], Matthieu Gomez [ctb], Boris Demeshev [ctb], Dieter Menne [ctb], Benjamin Nutter [ctb], Luke Johnston [ctb], Ben Bolker [ctb], Francois Briatte [ctb], Jeffrey Arnold [ctb], Jonah Gabry [ctb], Luciano Selzer [ctb], Gavin Simpson [ctb], Jens Preussner [ctb], Jay Hesselberth [ctb], Hadley Wickham [ctb], Matthew Lincoln [ctb], Alessandro Gasparini [ctb], Lukasz Komsta [ctb], Frederick Novometsky [ctb], Wilson Freitas [ctb], Michelle Evans [ctb], Jason Cory Brunson [ctb], Simon Jackson [ctb], Ben Whalley [ctb], Karissa Whiting [ctb], Yves Rosseel [ctb], Michael Kuehn [ctb], Jorge Cimentada [ctb], Erle Holgersen [ctb], Karl Dunkle Werner [ctb] (), Ethan Christensen [ctb], Steven Pav [ctb], Paul PJ [ctb], Ben Schneider [ctb], Patrick Kennedy [ctb], Lily Medina [ctb], Brian Fannin [ctb], Jason Muhlenkamp [ctb], Matt Lehman [ctb], Bill Denney [ctb] (), Nic Crane [ctb], Andrew Bates [ctb], Vincent Arel-Bundock [ctb] (), Hideaki Hayashi [ctb], Luis Tobalina [ctb], Annie Wang [ctb], Wei Yang Tham [ctb], Clara Wang [ctb], Abby Smith [ctb] (), Jasper Cooper [ctb] (), E Auden Krauska [ctb] (), Alex Wang [ctb], Malcolm Barrett [ctb] (), Charles Gray [ctb] (), Jared Wilber [ctb], Vilmantas Gegzna [ctb] (), Eduard Szoecs [ctb], Frederik Aust [ctb] (), Angus Moore [ctb], Nick Williams [ctb], Marius Barth [ctb] (), Bruna Wundervald [ctb] (), Joyce Cahoon [ctb] (), Grant McDermott [ctb] (), Kevin Zarca [ctb], Shiro Kuriwaki [ctb] (), Lukas Wallrich [ctb] (), James Martherus [ctb] (), 
Chuliang Xiao [ctb] (), Joseph Larmarange [ctb], Max Kuhn [ctb], Michal Bojanowski [ctb], Hakon Malmedal [ctb], Clara Wang [ctb], Sergio Oller [ctb], Luke Sonnet [ctb], Jim Hester [ctb], Ben Schneider [ctb], Bernie Gray [ctb] (), Mara Averick [ctb], Aaron Jacobs [ctb], Andreas Bender [ctb], Sven Templer [ctb], Paul-Christian Buerkner [ctb], Matthew Kay [ctb], Erwan Le Pennec [ctb], Johan Junkka [ctb], Hao Zhu [ctb], Benjamin Soltoff [ctb], Zoe Wilkinson Saldana [ctb], Tyler Littlefield [ctb], Charles T. Gray [ctb], Shabbh E. Banks [ctb], Serina Robinson [ctb], Roger Bivand [ctb], Riinu Ots [ctb], Nicholas Williams [ctb], Nina Jakobsen [ctb], Michael Weylandt [ctb], Lisa Lendway [ctb], Karl Hailperin [ctb], Josue Rodriguez [ctb], Jenny Bryan [ctb], Chris Jarvis [ctb], Greg Macfarlane [ctb], Brian Mannakee [ctb], Drew Tyre [ctb], Shreyas Singh [ctb], Laurens Geffert [ctb], Hong Ooi [ctb], Henrik Bengtsson [ctb], Eduard Szocs [ctb], David Hugh-Jones [ctb], Matthieu Stigler [ctb], Hugo Tavares [ctb] (), R. Willem Vervoort [ctb], Brenton M. Wiernik [ctb], Josh Yamamoto [ctb], Jasme Lee [ctb], Taren Sanders [ctb] (), Ilaria Prosdocimi [ctb] (), Daniel D. 
Sjoberg [ctb] (), Alex Reinhart [ctb] ()", "Maintainer": "Simon Couch ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "bslib": { "Package": "bslib", @@ -853,7 +853,7 @@ "NeedsCompilation": "no", "Author": "Carson Sievert [aut, cre] (), Joe Cheng [aut], Garrick Aden-Buie [aut] (), Posit Software, PBC [cph, fnd], Bootstrap contributors [ctb] (Bootstrap library), Twitter, Inc [cph] (Bootstrap library), Javi Aguilar [ctb, cph] (Bootstrap colorpicker library), Thomas Park [ctb, cph] (Bootswatch library), PayPal [ctb, cph] (Bootstrap accessibility plugin)", "Maintainer": "Carson Sievert ", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest" }, "cachem": { "Package": "cachem", @@ -879,7 +879,7 @@ "NeedsCompilation": "yes", "Author": "Winston Chang [aut, cre], Posit Software, PBC [cph, fnd]", "Maintainer": "Winston Chang ", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest" }, "callr": { "Package": "callr", @@ -948,8 +948,7 @@ "NeedsCompilation": "no", "Author": "Jennifer Bryan [cre, aut], Hadley Wickham [ctb]", "Maintainer": "Jennifer Bryan ", - "Repository": "https://packagemanager.posit.co/cran/latest", - "Encoding": "UTF-8" + "Repository": "CRAN" }, "censusapi": { "Package": "censusapi", @@ -1048,7 +1047,8 @@ "NeedsCompilation": "yes", "Author": "Brian Ripley [aut, cre, cph], William Venables [cph]", "Maintainer": "Brian Ripley ", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest", + "Encoding": "UTF-8" }, "classInt": { "Package": "classInt", @@ -1085,7 +1085,7 @@ "VignetteBuilder": "knitr", "Author": "Roger Bivand [aut, cre] (), Bill Denney [ctb] (), Richard Dunlap [ctb], Diego Hernangómez [ctb] (), Hisaji Ono [ctb], Josiah Parry [ctb] (), Matthieu Stigler [ctb] ()", "Maintainer": "Roger Bivand ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "cli": { "Package": "cli", @@ -1132,7 
+1132,7 @@ "NeedsCompilation": "yes", "Author": "Gábor Csárdi [aut, cre], Hadley Wickham [ctb], Kirill Müller [ctb], Salim Brüggemann [ctb] (), Posit Software, PBC [cph, fnd]", "Maintainer": "Gábor Csárdi ", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest" }, "clipr": { "Package": "clipr", @@ -1235,7 +1235,7 @@ "NeedsCompilation": "no", "Author": "Lionel Henry [aut, cre], Posit Software, PBC [cph, fnd]", "Maintainer": "Lionel Henry ", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest" }, "cpp11": { "Package": "cpp11", @@ -1316,14 +1316,14 @@ }, "curl": { "Package": "curl", - "Version": "6.4.0", + "Version": "6.3.0", "Source": "Repository", "Type": "Package", "Title": "A Modern and Flexible Web Client for R", "Authors@R": "c( person(\"Jeroen\", \"Ooms\", role = c(\"aut\", \"cre\"), email = \"jeroenooms@gmail.com\", comment = c(ORCID = \"0000-0002-4035-0289\")), person(\"Hadley\", \"Wickham\", role = \"ctb\"), person(\"Posit Software, PBC\", role = \"cph\"))", "Description": "Bindings to 'libcurl' for performing fully configurable HTTP/FTP requests where responses can be processed in memory, on disk, or streaming via the callback or connection interfaces. 
Some knowledge of 'libcurl' is recommended; for a more-user-friendly web client see the 'httr2' package which builds on this package with http specific tools and logic.", "License": "MIT + file LICENSE", - "SystemRequirements": "libcurl (>= 7.73): libcurl-devel (rpm) or libcurl4-openssl-dev (deb)", + "SystemRequirements": "libcurl (>= 7.62): libcurl-devel (rpm) or libcurl4-openssl-dev (deb)", "URL": "https://jeroen.r-universe.dev/curl", "BugReports": "https://github.com/jeroen/curl/issues", "Suggests": [ @@ -1346,11 +1346,11 @@ "NeedsCompilation": "yes", "Author": "Jeroen Ooms [aut, cre] (ORCID: ), Hadley Wickham [ctb], Posit Software, PBC [cph]", "Maintainer": "Jeroen Ooms ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "data.table": { "Package": "data.table", - "Version": "1.17.8", + "Version": "1.17.4", "Source": "Repository", "Title": "Extension of `data.frame`", "Depends": [ @@ -1380,7 +1380,7 @@ "NeedsCompilation": "yes", "Author": "Tyson Barrett [aut, cre] (ORCID: ), Matt Dowle [aut], Arun Srinivasan [aut], Jan Gorecki [aut], Michael Chirico [aut] (ORCID: ), Toby Hocking [aut] (ORCID: ), Benjamin Schwendinger [aut] (ORCID: ), Ivan Krylov [aut] (ORCID: ), Pasha Stetsenko [ctb], Tom Short [ctb], Steve Lianoglou [ctb], Eduard Antonyan [ctb], Markus Bonsch [ctb], Hugh Parsonage [ctb], Scott Ritchie [ctb], Kun Ren [ctb], Xianying Tan [ctb], Rick Saporta [ctb], Otto Seiskari [ctb], Xianghui Dong [ctb], Michel Lang [ctb], Watal Iwasaki [ctb], Seth Wenchel [ctb], Karl Broman [ctb], Tobias Schmidt [ctb], David Arenburg [ctb], Ethan Smith [ctb], Francois Cocquemas [ctb], Matthieu Gomez [ctb], Philippe Chataignon [ctb], Nello Blaser [ctb], Dmitry Selivanov [ctb], Andrey Riabushenko [ctb], Cheng Lee [ctb], Declan Groves [ctb], Daniel Possenriede [ctb], Felipe Parages [ctb], Denes Toth [ctb], Mus Yaramaz-David [ctb], Ayappan Perumal [ctb], James Sams [ctb], Martin Morgan [ctb], Michael Quinn [ctb], @javrucebo [ctb], @marc-outins 
[ctb], Roy Storey [ctb], Manish Saraswat [ctb], Morgan Jacob [ctb], Michael Schubmehl [ctb], Davis Vaughan [ctb], Leonardo Silvestri [ctb], Jim Hester [ctb], Anthony Damico [ctb], Sebastian Freundt [ctb], David Simons [ctb], Elliott Sales de Andrade [ctb], Cole Miller [ctb], Jens Peder Meldgaard [ctb], Vaclav Tlapak [ctb], Kevin Ushey [ctb], Dirk Eddelbuettel [ctb], Tony Fischetti [ctb], Ofek Shilon [ctb], Vadim Khotilovich [ctb], Hadley Wickham [ctb], Bennet Becker [ctb], Kyle Haynes [ctb], Boniface Christian Kamgang [ctb], Olivier Delmarcell [ctb], Josh O'Brien [ctb], Dereck de Mezquita [ctb], Michael Czekanski [ctb], Dmitry Shemetov [ctb], Nitish Jha [ctb], Joshua Wu [ctb], Iago Giné-Vázquez [ctb], Anirban Chetia [ctb], Doris Amoakohene [ctb], Angel Feliz [ctb], Michael Young [ctb], Mark Seeto [ctb], Philippe Grosjean [ctb], Vincent Runge [ctb], Christian Wia [ctb], Elise Maigné [ctb], Vincent Rocher [ctb], Vijay Lulla [ctb], Aljaž Sluga [ctb], Bill Evans [ctb]", "Maintainer": "Tyson Barrett ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "dbplyr": { "Package": "dbplyr", @@ -1441,7 +1441,7 @@ "NeedsCompilation": "no", "Author": "Hadley Wickham [aut, cre], Maximilian Girlich [aut], Edgar Ruiz [aut], Posit Software, PBC [cph, fnd]", "Maintainer": "Hadley Wickham ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "desc": { "Package": "desc", @@ -1479,6 +1479,40 @@ "Collate": "'assertions.R' 'authors-at-r.R' 'built.R' 'classes.R' 'collate.R' 'constants.R' 'deps.R' 'desc-package.R' 'description.R' 'encoding.R' 'find-package-root.R' 'latex.R' 'non-oo-api.R' 'package-archives.R' 'read.R' 'remotes.R' 'str.R' 'syntax_checks.R' 'urls.R' 'utils.R' 'validate.R' 'version.R'", "NeedsCompilation": "no", "Author": "Gábor Csárdi [aut, cre], Kirill Müller [aut], Jim Hester [aut], Maëlle Salmon [ctb] (), Posit Software, PBC [cph, fnd]", + "Repository": "https://packagemanager.posit.co/cran/latest" + 
}, + "diffobj": { + "Package": "diffobj", + "Version": "0.3.6", + "Source": "Repository", + "Type": "Package", + "Title": "Diffs for R Objects", + "Description": "Generate a colorized diff of two R objects for an intuitive visualization of their differences.", + "Authors@R": "c( person( \"Brodie\", \"Gaslam\", email=\"brodie.gaslam@yahoo.com\", role=c(\"aut\", \"cre\")), person( \"Michael B.\", \"Allen\", email=\"ioplex@gmail.com\", role=c(\"ctb\", \"cph\"), comment=\"Original C implementation of Myers Diff Algorithm\"))", + "Depends": [ + "R (>= 3.1.0)" + ], + "License": "GPL-2 | GPL-3", + "URL": "https://github.com/brodieG/diffobj", + "BugReports": "https://github.com/brodieG/diffobj/issues", + "RoxygenNote": "7.2.3", + "VignetteBuilder": "knitr", + "Encoding": "UTF-8", + "Suggests": [ + "knitr", + "rmarkdown" + ], + "Collate": "'capt.R' 'options.R' 'pager.R' 'check.R' 'finalizer.R' 'misc.R' 'html.R' 'styles.R' 's4.R' 'core.R' 'diff.R' 'get.R' 'guides.R' 'hunks.R' 'layout.R' 'myerssimple.R' 'rdiff.R' 'rds.R' 'set.R' 'subset.R' 'summmary.R' 'system.R' 'text.R' 'tochar.R' 'trim.R' 'word.R'", + "Imports": [ + "crayon (>= 1.3.2)", + "tools", + "methods", + "utils", + "stats" + ], + "NeedsCompilation": "yes", + "Author": "Brodie Gaslam [aut, cre], Michael B. 
Allen [ctb, cph] (Original C implementation of Myers Diff Algorithm)", + "Maintainer": "Brodie Gaslam ", "Repository": "CRAN" }, "digest": { @@ -1658,7 +1692,7 @@ "NeedsCompilation": "no", "Author": "Hadley Wickham [cre, aut], Maximilian Girlich [aut], Mark Fairbanks [aut], Ryan Dickerson [aut], Posit Software, PBC [cph, fnd]", "Maintainer": "Hadley Wickham ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "e1071": { "Package": "e1071", @@ -1797,7 +1831,7 @@ }, "evaluate": { "Package": "evaluate", - "Version": "1.0.4", + "Version": "1.0.3", "Source": "Repository", "Type": "Package", "Title": "Parsing and Evaluation Tools that Provide More Details than the Default", @@ -1816,8 +1850,7 @@ "lattice", "methods", "pkgload", - "ragg (>= 1.4.0)", - "rlang (>= 1.1.5)", + "rlang", "knitr", "testthat (>= 3.0.0)", "withr" @@ -1827,9 +1860,9 @@ "Encoding": "UTF-8", "RoxygenNote": "7.3.2", "NeedsCompilation": "no", - "Author": "Hadley Wickham [aut, cre], Yihui Xie [aut] (ORCID: ), Michael Lawrence [ctb], Thomas Kluyver [ctb], Jeroen Ooms [ctb], Barret Schloerke [ctb], Adam Ryczkowski [ctb], Hiroaki Yutani [ctb], Michel Lang [ctb], Karolis Koncevičius [ctb], Posit Software, PBC [cph, fnd]", + "Author": "Hadley Wickham [aut, cre], Yihui Xie [aut] (), Michael Lawrence [ctb], Thomas Kluyver [ctb], Jeroen Ooms [ctb], Barret Schloerke [ctb], Adam Ryczkowski [ctb], Hiroaki Yutani [ctb], Michel Lang [ctb], Karolis Koncevičius [ctb], Posit Software, PBC [cph, fnd]", "Maintainer": "Hadley Wickham ", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest" }, "extrafont": { "Package": "extrafont", @@ -1902,7 +1935,7 @@ "NeedsCompilation": "yes", "Author": "Brodie Gaslam [aut, cre], Elliott Sales De Andrade [ctb], R Core Team [cph] (UTF8 byte length calcs from src/util.c)", "Maintainer": "Brodie Gaslam ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "farver": { "Package": 
"farver", @@ -1945,7 +1978,7 @@ "NeedsCompilation": "yes", "Author": "Winston Chang [aut, cre], Posit Software, PBC [cph, fnd], Tessil [cph] (hopscotch_map library)", "Maintainer": "Winston Chang ", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest" }, "fontawesome": { "Package": "fontawesome", @@ -1980,7 +2013,7 @@ "NeedsCompilation": "no", "Author": "Richard Iannone [aut, cre] (), Christophe Dervieux [ctb] (), Winston Chang [ctb], Dave Gandy [ctb, cph] (Font-Awesome font), Posit Software, PBC [cph, fnd]", "Maintainer": "Richard Iannone ", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest" }, "forcats": { "Package": "forcats", @@ -2022,7 +2055,7 @@ "NeedsCompilation": "no", "Author": "Hadley Wickham [aut, cre], RStudio [cph, fnd]", "Maintainer": "Hadley Wickham ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "fs": { "Package": "fs", @@ -2113,7 +2146,7 @@ "NeedsCompilation": "no", "Author": "Jennifer Bryan [aut, cre] (), Craig Citro [aut], Hadley Wickham [aut] (), Google Inc [cph], Posit Software, PBC [cph, fnd]", "Maintainer": "Jennifer Bryan ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "generics": { "Package": "generics", @@ -2263,7 +2296,7 @@ "NeedsCompilation": "yes", "Author": "Kamil Slowikowski [aut, cre] (), Alicia Schep [ctb] (), Sean Hughes [ctb] (), Trung Kien Dang [ctb] (), Saulius Lukauskas [ctb], Jean-Olivier Irisson [ctb] (), Zhian N Kamvar [ctb] (), Thompson Ryan [ctb] (), Dervieux Christophe [ctb] (), Yutani Hiroaki [ctb], Pierre Gramme [ctb], Amir Masoud Abdol [ctb], Malcolm Barrett [ctb] (), Robrecht Cannoodt [ctb] (), Michał Krassowski [ctb] (), Michael Chirico [ctb] (), Pedro Aphalo [ctb] (), Francis Barton [ctb]", "Maintainer": "Kamil Slowikowski ", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest" }, "glue": { "Package": "glue", @@ -2304,7 +2337,7 @@ 
"NeedsCompilation": "yes", "Author": "Jim Hester [aut] (), Jennifer Bryan [aut, cre] (), Posit Software, PBC [cph, fnd]", "Maintainer": "Jennifer Bryan ", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest" }, "googledrive": { "Package": "googledrive", @@ -2354,7 +2387,7 @@ "NeedsCompilation": "no", "Author": "Lucy D'Agostino McGowan [aut], Jennifer Bryan [aut, cre] (), Posit Software, PBC [cph, fnd]", "Maintainer": "Jennifer Bryan ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "googlesheets4": { "Package": "googlesheets4", @@ -2404,7 +2437,7 @@ "NeedsCompilation": "no", "Author": "Jennifer Bryan [cre, aut] (), Posit Software, PBC [cph, fnd]", "Maintainer": "Jennifer Bryan ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "gridExtra": { "Package": "gridExtra", @@ -2434,7 +2467,8 @@ "NeedsCompilation": "no", "Author": "Baptiste Auguie [aut, cre], Anton Antonov [ctb]", "Maintainer": "Baptiste Auguie ", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest", + "Encoding": "UTF-8" }, "gtable": { "Package": "gtable", @@ -2474,7 +2508,7 @@ "NeedsCompilation": "no", "Author": "Hadley Wickham [aut], Thomas Lin Pedersen [aut, cre], Posit Software, PBC [cph, fnd]", "Maintainer": "Thomas Lin Pedersen ", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest" }, "haven": { "Package": "haven", @@ -2523,7 +2557,7 @@ "NeedsCompilation": "yes", "Author": "Hadley Wickham [aut, cre], Evan Miller [aut, cph] (Author of included ReadStat code), Danny Smith [aut], Posit Software, PBC [cph, fnd]", "Maintainer": "Hadley Wickham ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "here": { "Package": "here", @@ -2591,11 +2625,11 @@ "NeedsCompilation": "no", "Author": "Yihui Xie [aut, cre] (), Yixuan Qiu [aut], Christopher Gandrud [ctb], Qiang Li [ctb]", "Maintainer": "Yihui Xie 
", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest" }, "hipread": { "Package": "hipread", - "Version": "0.2.5", + "Version": "0.2.4", "Source": "Repository", "Type": "Package", "Title": "Read Hierarchical Fixed Width Files", @@ -2604,15 +2638,12 @@ "Description": "Read hierarchical fixed width files like those commonly used by many census data providers. Also allows for reading of data in chunks, and reading 'gzipped' files without storing the full file in memory.", "License": "GPL (>= 2) | file LICENSE", "Encoding": "UTF-8", - "Depends": [ - "R (>= 3.0.2)" - ], "LinkingTo": [ - "Rcpp (>= 1.0.12)", + "Rcpp", "BH" ], "Imports": [ - "Rcpp (>= 1.0.12)", + "Rcpp", "R6", "rlang", "tibble" @@ -2622,9 +2653,7 @@ "readr", "testthat" ], - "RoxygenNote": "7.3.2", - "URL": "https://github.com/ipums/hipread", - "BugReports": "https://github.com/ipums/hipread/issues", + "RoxygenNote": "7.2.3", "NeedsCompilation": "yes", "Author": "Greg Freedman Ellis [aut], Derek Burk [aut, cre], Joe Grover [ctb], Mark Padgham [ctb], Hadley Wickham [ctb] (Code adapted from readr), Jim Hester [ctb] (Code adapted from readr), Romain Francois [ctb] (Code adapted from readr), R Core Team [ctb] (Code adapted from readr), RStudio [cph, fnd] (Code adapted from readr), Jukka Jylänki [ctb, cph] (Code adapted from readr), Mikkel Jørgensen [ctb, cph] (Code adapted from readr), University of Minnesota [cph]", "Maintainer": "Derek Burk ", @@ -2746,11 +2775,11 @@ "NeedsCompilation": "no", "Author": "Hadley Wickham [aut, cre], Posit, PBC [cph, fnd]", "Maintainer": "Hadley Wickham ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "httr2": { "Package": "httr2", - "Version": "1.2.0", + "Version": "1.1.2", "Source": "Repository", "Title": "Perform HTTP Requests and Process the Responses", "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\")), person(\"Posit Software, PBC\", role = c(\"cph\", 
\"fnd\")), person(\"Maximilian\", \"Girlich\", role = \"ctb\") )", @@ -2759,11 +2788,11 @@ "URL": "https://httr2.r-lib.org, https://github.com/r-lib/httr2", "BugReports": "https://github.com/r-lib/httr2/issues", "Depends": [ - "R (>= 4.1)" + "R (>= 4.0)" ], "Imports": [ "cli (>= 3.0.0)", - "curl (>= 6.4.0)", + "curl (>= 6.2.1)", "glue", "lifecycle", "magrittr", @@ -2791,20 +2820,20 @@ "rmarkdown", "testthat (>= 3.1.8)", "tibble", - "webfakes (>= 1.4.0)", + "webfakes", "xml2" ], "VignetteBuilder": "knitr", "Config/Needs/website": "tidyverse/tidytemplate", "Config/testthat/edition": "3", "Config/testthat/parallel": "true", - "Config/testthat/start-first": "resp-stream, req-perform", + "Config/testthat/start-first": "multi-req, resp-stream, req-perform", "Encoding": "UTF-8", "RoxygenNote": "7.3.2", "NeedsCompilation": "no", "Author": "Hadley Wickham [aut, cre], Posit Software, PBC [cph, fnd], Maximilian Girlich [ctb]", "Maintainer": "Hadley Wickham ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "ids": { "Package": "ids", @@ -2831,8 +2860,7 @@ "NeedsCompilation": "no", "Author": "Rich FitzJohn [aut, cre]", "Maintainer": "Rich FitzJohn ", - "Repository": "https://packagemanager.posit.co/cran/latest", - "Encoding": "UTF-8" + "Repository": "CRAN" }, "ipumsr": { "Package": "ipumsr", @@ -2975,7 +3003,7 @@ "NeedsCompilation": "no", "Author": "Sam Firke [aut, cre], Bill Denney [ctb], Chris Haid [ctb], Ryan Knight [ctb], Malte Grosser [ctb], Jonathan Zadra [ctb]", "Maintainer": "Sam Firke ", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest" }, "jquerylib": { "Package": "jquerylib", @@ -3027,7 +3055,7 @@ "Encoding": "UTF-8", "NeedsCompilation": "yes", "Author": "Jeroen Ooms [aut, cre] (), Duncan Temple Lang [ctb], Lloyd Hilaiel [cph] (author of bundled libyajl)", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest" }, "knitr": { "Package": "knitr", @@ -3091,7 +3119,7 
@@ "NeedsCompilation": "no", "Author": "Yihui Xie [aut, cre] (, https://yihui.org), Abhraneel Sarma [ctb], Adam Vogt [ctb], Alastair Andrew [ctb], Alex Zvoleff [ctb], Amar Al-Zubaidi [ctb], Andre Simon [ctb] (the CSS files under inst/themes/ were derived from the Highlight package http://www.andre-simon.de), Aron Atkins [ctb], Aaron Wolen [ctb], Ashley Manton [ctb], Atsushi Yasumoto [ctb] (), Ben Baumer [ctb], Brian Diggs [ctb], Brian Zhang [ctb], Bulat Yapparov [ctb], Cassio Pereira [ctb], Christophe Dervieux [ctb], David Hall [ctb], David Hugh-Jones [ctb], David Robinson [ctb], Doug Hemken [ctb], Duncan Murdoch [ctb], Elio Campitelli [ctb], Ellis Hughes [ctb], Emily Riederer [ctb], Fabian Hirschmann [ctb], Fitch Simeon [ctb], Forest Fang [ctb], Frank E Harrell Jr [ctb] (the Sweavel package at inst/misc/Sweavel.sty), Garrick Aden-Buie [ctb], Gregoire Detrez [ctb], Hadley Wickham [ctb], Hao Zhu [ctb], Heewon Jeon [ctb], Henrik Bengtsson [ctb], Hiroaki Yutani [ctb], Ian Lyttle [ctb], Hodges Daniel [ctb], Jacob Bien [ctb], Jake Burkhead [ctb], James Manton [ctb], Jared Lander [ctb], Jason Punyon [ctb], Javier Luraschi [ctb], Jeff Arnold [ctb], Jenny Bryan [ctb], Jeremy Ashkenas [ctb, cph] (the CSS file at inst/misc/docco-classic.css), Jeremy Stephens [ctb], Jim Hester [ctb], Joe Cheng [ctb], Johannes Ranke [ctb], John Honaker [ctb], John Muschelli [ctb], Jonathan Keane [ctb], JJ Allaire [ctb], Johan Toloe [ctb], Jonathan Sidi [ctb], Joseph Larmarange [ctb], Julien Barnier [ctb], Kaiyin Zhong [ctb], Kamil Slowikowski [ctb], Karl Forner [ctb], Kevin K. Smith [ctb], Kirill Mueller [ctb], Kohske Takahashi [ctb], Lorenz Walthert [ctb], Lucas Gallindo [ctb], Marius Hofert [ctb], Martin Modrák [ctb], Michael Chirico [ctb], Michael Friendly [ctb], Michal Bojanowski [ctb], Michel Kuhlmann [ctb], Miller Patrick [ctb], Nacho Caballero [ctb], Nick Salkowski [ctb], Niels Richard Hansen [ctb], Noam Ross [ctb], Obada Mahdi [ctb], Pavel N. 
Krivitsky [ctb] (), Pedro Faria [ctb], Qiang Li [ctb], Ramnath Vaidyanathan [ctb], Richard Cotton [ctb], Robert Krzyzanowski [ctb], Rodrigo Copetti [ctb], Romain Francois [ctb], Ruaridh Williamson [ctb], Sagiru Mati [ctb] (), Scott Kostyshak [ctb], Sebastian Meyer [ctb], Sietse Brouwer [ctb], Simon de Bernard [ctb], Sylvain Rousseau [ctb], Taiyun Wei [ctb], Thibaut Assus [ctb], Thibaut Lamadon [ctb], Thomas Leeper [ctb], Tim Mastny [ctb], Tom Torsney-Weir [ctb], Trevor Davis [ctb], Viktoras Veitas [ctb], Weicheng Zhu [ctb], Wush Wu [ctb], Zachary Foster [ctb], Zhian N. Kamvar [ctb] (), Posit Software, PBC [cph, fnd]", "Maintainer": "Yihui Xie ", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest" }, "labeling": { "Package": "labeling", @@ -3143,7 +3171,7 @@ "NeedsCompilation": "yes", "Author": "Winston Chang [aut, cre], Joe Cheng [aut], Charlie Gao [aut] (), Posit Software, PBC [cph], Marcus Geelnard [ctb, cph] (TinyCThread library, https://tinycthread.github.io/), Evan Nemerson [ctb, cph] (TinyCThread library, https://tinycthread.github.io/)", "Maintainer": "Winston Chang ", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest" }, "lattice": { "Package": "lattice", @@ -3308,7 +3336,7 @@ "Collate": "'Dates.r' 'POSIXt.r' 'util.r' 'parse.r' 'timespans.r' 'intervals.r' 'difftimes.r' 'durations.r' 'periods.r' 'accessors-date.R' 'accessors-day.r' 'accessors-dst.r' 'accessors-hour.r' 'accessors-minute.r' 'accessors-month.r' 'accessors-quarter.r' 'accessors-second.r' 'accessors-tz.r' 'accessors-week.r' 'accessors-year.r' 'am-pm.r' 'time-zones.r' 'numeric.r' 'coercion.r' 'constants.r' 'cyclic_encoding.r' 'data.r' 'decimal-dates.r' 'deprecated.r' 'format_ISO8601.r' 'guess.r' 'hidden.r' 'instants.r' 'leap-years.r' 'ops-addition.r' 'ops-compare.r' 'ops-division.r' 'ops-integer-division.r' 'ops-m+.r' 'ops-modulo.r' 'ops-multiplication.r' 'ops-subtraction.r' 'package.r' 'pretty.r' 'round.r' 'stamp.r' 'tzdir.R' 
'update.r' 'vctrs.R' 'zzz.R'", "NeedsCompilation": "yes", "Author": "Vitalie Spinu [aut, cre], Garrett Grolemund [aut], Hadley Wickham [aut], Davis Vaughan [ctb], Ian Lyttle [ctb], Imanuel Costigan [ctb], Jason Law [ctb], Doug Mitarotonda [ctb], Joseph Larmarange [ctb], Jonathan Boiser [ctb], Chel Hee Lee [ctb]", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest" }, "magrittr": { "Package": "magrittr", @@ -3423,7 +3451,7 @@ "NeedsCompilation": "yes", "Author": "Yihui Xie [aut, cre] (, https://yihui.org), Jeffrey Horner [ctb], Beilei Bian [ctb]", "Maintainer": "Yihui Xie ", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest" }, "modelr": { "Package": "modelr", @@ -3462,7 +3490,7 @@ "NeedsCompilation": "no", "Author": "Hadley Wickham [aut, cre], Posit Software, PBC [cph, fnd]", "Maintainer": "Hadley Wickham ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "nlme": { "Package": "nlme", @@ -3529,7 +3557,7 @@ "NeedsCompilation": "yes", "Author": "Jeroen Ooms [aut, cre] (ORCID: ), Oliver Keyes [ctb]", "Maintainer": "Jeroen Ooms ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "openxlsx": { "Package": "openxlsx", @@ -3610,7 +3638,7 @@ }, "pillar": { "Package": "pillar", - "Version": "1.11.0", + "Version": "1.10.2", "Source": "Repository", "Title": "Coloured Formatting for Columns", "Authors@R": "c(person(given = \"Kirill\", family = \"M\\u00fcller\", role = c(\"aut\", \"cre\"), email = \"kirill@cynkra.com\", comment = c(ORCID = \"0000-0002-1416-3412\")), person(given = \"Hadley\", family = \"Wickham\", role = \"aut\"), person(given = \"RStudio\", role = \"cph\"))", @@ -3661,8 +3689,47 @@ "Config/gha/extra-packages": "units=?ignore-before-r=4.3.0", "Config/Needs/website": "tidyverse/tidytemplate", "NeedsCompilation": "no", - "Author": "Kirill Müller [aut, cre] (ORCID: ), Hadley Wickham [aut], RStudio [cph]", + "Author": 
"Kirill Müller [aut, cre] (), Hadley Wickham [aut], RStudio [cph]", "Maintainer": "Kirill Müller ", + "Repository": "https://packagemanager.posit.co/cran/latest" + }, + "pkgbuild": { + "Package": "pkgbuild", + "Version": "1.4.8", + "Source": "Repository", + "Title": "Find Tools Needed to Build R Packages", + "Authors@R": "c( person(\"Hadley\", \"Wickham\", role = \"aut\"), person(\"Jim\", \"Hester\", role = \"aut\"), person(\"Gábor\", \"Csárdi\", , \"csardi.gabor@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\"), comment = c(ROR = \"03wc8by49\")) )", + "Description": "Provides functions used to build R packages. Locates compilers needed to build R packages on various platforms and ensures the PATH is configured appropriately so R can use them.", + "License": "MIT + file LICENSE", + "URL": "https://github.com/r-lib/pkgbuild, https://pkgbuild.r-lib.org", + "BugReports": "https://github.com/r-lib/pkgbuild/issues", + "Depends": [ + "R (>= 3.5)" + ], + "Imports": [ + "callr (>= 3.2.0)", + "cli (>= 3.4.0)", + "desc", + "processx", + "R6" + ], + "Suggests": [ + "covr", + "cpp11", + "knitr", + "Rcpp", + "rmarkdown", + "testthat (>= 3.2.0)", + "withr (>= 2.3.0)" + ], + "Config/Needs/website": "tidyverse/tidytemplate", + "Config/testthat/edition": "3", + "Config/usethis/last-upkeep": "2025-04-30", + "Encoding": "UTF-8", + "RoxygenNote": "7.3.2", + "NeedsCompilation": "no", + "Author": "Hadley Wickham [aut], Jim Hester [aut], Gábor Csárdi [aut, cre], Posit Software, PBC [cph, fnd] (ROR: )", + "Maintainer": "Gábor Csárdi ", "Repository": "CRAN" }, "pkgconfig": { @@ -3756,6 +3823,55 @@ "NeedsCompilation": "no", "Author": "Hadley Wickham [aut, cre] (ORCID: ), Jay Hesselberth [aut] (ORCID: ), Maëlle Salmon [aut] (ORCID: ), Olivier Roy [aut], Salim Brüggemann [aut] (ORCID: ), Posit Software, PBC [cph, fnd] (ROR: )", "Maintainer": "Hadley Wickham ", + "Repository": "CRAN" + }, + "pkgload": { + "Package": "pkgload", + "Version": 
"1.4.0", + "Source": "Repository", + "Title": "Simulate Package Installation and Attach", + "Authors@R": "c( person(\"Hadley\", \"Wickham\", role = \"aut\"), person(\"Winston\", \"Chang\", role = \"aut\"), person(\"Jim\", \"Hester\", role = \"aut\"), person(\"Lionel\", \"Henry\", , \"lionel@posit.co\", role = c(\"aut\", \"cre\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")), person(\"R Core team\", role = \"ctb\", comment = \"Some namespace and vignette code extracted from base R\") )", + "Description": "Simulates the process of installing a package and then attaching it. This is a key part of the 'devtools' package as it allows you to rapidly iterate while developing a package.", + "License": "GPL-3", + "URL": "https://github.com/r-lib/pkgload, https://pkgload.r-lib.org", + "BugReports": "https://github.com/r-lib/pkgload/issues", + "Depends": [ + "R (>= 3.4.0)" + ], + "Imports": [ + "cli (>= 3.3.0)", + "desc", + "fs", + "glue", + "lifecycle", + "methods", + "pkgbuild", + "processx", + "rlang (>= 1.1.1)", + "rprojroot", + "utils", + "withr (>= 2.4.3)" + ], + "Suggests": [ + "bitops", + "jsonlite", + "mathjaxr", + "pak", + "Rcpp", + "remotes", + "rstudioapi", + "testthat (>= 3.2.1.1)", + "usethis" + ], + "Config/Needs/website": "tidyverse/tidytemplate, ggplot2", + "Config/testthat/edition": "3", + "Config/testthat/parallel": "TRUE", + "Config/testthat/start-first": "dll", + "Encoding": "UTF-8", + "RoxygenNote": "7.3.1", + "NeedsCompilation": "no", + "Author": "Hadley Wickham [aut], Winston Chang [aut], Jim Hester [aut], Lionel Henry [aut, cre], Posit Software, PBC [cph, fnd], R Core team [ctb] (Some namespace and vignette code extracted from base R)", + "Maintainer": "Lionel Henry ", "Repository": "https://packagemanager.posit.co/cran/latest" }, "plogr": { @@ -3817,6 +3933,26 @@ "Maintainer": "Hadley Wickham ", "Repository": "https://packagemanager.posit.co/cran/latest" }, + "praise": { + "Package": "praise", + "Version": "1.0.0", + "Source": 
"Repository", + "Title": "Praise Users", + "Author": "Gabor Csardi, Sindre Sorhus", + "Maintainer": "Gabor Csardi ", + "Description": "Build friendly R packages that praise their users if they have done something good, or they just need it to feel better.", + "License": "MIT + file LICENSE", + "LazyData": "true", + "URL": "https://github.com/gaborcsardi/praise", + "BugReports": "https://github.com/gaborcsardi/praise/issues", + "Suggests": [ + "testthat" + ], + "Collate": "'adjective.R' 'adverb.R' 'exclamation.R' 'verb.R' 'rpackage.R' 'package.R'", + "NeedsCompilation": "no", + "Repository": "https://packagemanager.posit.co/cran/latest", + "Encoding": "UTF-8" + }, "prettyunits": { "Package": "prettyunits", "Version": "1.2.0", @@ -3983,8 +4119,7 @@ "NeedsCompilation": "yes", "Author": "David Meyer [aut, cre], Christian Buchta [aut]", "Maintainer": "David Meyer ", - "Repository": "https://packagemanager.posit.co/cran/latest", - "Encoding": "UTF-8" + "Repository": "CRAN" }, "ps": { "Package": "ps", @@ -4027,16 +4162,16 @@ }, "purrr": { "Package": "purrr", - "Version": "1.1.0", + "Version": "1.0.4", "Source": "Repository", "Title": "Functional Programming Tools", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Lionel\", \"Henry\", , \"lionel@posit.co\", role = \"aut\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\"), comment = c(ROR = \"https://ror.org/03wc8by49\")) )", + "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Lionel\", \"Henry\", , \"lionel@posit.co\", role = \"aut\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\"), comment = c(ROR = \"03wc8by49\")) )", "Description": "A complete and consistent functional programming toolkit for R.", "License": "MIT + file LICENSE", "URL": "https://purrr.tidyverse.org/, 
https://github.com/tidyverse/purrr", "BugReports": "https://github.com/tidyverse/purrr/issues", "Depends": [ - "R (>= 4.1)" + "R (>= 4.0)" ], "Imports": [ "cli (>= 3.6.1)", @@ -4046,13 +4181,11 @@ "vctrs (>= 0.6.3)" ], "Suggests": [ - "carrier (>= 0.2.0)", "covr", "dplyr (>= 0.7.8)", "httr", "knitr", "lubridate", - "mirai (>= 2.4.0)", "rmarkdown", "testthat (>= 3.0.0)", "tibble", @@ -4070,9 +4203,9 @@ "Encoding": "UTF-8", "RoxygenNote": "7.3.2", "NeedsCompilation": "yes", - "Author": "Hadley Wickham [aut, cre] (ORCID: ), Lionel Henry [aut], Posit Software, PBC [cph, fnd] (ROR: )", + "Author": "Hadley Wickham [aut, cre] (), Lionel Henry [aut], Posit Software, PBC [cph, fnd] (03wc8by49)", "Maintainer": "Hadley Wickham ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "qpdf": { "Package": "qpdf", @@ -4102,7 +4235,7 @@ "NeedsCompilation": "yes", "Author": "Jeroen Ooms [aut, cre] (ORCID: ), Ben Raymond [ctb], Jay Berkenbilt [cph] (Author of libqpdf)", "Maintainer": "Jeroen Ooms ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "ragg": { "Package": "ragg", @@ -4138,7 +4271,7 @@ "Config/build/compilation-database": "true", "NeedsCompilation": "yes", "Author": "Thomas Lin Pedersen [cre, aut] (), Maxim Shemanarev [aut, cph] (Author of AGG), Tony Juricic [ctb, cph] (Contributor to AGG), Milan Marusinec [ctb, cph] (Contributor to AGG), Spencer Garrett [ctb] (Contributor to AGG), Posit, PBC [cph, fnd]", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "rappdirs": { "Package": "rappdirs", @@ -4263,7 +4396,7 @@ "NeedsCompilation": "yes", "Author": "Hadley Wickham [aut] (), Jennifer Bryan [aut, cre] (), Posit, PBC [cph, fnd] (Copyright holder of all R code and all C/C++ code without explicit copyright attribution), Marcin Kalicinski [ctb, cph] (Author of included RapidXML code), Komarov Valery [ctb, cph] (Author of included libxls code), Christophe Leitienne 
[ctb, cph] (Author of included libxls code), Bob Colbert [ctb, cph] (Author of included libxls code), David Hoerl [ctb, cph] (Author of included libxls code), Evan Miller [ctb, cph] (Author of included libxls code)", "Maintainer": "Jennifer Bryan ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "rematch": { "Package": "rematch", @@ -4283,7 +4416,7 @@ ], "Encoding": "UTF-8", "NeedsCompilation": "no", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "rematch2": { "Package": "rematch2", @@ -4308,7 +4441,7 @@ "NeedsCompilation": "no", "Author": "Gábor Csárdi [aut, cre], Matthew Lincoln [ctb]", "Maintainer": "Gábor Csárdi ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "renv": { "Package": "renv", @@ -4446,7 +4579,7 @@ "VignetteBuilder": "knitr", "URL": "https://github.com/dylan-turner25/rfema", "BugReports": "https://github.com/dylan-turner25/rfema/issues", - "Author": "Dylan Turner [aut, cre] (ORCID: ), François Michonneau [rev, ctb] (reviewed the package for rOpenSci and patched several bugs in the prelease code, see https://github.com/ropensci/software-review/issues/484.), Marcus Beck [rev, ctb] (reviewed the package for rOpenSci, see https://github.com/ropensci/software-review/issues/484.)", + "Author": "Dylan Turner [aut, cre] (), François Michonneau [rev, ctb] (reviewed the package for rOpenSci and patched several bugs in the prelease code, see https://github.com/ropensci/software-review/issues/484.), Marcus Beck [rev, ctb] (reviewed the package for rOpenSci, see https://github.com/ropensci/software-review/issues/484.)", "Maintainer": "Dylan Turner ", "RemoteType": "github", "RemoteHost": "api.github.com", @@ -4560,11 +4693,11 @@ "NeedsCompilation": "no", "Author": "JJ Allaire [aut], Yihui Xie [aut, cre] (), Christophe Dervieux [aut] (), Jonathan McPherson [aut], Javier Luraschi [aut], Kevin Ushey [aut], Aron Atkins [aut], Hadley Wickham 
[aut], Joe Cheng [aut], Winston Chang [aut], Richard Iannone [aut] (), Andrew Dunning [ctb] (), Atsushi Yasumoto [ctb, cph] (, Number sections Lua filter), Barret Schloerke [ctb], Carson Sievert [ctb] (), Devon Ryan [ctb] (), Frederik Aust [ctb] (), Jeff Allen [ctb], JooYoung Seo [ctb] (), Malcolm Barrett [ctb], Rob Hyndman [ctb], Romain Lesur [ctb], Roy Storey [ctb], Ruben Arslan [ctb], Sergio Oller [ctb], Posit Software, PBC [cph, fnd], jQuery UI contributors [ctb, cph] (jQuery UI library; authors listed in inst/rmd/h/jqueryui/AUTHORS.txt), Mark Otto [ctb] (Bootstrap library), Jacob Thornton [ctb] (Bootstrap library), Bootstrap contributors [ctb] (Bootstrap library), Twitter, Inc [cph] (Bootstrap library), Alexander Farkas [ctb, cph] (html5shiv library), Scott Jehl [ctb, cph] (Respond.js library), Ivan Sagalaev [ctb, cph] (highlight.js library), Greg Franko [ctb, cph] (tocify library), John MacFarlane [ctb, cph] (Pandoc templates), Google, Inc. [ctb, cph] (ioslides library), Dave Raggett [ctb] (slidy library), W3C [cph] (slidy library), Dave Gandy [ctb, cph] (Font-Awesome), Ben Sperry [ctb] (Ionicons), Drifty [cph] (Ionicons), Aidan Lister [ctb, cph] (jQuery StickyTabs), Benct Philip Jonsson [ctb, cph] (pagebreak Lua filter), Albert Krewinkel [ctb, cph] (pagebreak Lua filter)", "Maintainer": "Yihui Xie ", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest" }, "rprojroot": { "Package": "rprojroot", - "Version": "2.1.0", + "Version": "2.0.4", "Source": "Repository", "Title": "Finding Files in Project Subdirectories", "Authors@R": "person(given = \"Kirill\", family = \"M\\u00fcller\", role = c(\"aut\", \"cre\"), email = \"kirill@cynkra.com\", comment = c(ORCID = \"0000-0002-1416-3412\"))", @@ -4579,20 +4712,18 @@ "covr", "knitr", "lifecycle", + "mockr", "rlang", "rmarkdown", - "testthat (>= 3.2.0)", + "testthat (>= 3.0.0)", "withr" ], "VignetteBuilder": "knitr", "Config/testthat/edition": "3", "Encoding": "UTF-8", - "RoxygenNote": 
"7.3.2.9000", - "Config/autostyle/scope": "line_breaks", - "Config/autostyle/strict": "true", - "Config/Needs/website": "tidyverse/tidytemplate", + "RoxygenNote": "7.2.3", "NeedsCompilation": "no", - "Author": "Kirill Müller [aut, cre] (ORCID: )", + "Author": "Kirill Müller [aut, cre] ()", "Maintainer": "Kirill Müller ", "Repository": "CRAN" }, @@ -4704,7 +4835,7 @@ "NeedsCompilation": "yes", "Author": "Dewey Dunnington [aut] (ORCID: ), Edzer Pebesma [aut, cre] (ORCID: ), Ege Rubak [aut], Jeroen Ooms [ctb] (configure script), Google, Inc. [cph] (Original s2geometry.io source code)", "Maintainer": "Edzer Pebesma ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "sass": { "Package": "sass", @@ -4784,7 +4915,7 @@ "NeedsCompilation": "no", "Author": "Hadley Wickham [aut], Thomas Lin Pedersen [cre, aut] (), Dana Seidel [aut], Posit Software, PBC [cph, fnd] (03wc8by49)", "Maintainer": "Thomas Lin Pedersen ", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest" }, "segregation": { "Package": "segregation", @@ -4942,7 +5073,36 @@ "NeedsCompilation": "yes", "Author": "Edzer Pebesma [aut, cre] (ORCID: ), Roger Bivand [ctb] (ORCID: ), Etienne Racine [ctb], Michael Sumner [ctb], Ian Cook [ctb], Tim Keitt [ctb], Robin Lovelace [ctb], Hadley Wickham [ctb], Jeroen Ooms [ctb] (ORCID: ), Kirill Müller [ctb], Thomas Lin Pedersen [ctb], Dan Baston [ctb], Dewey Dunnington [ctb] (ORCID: )", "Maintainer": "Edzer Pebesma ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" + }, + "sfarrow": { + "Package": "sfarrow", + "Version": "0.4.1", + "Source": "Repository", + "Title": "Read/Write Simple Feature Objects ('sf') with 'Apache' 'Arrow'", + "Date": "2021-10-25", + "Authors@R": "person(given = \"Chris\", family = \"Jochem\", role = c(\"aut\", \"cre\"), email = \"w.c.jochem@soton.ac.uk\", comment = c(ORCID = \"0000-0003-2192-5988\"))", + "Description": "Support for reading/writing 
simple feature ('sf') spatial objects from/to 'Parquet' files. 'Parquet' files are an open-source, column-oriented data storage format from Apache (), now popular across programming languages. This implementation converts simple feature list geometries into well-known binary format for use by 'arrow', and coordinate reference system information is maintained in a standard metadata format.", + "License": "MIT + file LICENSE", + "URL": "https://github.com/wcjochem/sfarrow, https://wcjochem.github.io/sfarrow/", + "BugReports": "https://github.com/wcjochem/sfarrow/issues", + "Encoding": "UTF-8", + "RoxygenNote": "7.1.1", + "Imports": [ + "sf", + "arrow", + "jsonlite", + "dplyr" + ], + "Suggests": [ + "knitr", + "rmarkdown" + ], + "VignetteBuilder": "knitr", + "NeedsCompilation": "no", + "Author": "Chris Jochem [aut, cre] ()", + "Maintainer": "Chris Jochem ", + "Repository": "CRAN" }, "snakecase": { "Package": "snakecase", @@ -5051,7 +5211,7 @@ "NeedsCompilation": "no", "Author": "Hadley Wickham [aut, cre, cph], Posit Software, PBC [cph, fnd]", "Maintainer": "Hadley Wickham ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "sys": { "Package": "sys", @@ -5122,6 +5282,65 @@ "Maintainer": "Thomas Lin Pedersen ", "Repository": "CRAN" }, + "testthat": { + "Package": "testthat", + "Version": "3.2.3", + "Source": "Repository", + "Title": "Unit Testing for R", + "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")), person(\"R Core team\", role = \"ctb\", comment = \"Implementation of utils::recover()\") )", + "Description": "Software testing is important, but, in part because it is frustrating and boring, many of us avoid it. 
'testthat' is a testing framework for R that is easy to learn and use, and integrates with your existing 'workflow'.", + "License": "MIT + file LICENSE", + "URL": "https://testthat.r-lib.org, https://github.com/r-lib/testthat", + "BugReports": "https://github.com/r-lib/testthat/issues", + "Depends": [ + "R (>= 3.6.0)" + ], + "Imports": [ + "brio (>= 1.1.3)", + "callr (>= 3.7.3)", + "cli (>= 3.6.1)", + "desc (>= 1.4.2)", + "digest (>= 0.6.33)", + "evaluate (>= 1.0.1)", + "jsonlite (>= 1.8.7)", + "lifecycle (>= 1.0.3)", + "magrittr (>= 2.0.3)", + "methods", + "pkgload (>= 1.3.2.1)", + "praise (>= 1.0.0)", + "processx (>= 3.8.2)", + "ps (>= 1.7.5)", + "R6 (>= 2.5.1)", + "rlang (>= 1.1.1)", + "utils", + "waldo (>= 0.6.0)", + "withr (>= 3.0.2)" + ], + "Suggests": [ + "covr", + "curl (>= 0.9.5)", + "diffviewer (>= 0.1.0)", + "knitr", + "rmarkdown", + "rstudioapi", + "S7", + "shiny", + "usethis", + "vctrs (>= 0.1.0)", + "xml2" + ], + "VignetteBuilder": "knitr", + "Config/Needs/website": "tidyverse/tidytemplate", + "Config/testthat/edition": "3", + "Config/testthat/parallel": "true", + "Config/testthat/start-first": "watcher, parallel*", + "Encoding": "UTF-8", + "RoxygenNote": "7.3.2", + "NeedsCompilation": "yes", + "Author": "Hadley Wickham [aut, cre], Posit Software, PBC [cph, fnd], R Core team [ctb] (Implementation of utils::recover())", + "Maintainer": "Hadley Wickham ", + "Repository": "CRAN" + }, "textshaping": { "Package": "textshaping", "Version": "1.0.1", @@ -5164,7 +5383,7 @@ "NeedsCompilation": "yes", "Author": "Thomas Lin Pedersen [cre, aut] (ORCID: ), Posit Software, PBC [cph, fnd] (ROR: )", "Maintainer": "Thomas Lin Pedersen ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "tibble": { "Package": "tibble", @@ -5276,7 +5495,7 @@ "NeedsCompilation": "no", "Author": "Kyle Walker [aut, cre], Matt Herman [aut], Kris Eberwein [ctb]", "Maintainer": "Kyle Walker ", - "Repository": "CRAN" + "Repository": 
"https://packagemanager.posit.co/cran/latest" }, "tidyr": { "Package": "tidyr", @@ -5326,7 +5545,7 @@ "NeedsCompilation": "yes", "Author": "Hadley Wickham [aut, cre], Davis Vaughan [aut], Maximilian Girlich [aut], Kevin Ushey [ctb], Posit Software, PBC [cph, fnd]", "Maintainer": "Hadley Wickham ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "tidyselect": { "Package": "tidyselect", @@ -5468,7 +5687,7 @@ "NeedsCompilation": "no", "Author": "Hadley Wickham [aut, cre], RStudio [cph, fnd]", "Maintainer": "Hadley Wickham ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "tigris": { "Package": "tigris", @@ -5689,7 +5908,7 @@ "RemoteUsername": "UI-Research", "RemoteRepo": "urbnindicators", "RemoteRef": "main", - "RemoteSha": "48d93cdabd4d855cfe7a0c6b38dda3f4401bae1a", + "RemoteSha": "c9392a008dc577ea3ff3c30b28b6878119bc8d42", "RemoteHost": "api.github.com", "Remotes": "UrbanInstitute/urbnthemes" }, @@ -5787,7 +6006,8 @@ "URL": "https://www.rforge.net/uuid", "BugReports": "https://github.com/s-u/uuid/issues", "NeedsCompilation": "yes", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest", + "Encoding": "UTF-8" }, "vctrs": { "Package": "vctrs", @@ -5934,6 +6154,43 @@ "Maintainer": "Jennifer Bryan ", "Repository": "CRAN" }, + "waldo": { + "Package": "waldo", + "Version": "0.6.1", + "Source": "Repository", + "Title": "Find Differences Between R Objects", + "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "Compare complex R objects and reveal the key differences. 
Designed particularly for use in testing packages where being able to quickly isolate key differences makes understanding test failures much easier.", + "License": "MIT + file LICENSE", + "URL": "https://waldo.r-lib.org, https://github.com/r-lib/waldo", + "BugReports": "https://github.com/r-lib/waldo/issues", + "Depends": [ + "R (>= 4.0)" + ], + "Imports": [ + "cli", + "diffobj (>= 0.3.4)", + "glue", + "methods", + "rlang (>= 1.1.0)" + ], + "Suggests": [ + "bit64", + "R6", + "S7", + "testthat (>= 3.0.0)", + "withr", + "xml2" + ], + "Config/Needs/website": "tidyverse/tidytemplate", + "Config/testthat/edition": "3", + "Encoding": "UTF-8", + "RoxygenNote": "7.3.2", + "NeedsCompilation": "no", + "Author": "Hadley Wickham [aut, cre], Posit Software, PBC [cph, fnd]", + "Maintainer": "Hadley Wickham ", + "Repository": "https://packagemanager.posit.co/cran/latest" + }, "whisker": { "Package": "whisker", "Version": "0.4.1", @@ -5990,7 +6247,7 @@ "NeedsCompilation": "no", "Author": "Jim Hester [aut], Lionel Henry [aut, cre], Kirill Müller [aut], Kevin Ushey [aut], Hadley Wickham [aut], Winston Chang [aut], Jennifer Bryan [ctb], Richard Cotton [ctb], Posit Software, PBC [cph, fnd]", "Maintainer": "Lionel Henry ", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest" }, "wk": { "Package": "wk", @@ -6066,7 +6323,7 @@ "NeedsCompilation": "yes", "Author": "Yihui Xie [aut, cre, cph] (, https://yihui.org), Wush Wu [ctb], Daijiang Li [ctb], Xianying Tan [ctb], Salim Brüggemann [ctb] (), Christophe Dervieux [ctb]", "Maintainer": "Yihui Xie ", - "Repository": "CRAN" + "Repository": "https://packagemanager.posit.co/cran/latest" }, "xml2": { "Package": "xml2", @@ -6155,7 +6412,7 @@ "NeedsCompilation": "no", "Author": "Nathan Teetor [aut, cre], Paul Teetor [ctb]", "Maintainer": "Nathan Teetor ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" }, "zip": { "Package": "zip", @@ -6183,7 +6440,7 @@ "NeedsCompilation": "yes", 
"Author": "Gábor Csárdi [aut, cre], Kuba Podgórski [ctb], Rich Geldreich [ctb], Posit Software, PBC [cph, fnd] (ROR: )", "Maintainer": "Gábor Csárdi ", - "Repository": "https://packagemanager.posit.co/cran/latest" + "Repository": "CRAN" } } } diff --git a/tests/testthat.R b/tests/testthat.R new file mode 100644 index 0000000..17ae5ff --- /dev/null +++ b/tests/testthat.R @@ -0,0 +1,4 @@ +library(testthat) +library(climateapi) + +test_check("climateapi") diff --git a/tests/testthat/test-get_wildfire_burn_zones.R b/tests/testthat/test-get_wildfire_burn_zones.R new file mode 100644 index 0000000..c9daa44 --- /dev/null +++ b/tests/testthat/test-get_wildfire_burn_zones.R @@ -0,0 +1,137 @@ +test_that("get_wildfire_burn_zones errors when file path does not exist", { + expect_error( + get_wildfire_burn_zones(file_path = "/nonexistent/path/to/file.geojson"), + "does not point to a valid file" + ) +}) + +test_that("get_wildfire_burn_zones returns expected structure", { + skip_if_not( + file.exists(file.path( + get_box_path(), "hazards", "other-sources", "wildfire-burn-zones", + "wfbz_disasters_2000-2025.geojson")), + message = "Wildfire burn zones data file not available" + ) + + result <- suppressMessages(get_wildfire_burn_zones()) + + expect_s3_class(result, "sf") + + expected_columns <- c( + "wildfire_id", "id_fema", "year", "wildfire_name", + "state_fips", "county_fips", "county_name", + "area_sq_km", "wildfire_complex_binary", + "date_start", "date_containment", + "fatalities_total", "injuries_total", + "structures_destroyed", "structures_threatened", + "evacuation_total", "wui_type", + "density_people_sq_km_wildfire_buffer", "geometry" + ) + expect_true(all(expected_columns %in% names(result))) +}) + +test_that("get_wildfire_burn_zones has correct CRS", { + skip_if_not( + file.exists(file.path( + get_box_path(), "hazards", "other-sources", "wildfire-burn-zones", + "wfbz_disasters_2000-2025.geojson")), + message = "Wildfire burn zones data file not available" + ) + + 
result <- suppressMessages(get_wildfire_burn_zones()) + + expect_equal(sf::st_crs(result)$epsg, 5070) +}) + +test_that("get_wildfire_burn_zones has one county per row", { + skip_if_not( + file.exists(file.path( + get_box_path(), "hazards", "other-sources", "wildfire-burn-zones", + "wfbz_disasters_2000-2025.geojson")), + message = "Wildfire burn zones data file not available" + ) + + result <- suppressMessages(get_wildfire_burn_zones()) + + expect_false(any(stringr::str_detect(result$county_fips, "\\|"), na.rm = TRUE)) + expect_false(any(stringr::str_detect(result$county_name, "\\|"), na.rm = TRUE)) +}) + +test_that("get_wildfire_burn_zones has valid FIPS codes", { + skip_if_not( + file.exists(file.path( + get_box_path(), "hazards", "other-sources", "wildfire-burn-zones", + "wfbz_disasters_2000-2025.geojson")), + message = "Wildfire burn zones data file not available" + ) + + result <- suppressMessages(get_wildfire_burn_zones()) + + non_na_state_fips <- result$state_fips[!is.na(result$state_fips)] + non_na_county_fips <- result$county_fips[!is.na(result$county_fips)] + + expect_true(all(stringr::str_length(non_na_state_fips) == 2)) + expect_true(all(stringr::str_length(non_na_county_fips) == 5)) + expect_true(all(result$state_fips == stringr::str_sub(result$county_fips, 1, 2), na.rm = TRUE)) +}) + +test_that("get_wildfire_burn_zones county names are title case", { + skip_if_not( + file.exists(file.path( + get_box_path(), "hazards", "other-sources", "wildfire-burn-zones", + "wfbz_disasters_2000-2025.geojson")), + message = "Wildfire burn zones data file not available" + ) + + result <- suppressMessages(get_wildfire_burn_zones()) + + non_na_names <- result$county_name[!is.na(result$county_name)] + expect_equal(non_na_names, stringr::str_to_title(non_na_names)) +}) + +test_that("get_wildfire_burn_zones emits expected message", { + skip_if_not( + file.exists(file.path( + get_box_path(), "hazards", "other-sources", "wildfire-burn-zones", + 
"wfbz_disasters_2000-2025.geojson")), + message = "Wildfire burn zones data file not available" + ) + + expect_message( + get_wildfire_burn_zones(), + stringr::str_c( + "Each observation represents a county affected by a wildfire burn zone disaster. ", + "Wildfires spanning multiple counties appear as multiple rows. ", + "Disasters are defined as wildfires that burned near a community and resulted in ", + "at least one civilian fatality, one destroyed structure, or received federal disaster relief. ", + "Geometries represent burn zone perimeters sourced from FIRED, MTBS, or NIFC datasets.") + ) +}) + +test_that("get_wildfire_burn_zones year column is integer", { + skip_if_not( + file.exists(file.path( + get_box_path(), "hazards", "other-sources", "wildfire-burn-zones", + "wfbz_disasters_2000-2025.geojson")), + message = "Wildfire burn zones data file not available" + ) + + result <- suppressMessages(get_wildfire_burn_zones()) + + expect_type(result$year, "integer") + expect_true(all(result$year >= 2000 & result$year <= 2025, na.rm = TRUE)) +}) + +test_that("get_wildfire_burn_zones date columns are Date class", { + skip_if_not( + file.exists(file.path( + get_box_path(), "hazards", "other-sources", "wildfire-burn-zones", + "wfbz_disasters_2000-2025.geojson")), + message = "Wildfire burn zones data file not available" + ) + + result <- suppressMessages(get_wildfire_burn_zones()) + + expect_s3_class(result$date_start, "Date") + expect_s3_class(result$date_containment, "Date") +}) diff --git a/vignettes/get_sheldus.Rmd b/vignettes/get_sheldus.Rmd new file mode 100644 index 0000000..f8add1b --- /dev/null +++ b/vignettes/get_sheldus.Rmd @@ -0,0 +1,222 @@ +--- +title: "SHELDUS Hazard Data" +output: rmarkdown::html_vignette +vignette: > + %\VignetteIndexEntry{SHELDUS Hazard Data} + %\VignetteEngine{knitr::rmarkdown} + %\VignetteEncoding{UTF-8} +--- + +```{r, include = FALSE} +knitr::opts_chunk$set( + collapse = TRUE, + comment = "#>", + warning = FALSE, + message = FALSE, + 
fig.width = 8, + fig.height = 6, + dpi = 150 +) +``` + +## Overview + +The `get_sheldus()` function provides access to county-level hazard event data from the Spatial Hazard Events and Losses Database for the United States (SHELDUS). This database tracks property damage, crop damage, fatalities, and injuries from natural hazards across all US counties. + +## Data source + +SHELDUS is maintained by Arizona State University's Center for Emergency Management and Homeland Security. The database compiles hazard event data from multiple sources including NOAA Storm Events, the National Climatic Data Center, and other federal agencies. + +Access to SHELDUS requires a subscription. See [https://cemhs.asu.edu/sheldus](https://cemhs.asu.edu/sheldus) for more information. + +## Loading the data + +```{r setup} +library(climateapi) +library(tidyverse) +library(sf) +library(urbnthemes) + +set_urbn_defaults(style = "print") +``` + +```{r load-data, eval = FALSE} +sheldus <- get_sheldus() +``` + +```{r load-data-hidden, echo = FALSE} +sheldus <- get_sheldus() +``` + +## Data structure + +Each row represents a unique combination of county, year, month, and hazard type. Only county-month-hazard combinations with recorded events are included. 
+ +```{r structure} +glimpse(sheldus) +``` + +Key variables include: + +- `unique_id`: Unique identifier for each observation +- `GEOID`: Five-digit county FIPS code +- `year` and `month`: Temporal identifiers +- `hazard`: Type of hazard event (e.g., "Flooding", "Hurricane/Tropical Storm") +- `damage_property`: Property damage in 2023 inflation-adjusted dollars +- `damage_crop`: Crop damage in 2023 inflation-adjusted dollars +- `fatalities` and `injuries`: Human impacts +- `records`: Number of individual events aggregated into the observation + +## Example analyses + +### Hazard types in the database + +```{r hazard-types} +sheldus |> + distinct(hazard) |> + arrange(hazard) |> + pull(hazard) +``` + +### Annual property damage by hazard type + +```{r annual-damage} +df1 <- sheldus |> + summarize( + .by = c(year, hazard), + total_damage = sum(damage_property, na.rm = TRUE) / 1e9) + +top_hazards <- df1 |> + summarize( + .by = hazard, + total = sum(total_damage, na.rm = TRUE)) |> + slice_max(total, n = 5) |> + pull(hazard) + +df1 |> + filter(hazard %in% top_hazards) |> + ggplot(aes(x = year, y = total_damage, fill = hazard)) + + geom_col() + + scale_fill_manual(values = c( + palette_urbn_main[1:4], palette_urbn_cyan[5])) + + labs( + title = "Annual Property Damage by Hazard Type", + subtitle = "Top 5 hazard types by total damage, 2023 dollars", + x = "", + y = "Property damage (billions)", + fill = "Hazard type") +``` + +### Geographic distribution of flood damage + +```{r flood-damage-map, fig.height = 7} +flood_damage <- sheldus |> + filter(hazard == "Flooding") |> + summarize( + .by = GEOID, + total_damage = sum(damage_property, na.rm = TRUE)) + +counties_sf <- tigris::counties(cb = TRUE, year = 2022, progress_bar = FALSE) |> + st_transform(5070) |> + filter(!STATEFP %in% c("02", "15", "72", "78", "66", "60", "69")) + +flood_map <- counties_sf |> + left_join(flood_damage, by = "GEOID") |> + mutate( + damage_category = case_when( + is.na(total_damage) | total_damage 
== 0 ~ "No recorded damage", + total_damage < 1e6 ~ "< $1M", + total_damage < 10e6 ~ "$1M - $10M", + total_damage < 100e6 ~ "$10M - $100M", + TRUE ~ "> $100M"), + damage_category = factor( + damage_category, + levels = c("No recorded damage", "< $1M", "$1M - $10M", "$10M - $100M", "> $100M"))) + +ggplot(flood_map) + + geom_sf(aes(fill = damage_category), color = NA) + + scale_fill_manual(values = c( + "No recorded damage" = "grey90", + "< $1M" = palette_urbn_cyan[1], + "$1M - $10M" = palette_urbn_cyan[3], + "$10M - $100M" = palette_urbn_cyan[5], + "> $100M" = palette_urbn_cyan[7])) + + labs( + title = "Cumulative Flood Damage by County", + subtitle = "Total property damage from flooding events, 2023 dollars", + fill = "Total damage") + + theme_urbn_map() +``` + +### Seasonal patterns in hazard events + +```{r seasonal-patterns} +seasonal <- sheldus |> + filter(hazard %in% top_hazards) |> + summarize( + .by = c(month, hazard), + total_events = sum(records, na.rm = TRUE)) + +ggplot(seasonal, aes(x = month, y = total_events, color = hazard)) + + geom_line(linewidth = 1) + + geom_point(size = 2) + + scale_x_continuous(breaks = 1:12, labels = month.abb) + + scale_color_manual(values = c( + palette_urbn_main[1:4], palette_urbn_cyan[5])) + + labs( + title = "Seasonal Distribution of Hazard Events", + subtitle = "Total event count by month across all years", + x = "", + y = "Number of events", + color = "Hazard type") +``` + +### Counties with highest fatalities + +```{r fatalities} +high_fatality_counties <- sheldus |> + summarize( + .by = c(GEOID, state_name, county_name), + total_fatalities = sum(fatalities, na.rm = TRUE)) |> + slice_max(total_fatalities, n = 15) + +high_fatality_counties |> + mutate( + county_label = str_c(county_name, ", ", state_name), + county_label = fct_reorder(county_label, total_fatalities)) |> + ggplot(aes(y = county_label, x = total_fatalities)) + + geom_col() + + labs( + title = "Counties with Highest Hazard-Related Fatalities", + x = "Total 
fatalities", + y = "") +``` + +### Linking with census data + +The county-level structure makes it straightforward to join with demographic data. + +```{r demographics, eval = FALSE} +county_demographics <- tidycensus::get_acs( + geography = "county", + variables = c( + median_income = "B19013_001", + total_pop = "B01003_001"), + year = 2022, + output = "wide") + +county_hazard_summary <- sheldus |> + filter(year >= 2018) |> + summarize( + .by = GEOID, + total_damage = sum(damage_property, na.rm = TRUE), + total_events = sum(records, na.rm = TRUE)) |> + left_join(county_demographics, by = "GEOID") |> + mutate(damage_per_capita = total_damage / total_popE) +``` + +## See also + +- `get_fema_disaster_declarations()`: FEMA disaster declarations +- `get_nfip_claims()`: National Flood Insurance Program claims data +- `get_wildfire_burn_zones()`: Wildfire burn zone disasters diff --git a/vignettes/get_structures.Rmd b/vignettes/get_structures.Rmd new file mode 100644 index 0000000..1577b18 --- /dev/null +++ b/vignettes/get_structures.Rmd @@ -0,0 +1,229 @@ +--- +title: "USA Structures Data" +output: rmarkdown::html_vignette +vignette: > + %\VignetteIndexEntry{USA Structures Data} + %\VignetteEngine{knitr::rmarkdown} + %\VignetteEncoding{UTF-8} +--- + +```{r, include = FALSE} +knitr::opts_chunk$set( + collapse = TRUE, + comment = "#>", + warning = FALSE, + message = FALSE, + fig.width = 8, + fig.height = 6, + dpi = 150 +) +``` + +## Overview + +The `get_structures()` function retrieves building footprint data from the USA Structures dataset and summarizes structure counts by type at the tract or county level. This is useful for estimating the number and types of buildings within areas affected by natural hazards. + +## Data source + +Data are sourced from the USA Structures dataset maintained by the Department of Homeland Security. The dataset contains building footprints derived from high-resolution imagery for structures across the United States. 
+ +See [https://geoplatform.gov/metadata/9d4a3ae3-8637-4707-92a7-b7d67b769a6b](https://geoplatform.gov/metadata/9d4a3ae3-8637-4707-92a7-b7d67b769a6b) for more information. + +## Loading the data + +```{r setup} +library(climateapi) +library(tidyverse) +library(sf) +library(urbnthemes) + +set_urbn_defaults(style = "print") +``` + +The function requires a `boundaries` argument specifying the geographic area of interest. This must be a spatial polygon object with a defined coordinate reference system. + +```{r load-data, eval = FALSE} +# Example: Get structures in Washington, DC +dc_boundary <- tigris::states(cb = TRUE) |> + filter(STUSPS == "DC") + +dc_structures <- get_structures( + boundaries = dc_boundary, + geography = "tract") +``` + +```{r load-data-hidden, echo = FALSE} +dc_boundary <- tigris::states(cb = TRUE) |> + filter(STUSPS == "DC") + +dc_structures <- get_structures( + boundaries = dc_boundary, + geography = "tract") +``` + +## Function parameters + +- `boundaries`: A POLYGON or MULTIPOLYGON sf object, or an `sf::st_bbox()`-style bounding box. Must have a defined CRS. +- `geography`: The desired output geography. One of `"tract"` or `"county"`. +- `keep_structures`: If `TRUE`, returns both the summarized counts and the raw point-level structure data. + +## Data structure + +Each row represents a unique combination of geographic unit (tract or county) and structure type. 
+ +```{r structure} +glimpse(dc_structures) +``` + +Key variables include: + +- `GEOID`: Census tract (11-digit) or county (5-digit) FIPS code +- `primary_occupancy`: The primary use of the structure (e.g., "Residential", "Commercial") +- `occupancy_class`: Broader classification of occupancy type +- `count`: Number of structures of this type in the geographic unit + +### Occupancy types + +```{r occupancy-types} +dc_structures |> + distinct(occupancy_class, primary_occupancy) |> + arrange(occupancy_class, primary_occupancy) +``` + +## Example analyses + +### Structure composition by tract + +```{r tract-composition} +dc_summary <- dc_structures |> + summarize( + .by = GEOID, + total_structures = sum(count, na.rm = TRUE), + residential = sum(count[occupancy_class == "Residential"], na.rm = TRUE), + commercial = sum(count[occupancy_class == "Commercial"], na.rm = TRUE)) |> + mutate( + residential_share = residential / total_structures) + +dc_summary |> + ggplot(aes(x = total_structures, y = residential_share)) + + geom_point(alpha = 0.7) + + scale_y_continuous(labels = scales::percent) + + labs( + title = "Residential Share vs. 
Total Structures by Tract", + subtitle = "Washington, DC census tracts", + x = "Total structures", + y = "Share residential") +``` + +### Mapping structure density + +```{r map-structures, fig.height = 8} +dc_tracts <- tigris::tracts(state = "DC", cb = TRUE, year = 2023, progress_bar = FALSE) |> + st_transform(5070) + +dc_tract_totals <- dc_structures |> + summarize( + .by = GEOID, + total_structures = sum(count, na.rm = TRUE)) + +dc_map <- dc_tracts |> + left_join(dc_tract_totals, by = "GEOID") + +ggplot(dc_map) + + geom_sf(aes(fill = total_structures), color = "white", linewidth = 0.2) + + scale_fill_gradientn( + colors = c(palette_urbn_cyan[1], palette_urbn_cyan[4], palette_urbn_cyan[7]), + labels = scales::comma) + + labs( + title = "Structure Count by Census Tract", + subtitle = "Washington, DC", + fill = "Structures") + + theme_urbn_map() +``` + +### Analyzing structures in a disaster area + +A common use case is estimating structures within a hazard-affected area. This example shows how to combine structure data with wildfire burn zones. + +```{r disaster-example, eval = FALSE} +# Get a specific wildfire's burn zone +burn_zones <- get_wildfire_burn_zones() + +camp_fire <- burn_zones |> + filter(str_detect(wildfire_name, "CAMP")) + +# Get structures within the burn zone +camp_fire_structures <- get_structures( + + boundaries = camp_fire, + geography = "tract", + keep_structures = TRUE) + +# Summarized counts +camp_fire_structures$structures_summarized |> + summarize( + .by = occupancy_class, + total = sum(count, na.rm = TRUE)) |> + arrange(desc(total)) +``` + +### Working with raw structure data + +Setting `keep_structures = TRUE` returns both the summarized data and the raw point-level structure data. 
+ +```{r raw-structures, eval = FALSE} +dc_full <- get_structures( + boundaries = dc_boundary, + geography = "county", + keep_structures = TRUE) + +# Access the raw point data +raw_structures <- dc_full$structures_raw + +# Access the summarized data +summary_structures <- dc_full$structures_summarized + +# Map individual structures +ggplot() + + geom_sf(data = dc_boundary, fill = "grey95") + + geom_sf( + data = raw_structures |> filter(primary_occupancy == "Commercial"), + size = 0.1, + alpha = 0.5) + + labs(title = "Commercial Structures in Washington, DC") + + theme_urbn_map() +``` + +### County-level analysis + +For larger areas, county-level aggregation provides a useful summary. + +```{r county-example, eval = FALSE} +# Get structures for multiple states +southeast_boundary <- tigris::states(cb = TRUE) |> + filter(STUSPS %in% c("FL", "GA", "AL", "SC")) + +southeast_structures <- get_structures( + boundaries = southeast_boundary, + geography = "county") + +# Summarize by county +county_totals <- southeast_structures |> + summarize( + .by = GEOID, + total_structures = sum(count, na.rm = TRUE)) +``` + +## Performance considerations + +The raw building footprint data files are large. Processing can be slow, especially for multi-state analyses. 
Consider: + +- Starting with a small geographic area to test your workflow +- Using county-level aggregation when tract-level detail is not required +- Caching results for repeated analyses + +## See also + +- `get_wildfire_burn_zones()`: Wildfire burn zone disasters for use as boundaries +- `get_current_fire_perimeters()`: Active wildfire perimeters for use as boundaries +- `get_fema_disaster_declarations()`: FEMA disaster declarations diff --git a/vignettes/get_wildfire_burn_zones.Rmd b/vignettes/get_wildfire_burn_zones.Rmd new file mode 100644 index 0000000..0d5bdd4 --- /dev/null +++ b/vignettes/get_wildfire_burn_zones.Rmd @@ -0,0 +1,215 @@ +--- +title: "Wildfire Burn Zones" +output: rmarkdown::html_vignette +vignette: > + %\VignetteIndexEntry{Wildfire Burn Zones} + %\VignetteEngine{knitr::rmarkdown} + %\VignetteEncoding{UTF-8} +--- + +```{r, include = FALSE} +knitr::opts_chunk$set( + collapse = TRUE, + comment = "#>", + warning = FALSE, + message = FALSE, + fig.width = 8, + fig.height = 6, + dpi = 150 +) +``` + +## Overview + +The `get_wildfire_burn_zones()` function provides access to a harmonized dataset of wildfire burn zone disasters in the United States from 2000-2025. + +## Data sources and methodology + +This dataset combines six authoritative wildfire data sources to identify wildfires that burned near communities and resulted in civilian fatalities, destroyed structures, or received federal disaster relief. These sources are: + +- **FIRED (Fire Event Delineation)**: Satellite-derived fire perimeters +- **MTBS (Monitoring Trends in Burn Severity)**: Burn severity and perimeters for large fires +- **NIFC (National Interagency Fire Center)**: Official fire incident data +- **ICS-209 (Incident Status Summary)**: Incident management records +- **RedBook**: Historical wildfire statistics +- **FEMA**: Federal disaster declarations + +Wildfires are classified as "disasters" if they: + +1. Burned near a community AND +2. 
Resulted in at least one of: + - Civilian fatality + - Destroyed structure + - Federal disaster relief + +These data are described and provided in association with the journal article: Wilner, L.B., Piepmeier, L., Gordon, M. et al. Two and a half decades of United States wildfire burn zone disaster data, 2000-2025. Sci Data 12, 1948 (2025). https://doi.org/10.1038/s41597-025-06226-8. + +## Loading the data + +```{r setup} +library(climateapi) +library(tidyverse) +library(sf) +library(urbnthemes) + +set_urbn_defaults(style = "print") +``` + +```{r load-data, eval = FALSE} +burn_zones <- get_wildfire_burn_zones() +``` + +```{r load-data-hidden, echo = FALSE} +# For vignette building, we'll create example output structure +# In practice, users would run the above code +burn_zones <- get_wildfire_burn_zones() +``` + +## Data structure + +Each row in the dataset represents a single wildfire burn zone disaster. + +```{r structure} +glimpse(burn_zones) +``` + +Key variables include: + +- `wildfire_id`: Unique identifier for each wildfire event +- `year`: Year the wildfire occurred (2000-2025) +- `wildfire_name`: Name of the wildfire or fire complex +- `county_fips`: Pipe-delimited string of county FIPS codes for all affected counties +- `county_name`: Pipe-delimited string of county names for all affected counties +- `area_sq_km`: Burned area in square kilometers +- `fatalities_total` and `injuries_total`: Human impacts +- `structures_destroyed` and `structures_threatened`: Built environment impacts +- `geometry`: Burn zone polygon boundaries + +## Example analyses + +### Annual trends in wildfire disasters + +```{r annual-trends} +# Extract state FIPS from the first county in the pipe-delimited list +df1 = burn_zones |> + st_drop_geometry() |> + mutate(state_fips = str_sub(county_fips, 1, 2)) |> + summarize( + .by = c(year, state_fips), + n_wildfires = n(), + total_area_sq_km = sum(area_sq_km, na.rm = TRUE), + total_structures_destroyed = sum(structures_destroyed, na.rm 
= TRUE)) |> + left_join( + tigris::fips_codes %>% distinct(state, state_code), + by = c("state_fips" = "state_code")) + +top_five_states = df1 %>% + arrange(desc(n_wildfires)) %>% + distinct(state) %>% + slice(1:5) + +df1 %>% + filter(state %in% top_five_states$state) %>% + mutate(state = factor(state, levels = top_five_states %>% pull(state), ordered = TRUE)) %>% + ggplot(aes(x = year, y = n_wildfires)) + + geom_col() + + labs( + title = "Many states frequently experience more than 50 wildfires per year", + subtitle = "Disasters defined as wildfires causing fatalities, structure loss, or federal relief", + x = "", + y = "Number of wildfires") + + facet_wrap(~state) +``` + +### Geographic distribution of impacts + +```{r state-impacts} +state_impacts <- burn_zones |> + st_drop_geometry() |> + mutate(state_fips = str_sub(county_fips, 1, 2)) |> + summarize( + .by = state_fips, + n_wildfires = n_distinct(wildfire_id), + total_structures_destroyed = sum(structures_destroyed, na.rm = TRUE), + total_fatalities = sum(fatalities_total, na.rm = TRUE) + ) |> + left_join( + tidycensus::fips_codes |> + distinct(state_code, state_name), + by = c("state_fips" = "state_code") + ) |> + filter(!is.na(state_name)) + +state_impacts |> + slice_max(n_wildfires, n = 10) |> + mutate(state_name = fct_reorder(state_name, n_wildfires)) |> + ggplot(aes(y = state_name, x = n_wildfires)) + + geom_col() + + labs( + title = "States with Most Wildfire Burn Zone Disasters (2000-2025)", + x = "Number of distinct wildfires", + y = "" + ) +``` + +### Mapping wildfire burn zones + +```{r map-example, fig.height = 8} +# Get wildfires from a recent year in California +ca_2020_fires <- burn_zones |> + filter(str_detect(county_fips, "^06"), year == 2020) + +# Get California counties for context +ca_counties <- tigris::counties(state = "CA", cb = TRUE, year = 2022, progress_bar = FALSE) |> + st_transform(5070) + +ggplot() + + geom_sf(data = ca_counties, fill = "grey95", color = "grey70") + + geom_sf(data = 
ca_2020_fires, aes(fill = structures_destroyed), alpha = 0.8) + + scale_fill_continuous(trans = "reverse") + + labs( + title = "California Wildfire Burn Zone Disasters (2020)", + subtitle = "Burn zone perimeters colored by structures destroyed", + fill = "Structures\ndestroyed") + + theme_urbn_map() +``` + +### Analyzing structure loss severity + +```{r severity-analysis} +burn_zones |> + st_drop_geometry() |> + distinct(wildfire_id, year, wildfire_name, structures_destroyed) |> + filter(!is.na(structures_destroyed), structures_destroyed > 0) |> + mutate( + severity = case_when( + structures_destroyed >= 1000 ~ "1,000+ structures", + structures_destroyed >= 100 ~ "100-999", + structures_destroyed >= 10 ~ "10-99", + TRUE ~ "1-9 structures"), + severity = factor( + severity, + levels = c("1-9 structures", "10-99", "100-999", "1,000+ structures"))) |> + count(year, severity) |> + mutate( + .by = year, + percent = n / sum(n, na.rm = TRUE)) |> + ggplot(aes(x = year, y = percent, fill = severity)) + + geom_col() + + scale_fill_manual(values = c( + "1-9 structures" = palette_urbn_cyan[1], + "10-99" = palette_urbn_cyan[3], + "100-999" = palette_urbn_cyan[5], + "1,000+ structures" = palette_urbn_cyan[7])) + + scale_y_continuous(labels = scales::percent) + + labs( + title = "Most wildifres destroy under 10 structures", + x = "", + y = "") +``` + +## See also + +- `get_current_fire_perimeters()`: Access current/active wildfire perimeters +- `get_fema_disaster_declarations()`: FEMA disaster declarations including fire-related declarations +- `get_structures()`: Estimate structures within geographic boundaries