From f9aa7f35bebf2ff847f04b3ed9343f44daf25e83 Mon Sep 17 00:00:00 2001 From: Will Curran-Groome Date: Fri, 16 Jan 2026 14:55:13 -0500 Subject: [PATCH 1/3] beginning to support inter-decadal changes; adding beads --- .Rbuildignore | 18 +- .Rprofile | 2 +- .beads/.gitignore | 44 + .beads/README.md | 81 + .beads/config.yaml | 62 + .beads/interactions.jsonl | 0 .beads/issues.jsonl | 0 .beads/metadata.json | 4 + .gitattributes | 3 + .github/.gitignore | 2 +- .github/workflows/pkgdown.yaml | 98 +- .gitignore | 172 +- AGENTS.md | 40 + DESCRIPTION | 67 +- LICENSE.md | 42 +- R/get_crosswalk.R | 408 ++- R/get_ctdata_crosswalk.R | 255 ++ R/get_geocorr_crosswalk.R | 666 ++-- R/get_nhgis_crosswalk.R | 926 ++--- README.md | 198 +- _pkgdown.yml | 8 +- crosswalk.Rproj | 42 +- man/get_crosswalk.Rd | 74 +- renv.lock | 3759 ++++---------------- renv/.gitignore | 14 +- renv/activate.R | 2668 +++++++------- renv/settings.json | 38 +- tests/testthat.R | 4 + tests/testthat/test-noncensus-crosswalks.R | 348 ++ 29 files changed, 4457 insertions(+), 5586 deletions(-) create mode 100644 .beads/.gitignore create mode 100644 .beads/README.md create mode 100644 .beads/config.yaml create mode 100644 .beads/interactions.jsonl create mode 100644 .beads/issues.jsonl create mode 100644 .beads/metadata.json create mode 100644 .gitattributes create mode 100644 AGENTS.md create mode 100644 R/get_ctdata_crosswalk.R create mode 100644 tests/testthat.R create mode 100644 tests/testthat/test-noncensus-crosswalks.R diff --git a/.Rbuildignore b/.Rbuildignore index 1114329..e283198 100644 --- a/.Rbuildignore +++ b/.Rbuildignore @@ -1,9 +1,9 @@ -^renv$ -^renv\.lock$ -^.*\.Rproj$ -^\.Rproj\.user$ -^LICENSE\.md$ -^_pkgdown\.yml$ -^docs$ -^pkgdown$ -^\.github$ +^renv$ +^renv\.lock$ +^.*\.Rproj$ +^\.Rproj\.user$ +^LICENSE\.md$ +^_pkgdown\.yml$ +^docs$ +^pkgdown$ +^\.github$ diff --git a/.Rprofile b/.Rprofile index 81b960f..c044f24 100644 --- a/.Rprofile +++ b/.Rprofile @@ -1 +1 @@ -source("renv/activate.R") +source("renv/activate.R") diff --git a/.beads/.gitignore b/.beads/.gitignore new file mode 100644 index 0000000..d27a1db --- /dev/null +++ b/.beads/.gitignore @@ -0,0 +1,44 @@ +# SQLite databases +*.db +*.db?* +*.db-journal +*.db-wal +*.db-shm + +# Daemon runtime files +daemon.lock +daemon.log +daemon.pid +bd.sock +sync-state.json +last-touched + +# Local version tracking (prevents upgrade notification spam after git ops) +.local_version + +# Legacy database files +db.sqlite +bd.db + +# Worktree redirect file (contains relative path to main repo's .beads/) +# Must not be committed as paths would be wrong in other clones +redirect + +# Merge artifacts (temporary files from 3-way merge) +beads.base.jsonl +beads.base.meta.json +beads.left.jsonl +beads.left.meta.json +beads.right.jsonl +beads.right.meta.json + +# Sync state (local-only, per-machine) +# These files are machine-specific and should not be shared across clones +.sync.lock +sync_base.jsonl + +# NOTE: Do NOT add negation patterns (e.g., !issues.jsonl) here. +# They would override fork protection in .git/info/exclude, allowing +# contributors to accidentally commit upstream issue databases. +# The JSONL files (issues.jsonl, interactions.jsonl) and config files +# are tracked by git by default since no pattern above ignores them. diff --git a/.beads/README.md b/.beads/README.md new file mode 100644 index 0000000..50f281f --- /dev/null +++ b/.beads/README.md @@ -0,0 +1,81 @@ +# Beads - AI-Native Issue Tracking + +Welcome to Beads! 
This repository uses **Beads** for issue tracking - a modern, AI-native tool designed to live directly in your codebase alongside your code. + +## What is Beads? + +Beads is issue tracking that lives in your repo, making it perfect for AI coding agents and developers who want their issues close to their code. No web UI required - everything works through the CLI and integrates seamlessly with git. + +**Learn more:** [github.com/steveyegge/beads](https://github.com/steveyegge/beads) + +## Quick Start + +### Essential Commands + +```bash +# Create new issues +bd create "Add user authentication" + +# View all issues +bd list + +# View issue details +bd show + +# Update issue status +bd update --status in_progress +bd update --status done + +# Sync with git remote +bd sync +``` + +### Working with Issues + +Issues in Beads are: +- **Git-native**: Stored in `.beads/issues.jsonl` and synced like code +- **AI-friendly**: CLI-first design works perfectly with AI coding agents +- **Branch-aware**: Issues can follow your branch workflow +- **Always in sync**: Auto-syncs with your commits + +## Why Beads? + +✨ **AI-Native Design** +- Built specifically for AI-assisted development workflows +- CLI-first interface works seamlessly with AI coding agents +- No context switching to web UIs + +🚀 **Developer Focused** +- Issues live in your repo, right next to your code +- Works offline, syncs when you push +- Fast, lightweight, and stays out of your way + +🔧 **Git Integration** +- Automatic sync with git commits +- Branch-aware issue tracking +- Intelligent JSONL merge resolution + +## Get Started with Beads + +Try Beads in your own projects: + +```bash +# Install Beads +curl -sSL https://raw.githubusercontent.com/steveyegge/beads/main/scripts/install.sh | bash + +# Initialize in your repo +bd init + +# Create your first issue +bd create "Try out Beads" +``` + +## Learn More + +- **Documentation**: [github.com/steveyegge/beads/docs](https://github.com/steveyegge/beads/tree/main/docs) +- **Quick Start Guide**: Run `bd quickstart` +- **Examples**: [github.com/steveyegge/beads/examples](https://github.com/steveyegge/beads/tree/main/examples) + +--- + +*Beads: Issue tracking that moves at the speed of thought* ⚡ diff --git a/.beads/config.yaml b/.beads/config.yaml new file mode 100644 index 0000000..f242785 --- /dev/null +++ b/.beads/config.yaml @@ -0,0 +1,62 @@ +# Beads Configuration File +# This file configures default behavior for all bd commands in this repository +# All settings can also be set via environment variables (BD_* prefix) +# or overridden with command-line flags + +# Issue prefix for this repository (used by bd init) +# If not set, bd init will auto-detect from directory name +# Example: issue-prefix: "myproject" creates issues like "myproject-1", "myproject-2", etc. 
+# issue-prefix: "" + +# Use no-db mode: load from JSONL, no SQLite, write back after each command +# When true, bd will use .beads/issues.jsonl as the source of truth +# instead of SQLite database +# no-db: false + +# Disable daemon for RPC communication (forces direct database access) +# no-daemon: false + +# Disable auto-flush of database to JSONL after mutations +# no-auto-flush: false + +# Disable auto-import from JSONL when it's newer than database +# no-auto-import: false + +# Enable JSON output by default +# json: false + +# Default actor for audit trails (overridden by BD_ACTOR or --actor) +# actor: "" + +# Path to database (overridden by BEADS_DB or --db) +# db: "" + +# Auto-start daemon if not running (can also use BEADS_AUTO_START_DAEMON) +# auto-start-daemon: true + +# Debounce interval for auto-flush (can also use BEADS_FLUSH_DEBOUNCE) +# flush-debounce: "5s" + +# Git branch for beads commits (bd sync will commit to this branch) +# IMPORTANT: Set this for team projects so all clones use the same sync branch. +# This setting persists across clones (unlike database config which is gitignored). +# Can also use BEADS_SYNC_BRANCH env var for local override. +# If not set, bd sync will require you to run 'bd config set sync.branch '. +# sync-branch: "beads-sync" + +# Multi-repo configuration (experimental - bd-307) +# Allows hydrating from multiple repositories and routing writes to the correct JSONL +# repos: +# primary: "." # Primary repo (where this database lives) +# additional: # Additional repos to hydrate from (read-only) +# - ~/beads-planning # Personal planning repo +# - ~/work-planning # Work planning repo + +# Integration settings (access with 'bd config get/set') +# These are stored in the database, not in this file: +# - jira.url +# - jira.project +# - linear.url +# - linear.api-key +# - github.org +# - github.repo diff --git a/.beads/interactions.jsonl b/.beads/interactions.jsonl new file mode 100644 index 0000000..e69de29 diff --git a/.beads/issues.jsonl b/.beads/issues.jsonl new file mode 100644 index 0000000..e69de29 diff --git a/.beads/metadata.json b/.beads/metadata.json new file mode 100644 index 0000000..c787975 --- /dev/null +++ b/.beads/metadata.json @@ -0,0 +1,4 @@ +{ + "database": "beads.db", + "jsonl_export": "issues.jsonl" +} \ No newline at end of file diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..807d598 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,3 @@ + +# Use bd merge for beads JSONL files +.beads/issues.jsonl merge=beads diff --git a/.github/.gitignore b/.github/.gitignore index 2d19fc7..d3fc626 100644 --- a/.github/.gitignore +++ b/.github/.gitignore @@ -1 +1 @@ -*.html +*.html diff --git a/.github/workflows/pkgdown.yaml b/.github/workflows/pkgdown.yaml index bfc9f4d..057cae3 100644 --- a/.github/workflows/pkgdown.yaml +++ b/.github/workflows/pkgdown.yaml @@ -1,49 +1,49 @@ -# Workflow derived from https://github.com/r-lib/actions/tree/v2/examples -# Need help debugging build failures? 
Start at https://github.com/r-lib/actions#where-to-find-help -on: - push: - branches: [main, master] - pull_request: - release: - types: [published] - workflow_dispatch: - -name: pkgdown.yaml - -permissions: read-all - -jobs: - pkgdown: - runs-on: ubuntu-latest - # Only restrict concurrency for non-PR jobs - concurrency: - group: pkgdown-${{ github.event_name != 'pull_request' || github.run_id }} - env: - GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} - permissions: - contents: write - steps: - - uses: actions/checkout@v4 - - - uses: r-lib/actions/setup-pandoc@v2 - - - uses: r-lib/actions/setup-r@v2 - with: - use-public-rspm: true - - - uses: r-lib/actions/setup-r-dependencies@v2 - with: - extra-packages: any::pkgdown, local::. - needs: website - - - name: Build site - run: pkgdown::build_site_github_pages(new_process = FALSE, install = FALSE) - shell: Rscript {0} - - - name: Deploy to GitHub pages 🚀 - if: github.event_name != 'pull_request' - uses: JamesIves/github-pages-deploy-action@v4.5.0 - with: - clean: false - branch: gh-pages - folder: docs +# Workflow derived from https://github.com/r-lib/actions/tree/v2/examples +# Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help +on: + push: + branches: [main, master] + pull_request: + release: + types: [published] + workflow_dispatch: + +name: pkgdown.yaml + +permissions: read-all + +jobs: + pkgdown: + runs-on: ubuntu-latest + # Only restrict concurrency for non-PR jobs + concurrency: + group: pkgdown-${{ github.event_name != 'pull_request' || github.run_id }} + env: + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + permissions: + contents: write + steps: + - uses: actions/checkout@v4 + + - uses: r-lib/actions/setup-pandoc@v2 + + - uses: r-lib/actions/setup-r@v2 + with: + use-public-rspm: true + + - uses: r-lib/actions/setup-r-dependencies@v2 + with: + extra-packages: any::pkgdown, local::. 
+ needs: website + + - name: Build site + run: pkgdown::build_site_github_pages(new_process = FALSE, install = FALSE) + shell: Rscript {0} + + - name: Deploy to GitHub pages 🚀 + if: github.event_name != 'pull_request' + uses: JamesIves/github-pages-deploy-action@v4.5.0 + with: + clean: false + branch: gh-pages + folder: docs diff --git a/.gitignore b/.gitignore index a490d86..6ea6533 100644 --- a/.gitignore +++ b/.gitignore @@ -1,86 +1,86 @@ -# History files -.Rhistory -.Rapp.history - -# Data files -.RData -.csv -.xlsx -.gpkg -.shp - -# User-specific files -.Ruserdata - -# Example code in package build process -*-Ex.R - -# Output files from R CMD build -/*.tar.gz - -# Output files from R CMD check -/*.Rcheck/ - -# RStudio files -.Rproj.user/ - -# produced vignettes -vignettes/*.html -vignettes/*.pdf - -# OAuth2 token, see https://github.com/hadley/httr/releases/tag/v0.3 -.httr-oauth - -# knitr and R markdown default cache directories -*_cache/ -/cache/ - -# Temporary files created by R markdown -*.utf8.md -*.knit.md - -# R Environment Variables -.Renviron - -# pkgdown site -docs/ - -# translation temp files -po/*~ - -# RStudio Connect folder -rsconnect/ - -# Shiny token, see https://shiny.rstudio.com/articles/shinyapps.html -.shinyapps.io/ - -# MacOS -.DS_Store - -# Windows -Thumbs.db -ehthumbs.db -Desktop.ini - -# Linux -*~ - -# IDE -.vscode/ -.idea/ - -# Logs -*.log - -# API keys and sensitive data -.env -config.yml -.Rproj.user -.Rdata -.quarto - -# Cached crosswalks -/crosswalks-cache -docs -CLAUDE.md -settings.local.json +# History files +.Rhistory +.Rapp.history + +# Data files +.RData +.csv +.xlsx +.gpkg +.shp + +# User-specific files +.Ruserdata + +# Example code in package build process +*-Ex.R + +# Output files from R CMD build +/*.tar.gz + +# Output files from R CMD check +/*.Rcheck/ + +# RStudio files +.Rproj.user/ + +# produced vignettes +vignettes/*.html +vignettes/*.pdf + +# OAuth2 token, see https://github.com/hadley/httr/releases/tag/v0.3 +.httr-oauth + +# knitr and R markdown default cache directories +*_cache/ +/cache/ + +# Temporary files created by R markdown +*.utf8.md +*.knit.md + +# R Environment Variables +.Renviron + +# pkgdown site +docs/ + +# translation temp files +po/*~ + +# RStudio Connect folder +rsconnect/ + +# Shiny token, see https://shiny.rstudio.com/articles/shinyapps.html +.shinyapps.io/ + +# MacOS +.DS_Store + +# Windows +Thumbs.db +ehthumbs.db +Desktop.ini + +# Linux +*~ + +# IDE +.vscode/ +.idea/ + +# Logs +*.log + +# API keys and sensitive data +.env +config.yml +.Rproj.user +.Rdata +.quarto + +# Cached crosswalks +/crosswalks-cache +docs +CLAUDE.md +settings.local.json diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 0000000..df7a4af --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,40 @@ +# Agent Instructions + +This project uses **bd** (beads) for issue tracking. Run `bd onboard` to get started. + +## Quick Reference + +```bash +bd ready # Find available work +bd show # View issue details +bd update --status in_progress # Claim work +bd close # Complete work +bd sync # Sync with git +``` + +## Landing the Plane (Session Completion) + +**When ending a work session**, you MUST complete ALL steps below. Work is NOT complete until `git push` succeeds. + +**MANDATORY WORKFLOW:** + +1. **File issues for remaining work** - Create issues for anything that needs follow-up +2. **Run quality gates** (if code changed) - Tests, linters, builds +3. **Update issue status** - Close finished work, update in-progress items +4. 
**PUSH TO REMOTE** - This is MANDATORY: + ```bash + git pull --rebase + bd sync + git push + git status # MUST show "up to date with origin" + ``` +5. **Clean up** - Clear stashes, prune remote branches +6. **Verify** - All changes committed AND pushed +7. **Hand off** - Provide context for next session + +**CRITICAL RULES:** +- Work is NOT complete until `git push` succeeds +- NEVER stop before pushing - that leaves work stranded locally +- NEVER say "ready to push when you are" - YOU must push +- If push fails, resolve and retry until it succeeds + diff --git a/DESCRIPTION b/DESCRIPTION index 952f3dc..a68f35b 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,33 +1,34 @@ -Package: crosswalk -Type: Package -Title: Simple interface to inter-temporal and inter-geography crosswalks -Version: 0.0.0.9001 -Description: An R package providing a simple interface to access geographic crosswalks. -License: MIT + file LICENSE.md -Authors@R: - person(given = "Will", family = "Curran-Groome", email = "wcurrangroome@urban.org", role = c("aut", "cre")) -Encoding: UTF-8 -LazyData: true -Roxygen: list(markdown = TRUE) -RoxygenNote: 7.3.2 -Depends: - R (>= 4.1.0) -Imports: - dplyr, - httr, - httr2, - janitor, - purrr, - readr, - rvest, - stringr, - tibble, - tidyr, - utils -Suggests: - testthat (>= 3.0.0), - knitr, - rmarkdown -Config/testthat/edition: 3 -VignetteBuilder: knitr -URL: https://ui-research.github.io/crosswalk/ +Package: crosswalk +Type: Package +Title: Simple interface to inter-temporal and inter-geography crosswalks +Version: 0.0.0.9001 +Description: An R package providing a simple interface to access geographic crosswalks. +License: MIT + file LICENSE.md +Authors@R: + person(given = "Will", family = "Curran-Groome", email = "wcurrangroome@urban.org", role = c("aut", "cre")) +Encoding: UTF-8 +LazyData: true +Roxygen: list(markdown = TRUE) +RoxygenNote: 7.3.2 +Depends: + R (>= 4.1.0) +Imports: + dplyr, + httr, + httr2, + janitor, + purrr, + readr, + rvest, + stringr, + tibble, + tidyr, + utils +Suggests: + testthat (>= 3.0.0), + tidycensus, + knitr, + rmarkdown +Config/testthat/edition: 3 +VignetteBuilder: knitr +URL: https://ui-research.github.io/crosswalk/ diff --git a/LICENSE.md b/LICENSE.md index 4b17df0..27caa79 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,21 +1,21 @@ -# MIT License - -Copyright (c) 2025 crosswalk authors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
+# MIT License + +Copyright (c) 2025 crosswalk authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/R/get_crosswalk.R b/R/get_crosswalk.R index 5e87c9c..5825b64 100644 --- a/R/get_crosswalk.R +++ b/R/get_crosswalk.R @@ -1,142 +1,266 @@ -#' Get an inter-temporal or inter-geography crosswalk -#' -#' Retrieves a crosswalk with interpolation values from a source geography to a target -#' geography or from a source year to a target year. -#' -#' @details This function sources crosswalks from Geocorr 2022 and IPUMS NHGIS. -#' Crosswalk weights are from the original sources and have not been modified; -#' this function merely standardizes the format of the returned crosswalks and -#' enables easy programmatic access and cacheing. -#' -#' Note that an IPUMS NHGIS API key is required to access crosswalks from that -#' source. Use `usethis::edit_r_environ(scope = "user")` to save your API key -#' to your .Renviron; the name of the key should be "IPUMS_API_KEY". You can -#' obtain a key from: https://account.ipums.org/api_keys. -#' -#' @param source_year Character or numeric. Year of the source geography one of -#' c(1990, 2000, 2010, 2020). -#' @param source_geography Character. Source geography name. One of c("block", -#' "block group", "tract", "place", county", "urban_area", "zcta", "puma", "cd118", -#' "cd119", "urban_area", "core_based_statistical_area"). -#' @param target_year Character or numeric. Year of the target geography, one of -#' c(1990, 2000, 2010, 2020). -#' @param target_geography Character. Target geography name. One of c("block", -#' "block group", "tract", "place", county", "urban_area", "zcta", "puma", "cd118", -#' "cd119", "urban_area", "core_based_statistical_area"). -#' @param weight Character. Weighting variable. One of c("population", "housing", "land"). -#' @param cache Directory path. Where to download the crosswalk to. If NULL (default), -#' crosswalk is returned but not saved to disk. -#' -#' @return A data frame containing the crosswalk between the specified geographies. -#' Data are tidy-formatted, with each observation reflecting a unique -#' source-target-weighting factor combination. Note that all (typically two -#' or three) available weighting factors are returned. -#' -#' @return A dataframe representing the requested crosswalk for all 51 states -#' and Puerto Rico. Depending on the desired geographies and the source of the -#' crosswalk (Geocorr vs. NHGIS), some fields may not be included. 
-#' \describe{ -#' \item{source_geoid}{A unique identifier for the source geography} -#' \item{target_geoid}{A unique identifier for the target geography} -#' \item{source_geography_name}{The name of the source geography} -#' \item{target_geography_name}{The name of the target geography} -#' \item{source_year}{The year of the source geography} -#' \item{target_year}{The year of the target geography} -#' \item{allocation_factor_source_to_target}{The weight to interpolate values -#' from the source geography to the target geography} -#' \item{allocation_factor_target_to_source}{The weight to interpolate values -#' from the target geography to the source geography} -#' \item{population_2020}{The estimated overlap in population, if applicable} -#' \item{housing_2020}{The estimated overlap in housing units, if applicable} -#' \item{land_area_sqmi}{The overlap in land area, if applicable} -#' \item{weighting_factor}{The attribute used to calculate allocation factors} -#' } -#' -#' @export -#' @examples -#' \dontrun{ -#' get_crosswalk( -# source_geography = "zcta", -# target_geography = "puma22", -# weight = c("population"), -# cache = here::here("crosswalks-cache")) -#' } - -get_crosswalk = function( - source_geography, - target_geography, - source_year = NULL, - target_year = NULL, - cache = NULL, - weight = NULL) { - - # if ( (source_year == target_year | (is.null(source_year) | is.null(target_year))) ) { - if ( - source_geography == "block" & target_geography %in% c("block group", "tract", "county", "core_based_statistical_area") | - source_geography == "block group" & target_geography %in% c("tract", "county", "core_based_statistical_area") | - source_geography == "tract" & target_geography %in% c("county", "core_based_statistical_area") | - source_geography == "county" & target_geography == "core_based_statistical_area" - ) { - warning( -"The source geography is nested within the target geography and an empty result -will be returned. 
No crosswalk is needed to translate data between nested geographies; -simply aggregate your data to the desired geography.") - - return(tibble::tibble()) - } - # } - - if (is.null(source_year) | is.null(target_year)) { - crosswalk_source = "geocorr" - } else { crosswalk_source = "nhgis" } - - if (crosswalk_source == "nhgis") { - result = get_nhgis_crosswalk( - source_year = source_year, - source_geography = source_geography, - target_year = target_year, - target_geography = target_geography, - cache = cache) - } else { - result = get_geocorr_crosswalk( - source_geography = source_geography, - target_geography = target_geography, - weight = weight, - cache = cache) - } - - return(result) -} - -# ## write out geocorr crosswalks -# core_sources_geocorr = c( -# #"place", "county", -# "tract", -# #"blockgroup", -# "zcta", -# "puma22"#, -# #"cd119", "cd118" -# ) - -# library(climateapi) -## create an intersection of all geography combinations -# expand.grid(core_sources_geocorr, core_sources_geocorr) |> -# dplyr::rename(source_geography = 1, target_geography = 2) |> -# ## drop where the source and target geographies are the same -# dplyr::filter(source_geography != target_geography) |> -# dplyr::mutate( -# weight = "housing", -# cache = file.path("C:", "Users", climateapi::get_system_username(), "Box", "Arnold LIHTC study", "Data", "Shapefiles and crosswalks", "crosswalk_acs_decennial_chas"), -# dplyr::across(dplyr::where(is.factor), as.character)) |> -# purrr::pwalk(get_crosswalk) - -# tibble::tibble( -# source_geography = "tract", -# target_geography = "tract", -# source_year = c(1990, 2000, 2010), -# target_year = c(2010, 2010, 2020)) |> -# dplyr::mutate( -# weight = "housing", -# cache = file.path("C:", "Users", climateapi::get_system_username(), "Box", "Arnold LIHTC study", "Data", "Shapefiles and crosswalks", "crosswalk_acs_decennial_chas"), -# dplyr::across(dplyr::where(is.factor), as.character)) |> -# purrr::pwalk(get_crosswalk) +#' Get an inter-temporal or inter-geography crosswalk +#' +#' Retrieves a crosswalk with interpolation values from a source geography to a target +#' geography or from a source year to a target year. +#' +#' @details This function sources crosswalks from Geocorr 2022, IPUMS NHGIS, and +#' CT Data Collaborative. Crosswalk weights are from the original sources and +#' have not been modified; this function merely standardizes the format of the +#' returned crosswalks and enables easy programmatic access and caching. +#' +#' **Non-census year support**: For target years 2011, 2012, 2014, 2015, and 2022, +#' crosswalks are available only for block groups, tracts, and counties. These +#' years correspond to American Community Survey geography changes. +#' +#' **2020 to 2022 crosswalks**: The 2022 geographic changes only affected +#' Connecticut (county-equivalent planning regions replaced historical counties). +#' For this case, the function combines CT Data Collaborative crosswalks for +#' Connecticut with identity mappings for other states. +#' +#' Note that an IPUMS NHGIS API key is required to access crosswalks from that +#' source. Use `usethis::edit_r_environ(scope = "user")` to save your API key +#' to your .Renviron; the name of the key should be "IPUMS_API_KEY". You can +#' obtain a key from: https://account.ipums.org/api_keys. +#' +#' @param source_year Character or numeric. Year of the source geography, one of +#' c(1990, 2000, 2010, 2020). +#' @param source_geography Character. Source geography name. 
One of c("block", +#' "block group", "tract", "place", "county", "urban_area", "zcta", "puma", "cd118", +#' "cd119", "urban_area", "core_based_statistical_area"). +#' @param target_year Character or numeric. Year of the target geography, one of +#' c(1990, 2000, 2010, 2020) for decennial crosswalks, or c(2011, 2012, 2014, +#' 2015, 2022) for non-census year crosswalks (limited to block groups, tracts, +#' and counties). +#' @param target_geography Character. Target geography name. One of c("block", +#' "block group", "tract", "place", "county", "urban_area", "zcta", "puma", "cd118", +#' "cd119", "urban_area", "core_based_statistical_area"). +#' @param weight Character. Weighting variable for Geocorr crosswalks. One of +#' c("population", "housing", "land"). +#' @param cache Directory path. Where to download the crosswalk to. If NULL (default), +#' crosswalk is returned but not saved to disk. Individual component crosswalks +#' are cached separately when provided. +#' +#' @return A tibble containing the crosswalk between the specified geographies. +#' Data are tidy-formatted, with each observation reflecting a unique +#' source-target-weighting factor combination. +#' +#' The returned tibble includes an attribute `crosswalk_metadata` containing: +#' \describe{ +#' \item{source}{Character vector of data sources used (e.g., "nhgis", "ctdata")} +#' \item{source_year}{The source year} +#' \item{target_year}{The target year} +#' \item{source_geography}{The source geography} +#' \item{target_geography}{The target geography} +#' \item{notes}{Any relevant notes about the crosswalk construction} +#' } +#' +#' Columns in the returned dataframe (some may not be present depending on source): +#' \describe{ +#' \item{source_geoid}{A unique identifier for the source geography} +#' \item{target_geoid}{A unique identifier for the target geography} +#' \item{source_geography_name}{The name of the source geography} +#' \item{target_geography_name}{The name of the target geography} +#' \item{source_year}{The year of the source geography} +#' \item{target_year}{The year of the target geography} +#' \item{allocation_factor_source_to_target}{The weight to interpolate values +#' from the source geography to the target geography} +#' \item{allocation_factor_target_to_source}{The weight to interpolate values +#' from the target geography to the source geography} +#' \item{population_2020}{The estimated overlap in population, if applicable} +#' \item{housing_2020}{The estimated overlap in housing units, if applicable} +#' \item{land_area_sqmi}{The overlap in land area, if applicable} +#' \item{weighting_factor}{The attribute used to calculate allocation factors} +#' \item{state_fips}{Two-digit state FIPS code, if applicable} +#' } +#' +#' @export +#' @examples +#' \dontrun{ +#' # Same-year crosswalk between geographies (uses Geocorr) +#' get_crosswalk( +#' source_geography = "zcta", +#' target_geography = "puma22", +#' weight = "population", +#' cache = here::here("crosswalks-cache")) +#' +#' # Inter-temporal crosswalk (uses NHGIS) +#' get_crosswalk( +#' source_geography = "tract", +#' target_geography = "tract", +#' source_year = 2010, +#' target_year = 2020, +#' cache = here::here("crosswalks-cache")) +#' +#' # Non-census year crosswalk (2020 to 2022, CT changes) +#' get_crosswalk( +#' source_geography = "tract", +#' target_geography = "tract", +#' source_year = 2020, +#' target_year = 2022, +#' cache = here::here("crosswalks-cache")) +#' } + +get_crosswalk <- function( + source_geography, + target_geography, + 
source_year = NULL, + target_year = NULL, + cache = NULL, + weight = NULL) { + + if ( + source_geography == "block" & target_geography %in% c("block group", "tract", "county", "core_based_statistical_area") | + source_geography == "block group" & target_geography %in% c("tract", "county", "core_based_statistical_area") | + source_geography == "tract" & target_geography %in% c("county", "core_based_statistical_area") | + source_geography == "county" & target_geography == "core_based_statistical_area" + ) { + warning( +"The source geography is nested within the target geography and an empty result +will be returned. No crosswalk is needed to translate data between nested geographies; +simply aggregate your data to the desired geography.") + + return(tibble::tibble()) + } + + source_year_chr <- if (!is.null(source_year)) as.character(source_year) else NULL + target_year_chr <- if (!is.null(target_year)) as.character(target_year) else NULL + + if (is.null(source_year) | is.null(target_year)) { + crosswalk_source <- "geocorr" + } else if (source_year_chr == "2020" & target_year_chr == "2022") { + crosswalk_source <- "ctdata_2020_2022" + } else { + crosswalk_source <- "nhgis" + } + + metadata <- list( + source = character(), + source_year = source_year_chr, + target_year = target_year_chr, + source_geography = source_geography, + target_geography = target_geography, + notes = character()) + + if (crosswalk_source == "ctdata_2020_2022") { + result <- get_crosswalk_2020_2022( + geography = source_geography, + cache = cache) + metadata$source <- c("ctdata", "identity") + metadata$notes <- c( + "Connecticut: CTData Collaborative 2020-2022 crosswalk (identity mapping, FIPS code change only)", + "Other states: Identity mapping (no geographic changes between 2020 and 2022)") + + } else if (crosswalk_source == "nhgis") { + result <- get_nhgis_crosswalk( + source_year = source_year, + source_geography = source_geography, + target_year = target_year, + target_geography = target_geography, + cache = cache) + metadata$source <- "nhgis" + + } else { + result <- get_geocorr_crosswalk( + source_geography = source_geography, + target_geography = target_geography, + weight = weight, + cache = cache) + metadata$source <- "geocorr" + } + + attr(result, "crosswalk_metadata") <- metadata + + return(result) +} + + +#' Get 2020 to 2022 Crosswalk (Connecticut + Identity Mapping) +#' +#' Internal function that handles the special case of 2020 to 2022 crosswalks. +#' Connecticut changed from historical counties to planning regions in 2022, +#' while all other states had no geographic changes. +#' +#' @param geography Character. Geography type: one of "block", "block_group", +#' "tract", or "county". +#' @param cache Directory path for caching component crosswalks. +#' +#' @return A tibble containing the national crosswalk with Connecticut from CTData +#' and identity mappings for other states. 
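+#'   Note that, in the current implementation, the returned tibble contains only
+#'   Connecticut records (the only geographies whose identifiers changed); for all
+#'   other states, 2020 and 2022 GEOIDs are identical, so user data can be joined
+#'   directly on unchanged GEOIDs.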
+#' @noRd +get_crosswalk_2020_2022 <- function(geography, cache = NULL) { + + geography_standardized <- geography |> + stringr::str_to_lower() |> + stringr::str_squish() |> + stringr::str_replace_all("_", " ") + + geography_standardized <- dplyr::case_when( + geography_standardized %in% c("block", "blocks", "blk") ~ "block", + geography_standardized %in% c("block group", "blockgroup", "bg") ~ "block_group", + geography_standardized %in% c("tract", "tracts", "tr") ~ "tract", + geography_standardized %in% c("county", "counties", "co") ~ "county", + TRUE ~ NA_character_) + + if (is.na(geography_standardized)) { + stop( +"2020 to 2022 crosswalks are only available for blocks, block groups, tracts, +and counties. The provided geography '", geography, "' is not supported.")} + + message( +"Constructing 2020 to 2022 crosswalk: +- Connecticut: Using CT Data Collaborative crosswalk (FIPS code changes only, + boundaries unchanged). Historical counties were replaced by planning regions. +- Other states: No geographic changes occurred between 2020 and 2022. + Returning identity mapping (source_geoid = target_geoid) for non-CT states.") + + ct_crosswalk <- get_ctdata_crosswalk( + geography = geography_standardized, + cache = cache) + + message( + "Connecticut crosswalk loaded: ", nrow(ct_crosswalk), " ", + geography_standardized, " records.") + + attr(ct_crosswalk, "crosswalk_sources") <- list( + connecticut = "ctdata", + other_states = "identity_mapping") + attr(ct_crosswalk, "identity_states_note") <- +"For states other than Connecticut, no geographic changes occurred between 2020 +and 2022. When joining your data, non-CT records will match on identical GEOIDs. +This crosswalk only contains Connecticut records where FIPS codes changed." + + return(ct_crosswalk) +} + +# ## write out geocorr crosswalks +# core_sources_geocorr = c( +# #"place", "county", +# "tract", +# #"blockgroup", +# "zcta", +# "puma22"#, +# #"cd119", "cd118" +# ) + +# library(climateapi) +## create an intersection of all geography combinations +# expand.grid(core_sources_geocorr, core_sources_geocorr) |> +# dplyr::rename(source_geography = 1, target_geography = 2) |> +# ## drop where the source and target geographies are the same +# dplyr::filter(source_geography != target_geography) |> +# dplyr::mutate( +# weight = "housing", +# cache = file.path("C:", "Users", climateapi::get_system_username(), "Box", "Arnold LIHTC study", "Data", "Shapefiles and crosswalks", "crosswalk_acs_decennial_chas"), +# dplyr::across(dplyr::where(is.factor), as.character)) |> +# purrr::pwalk(get_crosswalk) + +# tibble::tibble( +# source_geography = "tract", +# target_geography = "tract", +# source_year = c(1990, 2000, 2010), +# target_year = c(2010, 2010, 2020)) |> +# dplyr::mutate( +# weight = "housing", +# cache = file.path("C:", "Users", climateapi::get_system_username(), "Box", "Arnold LIHTC study", "Data", "Shapefiles and crosswalks", "crosswalk_acs_decennial_chas"), +# dplyr::across(dplyr::where(is.factor), as.character)) |> +# purrr::pwalk(get_crosswalk) diff --git a/R/get_ctdata_crosswalk.R b/R/get_ctdata_crosswalk.R new file mode 100644 index 0000000..fa11e23 --- /dev/null +++ b/R/get_ctdata_crosswalk.R @@ -0,0 +1,255 @@ +#' Get Connecticut 2020-2022 Crosswalk from CTData +#' +#' Retrieves a crosswalk for Connecticut geographies between 2020 and 2022 from +#' the CT Data Collaborative. This handles the 2022 change when Connecticut +#' switched from eight historical counties to nine county-equivalent planning regions. 
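+#'
+#' A minimal sketch of applying the county-level output to user data follows; the
+#' `county_data_2020` data frame and its `geoid` and `value` columns are
+#' hypothetical placeholders, not part of this package:
+#'
+#' ```r
+#' xwalk <- get_ctdata_crosswalk(geography = "county")
+#' county_data_2022 <- county_data_2020 |>
+#'   # keep only Connecticut counties, the only rows present in the crosswalk
+#'   dplyr::inner_join(xwalk, by = c("geoid" = "source_geoid")) |>
+#'   # apportion each 2020 county value across its 2022 planning regions
+#'   dplyr::mutate(value_allocated = value * allocation_factor_source_to_target) |>
+#'   dplyr::summarize(value = sum(value_allocated), .by = "target_geoid")
+#' ```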
+#'
+#' @details This function sources crosswalks from the CT Data Collaborative GitHub
+#' repository. For blocks, block groups, and tracts, the crosswalk is a 1:1 mapping
+#' between 2020 and 2022 FIPS codes: physical boundaries did not change, only the
+#' county-level portion of the identifiers changed, so no interpolation weights are
+#' needed. For counties, historical counties do not nest within the new planning
+#' regions, so allocation factors are population-weighted, estimated from tract-level
+#' ACS population data (this requires the tidycensus package).
+#'
+#' @param geography Character. Geography type: one of "block", "block_group", "tract",
+#' or "county".
+#' @param cache Directory path. Where to download the crosswalk to. If NULL (default),
+#' crosswalk is returned but not saved to disk.
+#'
+#' @return A tibble containing the Connecticut crosswalk with columns:
+#' \describe{
+#' \item{source_geoid}{The 2020 FIPS code}
+#' \item{target_geoid}{The 2022 FIPS code}
+#' \item{source_geography_name}{The geography type}
+#' \item{target_geography_name}{The geography type}
+#' \item{source_year}{2020}
+#' \item{target_year}{2022}
+#' \item{allocation_factor_source_to_target}{1 for blocks, block groups, and tracts
+#' (identity mapping); the 2020 population share for counties}
+#' \item{weighting_factor}{"identity" for blocks, block groups, and tracts;
+#' "population" for counties}
+#' \item{state_fips}{"09" (Connecticut)}
+#' }
+#' @noRd
+get_ctdata_crosswalk <- function(geography, cache = NULL) {
+
+  geography_standardized <- geography |>
+    stringr::str_to_lower() |>
+    stringr::str_squish() |>
+    stringr::str_replace_all("_", " ")
+
+  geography_standardized <- dplyr::case_when(
+    geography_standardized %in% c("block", "blocks", "blk") ~ "block",
+    geography_standardized %in% c("block group", "blockgroup", "bg") ~ "block_group",
+    geography_standardized %in% c("tract", "tracts", "tr") ~ "tract",
+    geography_standardized %in% c("county", "counties", "co") ~ "county",
+    TRUE ~ NA_character_)
+
+  if (is.na(geography_standardized)) {
+    stop(
+"CTData crosswalks are only available for blocks, block groups, tracts, and counties.
+The provided geography '", geography, "' is not supported.")} + + if (is.null(cache)) { + cache_path <- tempdir() + } else { + cache_path <- cache + } + + csv_path <- file.path( + cache_path, + stringr::str_c("crosswalk_ctdata_2020_to_2022_", geography_standardized, ".csv")) + + if (file.exists(csv_path) & !is.null(cache)) { + message("Reading CTData crosswalk from cache.") + return(readr::read_csv( + csv_path, + col_types = readr::cols(.default = readr::col_character(), + allocation_factor_source_to_target = readr::col_double()))) + } + + ctdata_urls <- list( + block = "https://raw.githubusercontent.com/CT-Data-Collaborative/2022-block-crosswalk/main/2022blockcrosswalk.csv", + tract = "https://raw.githubusercontent.com/CT-Data-Collaborative/2022-tract-crosswalk/main/2022tractcrosswalk.csv") + + if (geography_standardized == "block") { + raw_df <- readr::read_csv(ctdata_urls$block, show_col_types = FALSE) + + result <- raw_df |> + dplyr::transmute( + source_geoid = block_fips_2020, + target_geoid = block_fips_2022, + source_geography_name = "block", + target_geography_name = "block", + source_year = "2020", + target_year = "2022", + allocation_factor_source_to_target = 1, + weighting_factor = "identity", + state_fips = "09") + + } else if (geography_standardized == "block_group") { + raw_df <- readr::read_csv(ctdata_urls$block, show_col_types = FALSE) + + result <- raw_df |> + dplyr::transmute( + source_geoid = stringr::str_sub(block_fips_2020, 1, 12), + target_geoid = stringr::str_sub(block_fips_2022, 1, 12)) |> + dplyr::distinct() |> + dplyr::mutate( + source_geography_name = "block_group", + target_geography_name = "block_group", + source_year = "2020", + target_year = "2022", + allocation_factor_source_to_target = 1, + weighting_factor = "identity", + state_fips = "09") + + } else if (geography_standardized == "tract") { + raw_df <- readr::read_csv(ctdata_urls$tract, show_col_types = FALSE) + + result <- raw_df |> + dplyr::transmute( + source_geoid = tract_fips_2020, + target_geoid = Tract_fips_2022, + source_geography_name = "tract", + target_geography_name = "tract", + source_year = "2020", + target_year = "2022", + allocation_factor_source_to_target = 1, + weighting_factor = "identity", + state_fips = "09") + + } else if (geography_standardized == "county") { + if (!requireNamespace("tidycensus", quietly = TRUE)) { + stop( +"The tidycensus package is required for Connecticut county crosswalks because +allocation factors must be calculated based on population. 
Install it with: +install.packages('tidycensus') +You will also need a Census API key: tidycensus::census_api_key('YOUR_KEY')") + } + + raw_df <- readr::read_csv(ctdata_urls$tract, show_col_types = FALSE) |> + janitor::clean_names() |> + dplyr::select( + tract_fips_2020, + tract_fips_2022, + county_fips_2020, + county_fips_2022 = ce_fips_2022) + + ct_tract_populations <- suppressMessages({ + tidycensus::get_acs( + year = 2021, + geography = "tract", + state = "CT", + variables = "B01003_001", + output = "wide") |> + dplyr::select( + tract_fips_2020 = GEOID, + population_2020 = B01003_001E) + }) + + result <- raw_df |> + dplyr::left_join(ct_tract_populations, by = "tract_fips_2020") |> + dplyr::summarize( + population_2020 = sum(population_2020, na.rm = TRUE), + .by = c("county_fips_2020", "county_fips_2022")) |> + dplyr::mutate( + population_2020_total = sum(population_2020, na.rm = TRUE), + .by = "county_fips_2020") |> + dplyr::mutate( + source_geoid = county_fips_2020, + target_geoid = county_fips_2022, + source_geography_name = "county", + target_geography_name = "county", + source_year = "2020", + target_year = "2022", + allocation_factor_source_to_target = population_2020 / population_2020_total, + weighting_factor = "population", + state_fips = "09") |> + dplyr::select( + source_geoid, target_geoid, + source_geography_name, target_geography_name, + source_year, target_year, + allocation_factor_source_to_target, + weighting_factor, state_fips) + } + + if (!is.null(cache)) { + if (!dir.exists(cache_path)) { + dir.create(cache_path, recursive = TRUE) + } + readr::write_csv(result, csv_path) + } + + message( +"Connecticut 2020-2022 crosswalk sourced from CT Data Collaborative. +See https://github.com/CT-Data-Collaborative for more information.") + + return(result) +} + + +#' Generate Identity Crosswalk for Non-Connecticut States (2020-2022) +#' +#' For states other than Connecticut, there were no geographic changes between +#' 2020 and 2022. This function generates an identity mapping where source and +#' target GEOIDs are identical. +#' +#' @param geography Character. Geography type: one of "block_group", "tract", or "county". +#' @param states Character vector. State FIPS codes to include. Defaults to all +#' states except Connecticut ("09"). +#' +#' @return A tibble with identity mappings for the specified geography and states. +#' @noRd +get_identity_crosswalk_2020_2022 <- function(geography, states = NULL) { + + geography_standardized <- geography |> + stringr::str_to_lower() |> + stringr::str_squish() |> + stringr::str_replace_all("_", " ") + + geography_standardized <- dplyr::case_when( + geography_standardized %in% c("block group", "blockgroup", "bg") ~ "block_group", + geography_standardized %in% c("tract", "tracts", "tr") ~ "tract", + geography_standardized %in% c("county", "counties", "co") ~ "county", + TRUE ~ NA_character_) + + if (is.na(geography_standardized)) { + stop( +"Identity crosswalks for 2020-2022 are only available for block groups, tracts, +and counties. 
Block-level identity crosswalks are not supported due to data size.")} + + all_state_fips <- c( + "01", "02", "04", "05", "06", "08", "10", "11", "12", "13", + "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", + "25", "26", "27", "28", "29", "30", "31", "32", "33", "34", + "35", "36", "37", "38", "39", "40", "41", "42", "44", "45", + "46", "47", "48", "49", "50", "51", "53", "54", "55", "56", "72") + + non_ct_states <- all_state_fips[all_state_fips != "09"] + + if (is.null(states)) { + states <- non_ct_states + } else { + states <- states[states != "09"] + } + + result <- tibble::tibble( + source_geoid = character(), + target_geoid = character(), + source_geography_name = character(), + target_geography_name = character(), + source_year = character(), + target_year = character(), + allocation_factor_source_to_target = numeric(), + weighting_factor = character(), + state_fips = character()) + + message( +"For states other than Connecticut, no geographic changes occurred between +2020 and 2022. Returning identity mapping (source_geoid = target_geoid). +Note: This function returns an empty template. To populate with actual GEOIDs, +you would need to provide a list of GEOIDs or use Census Bureau geography files.") + + attr(result, "identity_mapping") <- TRUE + attr(result, "states_included") <- states + attr(result, "geography") <- geography_standardized + + return(result) +} diff --git a/R/get_geocorr_crosswalk.R b/R/get_geocorr_crosswalk.R index 8aeacd7..a0a72e9 100644 --- a/R/get_geocorr_crosswalk.R +++ b/R/get_geocorr_crosswalk.R @@ -1,333 +1,333 @@ -#' Obtain a Geocorr22 Crosswalk -#' -#' Query Geocorr22 (https://mcdc.missouri.edu/applications/geocorr2022.html) for -#' a crosswalk between two geographies for all 51 states and Puerto Rico. -#' -#' @details Note: this function is under development but does not yet support all -#' of the geographies supported by Geocorr. Currently this includes: -#' c("place", "county", "tract", "blockgroup", "zcta", "puma22", "cd119", "cd118"). -#' -#' @param source_geography Character. Source geography name. One of: -#' c("place", "county", "tract", "blockgroup", "zcta", "puma22", "cd119", "cd118"). -#' Note "cd" stands for "congressional district". -#' @param target_geography Character. Target geography name. See `source_geography` -#' for options. -#' @param weight Character. Weighting variable. One of c("population", "housing", "land"). -#' @param cache Directory path. Where to download the crosswalk to. If NULL (default), -#' crosswalk is returned but not saved to disk. -#' -#' @return A dataframe representing the requested Geocorr22 crosswalk for all 51 -#' states and Puerto Rico. Depending on the desired geographies, some fields -#' may not be included. 
-#' \describe{ -#' \item{state_fips}{A two-digit identified for the state (or DC/PR), if applicable} -#' \item{state_abbreviation}{A two-character abbreviation for the state (or DC/PR), -#' if applicable} -#' \item{source_geoid}{A unique identifier for the source geography} -#' \item{target_geoid}{A unique identifier for the target geography} -#' \item{source_geography_name}{The name of the source geography} -#' \item{target_geography_name}{The name of the target geography} -#' \item{allocation_factor_source_to_target}{The weight to interpolate values -#' from the source geography to the target geography} -#' \item{allocation_factor_target_to_source}{The weight to interpolate values -#' from the source geography to the target geography} -#' \item{population_2020}{The estimated overlap in population, if applicable} -#' \item{housing_2020}{The estimated overlap in housing units, if applicable} -#' \item{land_area_sqmi}{The overlap in land area, if applicable} -#' \item{weighting_factor}{The attribute used to calculate allocation factors -#' (one of population, housing, land)} -#' } -#' @noRd -get_geocorr_crosswalk <- function( - source_geography, - target_geography, - weight = c("population", "housing", "land"), - cache = NULL) { - - ## identify the relevant file paths for potentially-cached crosswalks - if (!is.null(cache)) { - outpath = file.path( - cache, - stringr::str_c("crosswalk_geocorr_2022_to_2022_", source_geography, "_to_", - target_geography, "_weightedby_", weight, ".csv")) } - - ## if the file exists and the user does not wish to overwrite it - if (file.exists(outpath) & !is.null(cache)) { - result = readr::read_csv(outpath) - - message("Reading file from cache.") - - return(result) } - - # Base API URL for geocorr2022 - base_url <- "https://mcdc.missouri.edu/cgi-bin/broker" - - # Map weight parameter to API format - weight_value <- switch(weight, - "population" = "pop20", - "land" = "landsqmi", - "housing" = "hus20") - - # Define all states with their abbreviations and FIPS codes - # Based on the sample URL pattern: state=Mo29, state=Al01, etc. 
- states_data <- list( - list(abbr = "Al", fips = "01"), - list(abbr = "Ak", fips = "02"), - list(abbr = "Az", fips = "04"), - list(abbr = "Ar", fips = "05"), - list(abbr = "Ca", fips = "06"), - list(abbr = "Co", fips = "08"), - list(abbr = "Ct", fips = "09"), - list(abbr = "De", fips = "10"), - list(abbr = "Dc", fips = "11"), - list(abbr = "Fl", fips = "12"), - list(abbr = "Ga", fips = "13"), - list(abbr = "Hi", fips = "15"), - list(abbr = "Id", fips = "16"), - list(abbr = "Il", fips = "17"), - list(abbr = "In", fips = "18"), - list(abbr = "Ia", fips = "19"), - list(abbr = "Ks", fips = "20"), - list(abbr = "Ky", fips = "21"), - list(abbr = "La", fips = "22"), - list(abbr = "Me", fips = "23"), - list(abbr = "Md", fips = "24"), - list(abbr = "Ma", fips = "25"), - list(abbr = "Mi", fips = "26"), - list(abbr = "Mn", fips = "27"), - list(abbr = "Ms", fips = "28"), - list(abbr = "Mo", fips = "29"), - list(abbr = "Mt", fips = "30"), - list(abbr = "Ne", fips = "31"), - list(abbr = "Nv", fips = "32"), - list(abbr = "Nh", fips = "33"), - list(abbr = "Nj", fips = "34"), - list(abbr = "Nm", fips = "35"), - list(abbr = "Ny", fips = "36"), - list(abbr = "Nc", fips = "37"), - list(abbr = "Nd", fips = "38"), - list(abbr = "Oh", fips = "39"), - list(abbr = "Ok", fips = "40"), - list(abbr = "Or", fips = "41"), - list(abbr = "Pa", fips = "42"), - list(abbr = "Ri", fips = "44"), - list(abbr = "Sc", fips = "45"), - list(abbr = "Sd", fips = "46"), - list(abbr = "Tn", fips = "47"), - list(abbr = "Tx", fips = "48"), - list(abbr = "Ut", fips = "49"), - list(abbr = "Vt", fips = "50"), - list(abbr = "Va", fips = "51"), - list(abbr = "Wa", fips = "53"), - list(abbr = "Wv", fips = "54"), - list(abbr = "Wi", fips = "55"), - list(abbr = "Wy", fips = "56"), - list(abbr = "Pr", fips = "72")) |> - purrr::map_chr(~ paste0(.x$abbr, .x$fips)) - - ## for block-level crosswalks, the maximum number of states per query is 13 - if ("block" %in% c(source_geography, target_geography)) { - - n = length(states_data) / 13 - groups = cut(seq_along(states_data), n, labels = FALSE) - states_chunked = split(states_data, groups) - - df1 = purrr::map_dfr( - states_chunked, - function(states) { - # Build query parameters - params <- list( - `_PROGRAM` = "apps.geocorr2022.sas", - `_SERVICE` = "MCDC_long", - `_debug` = "0", - `afacts2` = "on", - `g1_` = source_geography, - `g2_` = target_geography, - `wtvar` = weight_value, - `nozerob` = "1", - `fileout` = "1", - `filefmt` = "csv", - `lstfmt` = "html", - `title` = "", - `sort2` = "on", - `counties` = "", - `metros` = "", - `places` = "", - `oropt` = "", - `latitude` = "", - `longitude` = "", - `distance` = "", - `kiloms` = "0", - `locname` = "", - `state` = states) - - # Make the HTTP GET request using httr2 - request <- httr2::request(base_url) |> - httr2::req_url_query(!!!params, .multi = "explode") - - csv_path <- httr2::req_perform(request) |> - httr2::resp_body_html() |> - rvest::html_element("body") |> - rvest::html_text2() |> - stringr::str_extract("geocorr.*.csv") - - if (is.na(csv_path)) { stop("Unable to acquire the specified crosswalk; please file an issue.") } - - df1 = readr::read_csv(file.path("https://mcdc.missouri.edu", "temp", csv_path)) |> - janitor::clean_names() })} else { - - # Build query parameters - params <- list( - `_PROGRAM` = "apps.geocorr2022.sas", - `_SERVICE` = "MCDC_long", - `_debug` = "0", - `afacts2` = "on", - `g1_` = source_geography, - `g2_` = target_geography, - `wtvar` = weight_value, - `nozerob` = "1", - `fileout` = "1", - `filefmt` = "csv", - `lstfmt` = 
"html", - `title` = "", - `sort2` = "on", - `counties` = "", - `metros` = "", - `places` = "", - `oropt` = "", - `latitude` = "", - `longitude` = "", - `distance` = "", - `kiloms` = "0", - `locname` = "", - `state` = states_data) - - # Make the HTTP GET request using httr2 - request <- httr2::request(base_url) |> - httr2::req_url_query(!!!params, .multi = "explode") - - csv_path <- httr2::req_perform(request) |> - httr2::resp_body_html() |> - rvest::html_element("body") |> - rvest::html_text2() |> - stringr::str_extract("geocorr.*.csv") - - if (is.na(csv_path)) { stop("Unable to acquire the specified crosswalk; please file an issue.") } - - df1 = readr::read_csv(file.path("https://mcdc.missouri.edu", "temp", csv_path)) |> - janitor::clean_names() } - - df2 = df1 |> - dplyr::slice(2:nrow(df1)) |> - ## naming conventions for some geographies are inconsistent; we standardize - dplyr::rename_with( - .cols = dplyr::matches("zip_name"), - .fn = ~ .x |> stringr::str_replace("zip", "zcta")) |> - dplyr::rename_with( - .cols = dplyr::matches("puma22name"), - .fn = ~ .x |> stringr::str_replace("puma22name", "puma22_name")) |> - dplyr::rename_with( - .cols = dplyr::matches("hus20|pop20|landsqmi"), - .fn = ~ stringr::str_replace_all(.x, c( - "hus20" = "housing_2020", "pop20" = "population_2020", "landsqmi" = "land_area_sqmi"))) - - if (!"state" %in% colnames(df2)) { - df2 = df2 |> - dplyr::mutate( - state = stringr::str_sub(county, 1, 2), - county = stringr::str_sub(county, 3, 5)) } - - df2 = df2 |> - dplyr::mutate( - ## data with blocks/block groups/tracts have differently structured/named columns - ## we standardize here so that subsequent workflows are uniform - dplyr::across( - .cols = dplyr::matches("^block$"), - .fns = ~ stringr::str_c(state, county, tract, block) |> - stringr::str_remove_all("\\.")), - dplyr::across( - .cols = dplyr::matches("^block$"), - .fns = ~ stringr::str_c(county_name, " ", tract, block), - .names = "block_name"), - dplyr::across( - .cols = dplyr::matches("^blockgroup$"), - .fns = ~ stringr::str_c(state, county, tract, blockgroup) |> - stringr::str_remove_all("\\.")), - dplyr::across( - .cols = dplyr::matches("^blockgroup$"), - .fns = ~ stringr::str_c(county_name, " ", blockgroup), - .names = "blockgroup_name"), - dplyr::across( - .cols = dplyr::matches("^tract$"), - .fns = ~ stringr::str_c(state, county, tract) |> - stringr::str_remove_all("\\.")), - dplyr::across( - .cols = dplyr::matches("^tract$"), - .fns = ~ stringr::str_c(county_name, " ", tract), - .names = "tract_name"), - dplyr::across( - .cols = dplyr::matches("^cd11"), - .fns = ~ stringr::str_c(stab, "-", .x), - .names = "{.col}_name")) |> - dplyr::rename_with( - .cols = dplyr::matches("state|stab"), - .fn = ~ stringr::str_replace_all(.x, c("state" = "state_fips", "stab" = "state_abbreviation"))) |> - dplyr::select( - dplyr::matches("state"), - source_geoid = source_geography, - target_geoid = target_geography, - source_geography_name = !!stringr::str_c(source_geography, "_name"), - target_geography_name = !!stringr::str_c(target_geography, "_name"), - allocation_factor_source_to_target = afact, - allocation_factor_target_to_source = afact2, - dplyr::any_of(c("housing_2020", "population_2020", "land_area_sqmi"))) |> - dplyr::mutate( - source_geography = source_geography, - target_geography = target_geography, - weighting_factor = weight, - dplyr::across(.cols = dplyr::matches("allocation"), .fns = as.numeric)) - - if (!is.null(cache)) { - ## if the file does not already exist and cache is TRUE - if 
(!file.exists(outpath) & !is.null(cache)) { - ## if the specified cache directory doesn't yet exist, create it - if (!dir.exists(cache)) { dir.create(cache) } - readr::write_csv(df2, outpath) - } - } - return(df2) -} - -utils::globalVariables(c("afact", "afact2", "county")) - -# get_geocorr_crosswalk( -# source_geography = "zcta", -# target_geography = "puma22", -# weight = c("population"), -# cache = here::here("crosswalks-cache"), -# overwrite_cache = FALSE) - -# ## omitting the provided MO-specific geographies -# sources = c( -# "place", "county", "tract", "blockgroup", "block", "zcta", "puma22", "cousub", -# "cbsa20", "cbsatype20", "metdiv20", "csa20", "necta", "nectadiv", "cnect", "aiannh", -# ## these may be formatted differently -- including a state parameter? -# "sduni20", "sdelem20", "sdsec20", "sdbest20", "sdbesttype20", "placesc", "puma12", -# "countysc", "inplace", "ur", "ua", "cbsa23", "cbsatype23", "metdiv23", "csa23", -# "cbsacentral23", "sldu24", "sldl24", "sldu22", "sld22", "sldu18", "sldl28", -# "cd119", "cd118", "cd117", "cd116", -# ## ctregion only works for CT; "vtd20" may be nested at the country level? -# "ctregion", "vtd20", "hsa19", "hrr19", "rucc23") -# -# ## "block" -- this level requires submitting 13 or fewer states at a time -# -# core_sources = c("place", "county", "tract", "blockgroup", -# "zcta", "puma22", "cd119", "cd118") -# -# expand.grid(core_sources, core_sources) |> -# dplyr::rename(source_geography = 1, target_geography = 2) |> -# dplyr::filter(source_geography != target_geography) |> -# dplyr::mutate(weight = "population", cache = here::here("crosswalks-cache"), overwrite_cache = FALSE, dplyr::across(dplyr::where(is.factor), as.character)) |> -# purrr::pwalk(get_geocorr_crosswalk) - - +#' Obtain a Geocorr22 Crosswalk +#' +#' Query Geocorr22 (https://mcdc.missouri.edu/applications/geocorr2022.html) for +#' a crosswalk between two geographies for all 51 states and Puerto Rico. +#' +#' @details Note: this function is under development but does not yet support all +#' of the geographies supported by Geocorr. Currently this includes: +#' c("place", "county", "tract", "blockgroup", "zcta", "puma22", "cd119", "cd118"). +#' +#' @param source_geography Character. Source geography name. One of: +#' c("place", "county", "tract", "blockgroup", "zcta", "puma22", "cd119", "cd118"). +#' Note "cd" stands for "congressional district". +#' @param target_geography Character. Target geography name. See `source_geography` +#' for options. +#' @param weight Character. Weighting variable. One of c("population", "housing", "land"). +#' @param cache Directory path. Where to download the crosswalk to. If NULL (default), +#' crosswalk is returned but not saved to disk. +#' +#' @return A dataframe representing the requested Geocorr22 crosswalk for all 51 +#' states and Puerto Rico. Depending on the desired geographies, some fields +#' may not be included. 
+#' \describe{
+#'  \item{state_fips}{A two-digit identifier for the state (or DC/PR), if applicable}
+#'  \item{state_abbreviation}{A two-character abbreviation for the state (or DC/PR),
+#'    if applicable}
+#'  \item{source_geoid}{A unique identifier for the source geography}
+#'  \item{target_geoid}{A unique identifier for the target geography}
+#'  \item{source_geography_name}{The name of the source geography}
+#'  \item{target_geography_name}{The name of the target geography}
+#'  \item{allocation_factor_source_to_target}{The weight to interpolate values
+#'    from the source geography to the target geography}
+#'  \item{allocation_factor_target_to_source}{The weight to interpolate values
+#'    from the target geography to the source geography}
+#'  \item{population_2020}{The estimated overlap in population, if applicable}
+#'  \item{housing_2020}{The estimated overlap in housing units, if applicable}
+#'  \item{land_area_sqmi}{The overlap in land area, if applicable}
+#'  \item{weighting_factor}{The attribute used to calculate allocation factors
+#'    (one of population, housing, land)}
+#' }
+#' @noRd
+get_geocorr_crosswalk <- function(
+    source_geography,
+    target_geography,
+    weight = c("population", "housing", "land"),
+    cache = NULL) {
+
+  ## resolve the default weight vector to a single value
+  weight <- match.arg(weight)
+
+  ## identify the relevant file paths for potentially-cached crosswalks
+  if (!is.null(cache)) {
+    outpath = file.path(
+      cache,
+      stringr::str_c("crosswalk_geocorr_2022_to_2022_", source_geography, "_to_",
+                     target_geography, "_weightedby_", weight, ".csv")) }
+
+  ## if caching is enabled and the file already exists, read it from the cache
+  if (!is.null(cache) && file.exists(outpath)) {
+    result = readr::read_csv(outpath)
+
+    message("Reading file from cache.")
+
+    return(result) }
+
+  # Base API URL for geocorr2022
+  base_url <- "https://mcdc.missouri.edu/cgi-bin/broker"
+
+  # Map weight parameter to API format
+  weight_value <- switch(weight,
+    "population" = "pop20",
+    "land" = "landsqmi",
+    "housing" = "hus20")
+
+  # Define all states with their abbreviations and FIPS codes
+  # Based on the sample URL pattern: state=Mo29, state=Al01, etc.
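+  # Each entry below is collapsed to a single "<Abbr><FIPS>" token (e.g. "Al01",
+  # "Mo29", "Pr72"); `.multi = "explode"` in the request built further down then
+  # repeats the `state` parameter once per token, as in the sample URL above.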
+ states_data <- list( + list(abbr = "Al", fips = "01"), + list(abbr = "Ak", fips = "02"), + list(abbr = "Az", fips = "04"), + list(abbr = "Ar", fips = "05"), + list(abbr = "Ca", fips = "06"), + list(abbr = "Co", fips = "08"), + list(abbr = "Ct", fips = "09"), + list(abbr = "De", fips = "10"), + list(abbr = "Dc", fips = "11"), + list(abbr = "Fl", fips = "12"), + list(abbr = "Ga", fips = "13"), + list(abbr = "Hi", fips = "15"), + list(abbr = "Id", fips = "16"), + list(abbr = "Il", fips = "17"), + list(abbr = "In", fips = "18"), + list(abbr = "Ia", fips = "19"), + list(abbr = "Ks", fips = "20"), + list(abbr = "Ky", fips = "21"), + list(abbr = "La", fips = "22"), + list(abbr = "Me", fips = "23"), + list(abbr = "Md", fips = "24"), + list(abbr = "Ma", fips = "25"), + list(abbr = "Mi", fips = "26"), + list(abbr = "Mn", fips = "27"), + list(abbr = "Ms", fips = "28"), + list(abbr = "Mo", fips = "29"), + list(abbr = "Mt", fips = "30"), + list(abbr = "Ne", fips = "31"), + list(abbr = "Nv", fips = "32"), + list(abbr = "Nh", fips = "33"), + list(abbr = "Nj", fips = "34"), + list(abbr = "Nm", fips = "35"), + list(abbr = "Ny", fips = "36"), + list(abbr = "Nc", fips = "37"), + list(abbr = "Nd", fips = "38"), + list(abbr = "Oh", fips = "39"), + list(abbr = "Ok", fips = "40"), + list(abbr = "Or", fips = "41"), + list(abbr = "Pa", fips = "42"), + list(abbr = "Ri", fips = "44"), + list(abbr = "Sc", fips = "45"), + list(abbr = "Sd", fips = "46"), + list(abbr = "Tn", fips = "47"), + list(abbr = "Tx", fips = "48"), + list(abbr = "Ut", fips = "49"), + list(abbr = "Vt", fips = "50"), + list(abbr = "Va", fips = "51"), + list(abbr = "Wa", fips = "53"), + list(abbr = "Wv", fips = "54"), + list(abbr = "Wi", fips = "55"), + list(abbr = "Wy", fips = "56"), + list(abbr = "Pr", fips = "72")) |> + purrr::map_chr(~ paste0(.x$abbr, .x$fips)) + + ## for block-level crosswalks, the maximum number of states per query is 13 + if ("block" %in% c(source_geography, target_geography)) { + + n = length(states_data) / 13 + groups = cut(seq_along(states_data), n, labels = FALSE) + states_chunked = split(states_data, groups) + + df1 = purrr::map_dfr( + states_chunked, + function(states) { + # Build query parameters + params <- list( + `_PROGRAM` = "apps.geocorr2022.sas", + `_SERVICE` = "MCDC_long", + `_debug` = "0", + `afacts2` = "on", + `g1_` = source_geography, + `g2_` = target_geography, + `wtvar` = weight_value, + `nozerob` = "1", + `fileout` = "1", + `filefmt` = "csv", + `lstfmt` = "html", + `title` = "", + `sort2` = "on", + `counties` = "", + `metros` = "", + `places` = "", + `oropt` = "", + `latitude` = "", + `longitude` = "", + `distance` = "", + `kiloms` = "0", + `locname` = "", + `state` = states) + + # Make the HTTP GET request using httr2 + request <- httr2::request(base_url) |> + httr2::req_url_query(!!!params, .multi = "explode") + + csv_path <- httr2::req_perform(request) |> + httr2::resp_body_html() |> + rvest::html_element("body") |> + rvest::html_text2() |> + stringr::str_extract("geocorr.*.csv") + + if (is.na(csv_path)) { stop("Unable to acquire the specified crosswalk; please file an issue.") } + + df1 = readr::read_csv(file.path("https://mcdc.missouri.edu", "temp", csv_path)) |> + janitor::clean_names() })} else { + + # Build query parameters + params <- list( + `_PROGRAM` = "apps.geocorr2022.sas", + `_SERVICE` = "MCDC_long", + `_debug` = "0", + `afacts2` = "on", + `g1_` = source_geography, + `g2_` = target_geography, + `wtvar` = weight_value, + `nozerob` = "1", + `fileout` = "1", + `filefmt` = "csv", + `lstfmt` = 
"html", + `title` = "", + `sort2` = "on", + `counties` = "", + `metros` = "", + `places` = "", + `oropt` = "", + `latitude` = "", + `longitude` = "", + `distance` = "", + `kiloms` = "0", + `locname` = "", + `state` = states_data) + + # Make the HTTP GET request using httr2 + request <- httr2::request(base_url) |> + httr2::req_url_query(!!!params, .multi = "explode") + + csv_path <- httr2::req_perform(request) |> + httr2::resp_body_html() |> + rvest::html_element("body") |> + rvest::html_text2() |> + stringr::str_extract("geocorr.*.csv") + + if (is.na(csv_path)) { stop("Unable to acquire the specified crosswalk; please file an issue.") } + + df1 = readr::read_csv(file.path("https://mcdc.missouri.edu", "temp", csv_path)) |> + janitor::clean_names() } + + df2 = df1 |> + dplyr::slice(2:nrow(df1)) |> + ## naming conventions for some geographies are inconsistent; we standardize + dplyr::rename_with( + .cols = dplyr::matches("zip_name"), + .fn = ~ .x |> stringr::str_replace("zip", "zcta")) |> + dplyr::rename_with( + .cols = dplyr::matches("puma22name"), + .fn = ~ .x |> stringr::str_replace("puma22name", "puma22_name")) |> + dplyr::rename_with( + .cols = dplyr::matches("hus20|pop20|landsqmi"), + .fn = ~ stringr::str_replace_all(.x, c( + "hus20" = "housing_2020", "pop20" = "population_2020", "landsqmi" = "land_area_sqmi"))) + + if (!"state" %in% colnames(df2)) { + df2 = df2 |> + dplyr::mutate( + state = stringr::str_sub(county, 1, 2), + county = stringr::str_sub(county, 3, 5)) } + + df2 = df2 |> + dplyr::mutate( + ## data with blocks/block groups/tracts have differently structured/named columns + ## we standardize here so that subsequent workflows are uniform + dplyr::across( + .cols = dplyr::matches("^block$"), + .fns = ~ stringr::str_c(state, county, tract, block) |> + stringr::str_remove_all("\\.")), + dplyr::across( + .cols = dplyr::matches("^block$"), + .fns = ~ stringr::str_c(county_name, " ", tract, block), + .names = "block_name"), + dplyr::across( + .cols = dplyr::matches("^blockgroup$"), + .fns = ~ stringr::str_c(state, county, tract, blockgroup) |> + stringr::str_remove_all("\\.")), + dplyr::across( + .cols = dplyr::matches("^blockgroup$"), + .fns = ~ stringr::str_c(county_name, " ", blockgroup), + .names = "blockgroup_name"), + dplyr::across( + .cols = dplyr::matches("^tract$"), + .fns = ~ stringr::str_c(state, county, tract) |> + stringr::str_remove_all("\\.")), + dplyr::across( + .cols = dplyr::matches("^tract$"), + .fns = ~ stringr::str_c(county_name, " ", tract), + .names = "tract_name"), + dplyr::across( + .cols = dplyr::matches("^cd11"), + .fns = ~ stringr::str_c(stab, "-", .x), + .names = "{.col}_name")) |> + dplyr::rename_with( + .cols = dplyr::matches("state|stab"), + .fn = ~ stringr::str_replace_all(.x, c("state" = "state_fips", "stab" = "state_abbreviation"))) |> + dplyr::select( + dplyr::matches("state"), + source_geoid = source_geography, + target_geoid = target_geography, + source_geography_name = !!stringr::str_c(source_geography, "_name"), + target_geography_name = !!stringr::str_c(target_geography, "_name"), + allocation_factor_source_to_target = afact, + allocation_factor_target_to_source = afact2, + dplyr::any_of(c("housing_2020", "population_2020", "land_area_sqmi"))) |> + dplyr::mutate( + source_geography = source_geography, + target_geography = target_geography, + weighting_factor = weight, + dplyr::across(.cols = dplyr::matches("allocation"), .fns = as.numeric)) + + if (!is.null(cache)) { + ## if the file does not already exist and cache is TRUE + if 
(!file.exists(outpath) & !is.null(cache)) { + ## if the specified cache directory doesn't yet exist, create it + if (!dir.exists(cache)) { dir.create(cache) } + readr::write_csv(df2, outpath) + } + } + return(df2) +} + +utils::globalVariables(c("afact", "afact2", "county")) + +# get_geocorr_crosswalk( +# source_geography = "zcta", +# target_geography = "puma22", +# weight = c("population"), +# cache = here::here("crosswalks-cache"), +# overwrite_cache = FALSE) + +# ## omitting the provided MO-specific geographies +# sources = c( +# "place", "county", "tract", "blockgroup", "block", "zcta", "puma22", "cousub", +# "cbsa20", "cbsatype20", "metdiv20", "csa20", "necta", "nectadiv", "cnect", "aiannh", +# ## these may be formatted differently -- including a state parameter? +# "sduni20", "sdelem20", "sdsec20", "sdbest20", "sdbesttype20", "placesc", "puma12", +# "countysc", "inplace", "ur", "ua", "cbsa23", "cbsatype23", "metdiv23", "csa23", +# "cbsacentral23", "sldu24", "sldl24", "sldu22", "sld22", "sldu18", "sldl28", +# "cd119", "cd118", "cd117", "cd116", +# ## ctregion only works for CT; "vtd20" may be nested at the country level? +# "ctregion", "vtd20", "hsa19", "hrr19", "rucc23") +# +# ## "block" -- this level requires submitting 13 or fewer states at a time +# +# core_sources = c("place", "county", "tract", "blockgroup", +# "zcta", "puma22", "cd119", "cd118") +# +# expand.grid(core_sources, core_sources) |> +# dplyr::rename(source_geography = 1, target_geography = 2) |> +# dplyr::filter(source_geography != target_geography) |> +# dplyr::mutate(weight = "population", cache = here::here("crosswalks-cache"), overwrite_cache = FALSE, dplyr::across(dplyr::where(is.factor), as.character)) |> +# purrr::pwalk(get_geocorr_crosswalk) + + diff --git a/R/get_nhgis_crosswalk.R b/R/get_nhgis_crosswalk.R index c2e7d0f..49277e8 100644 --- a/R/get_nhgis_crosswalk.R +++ b/R/get_nhgis_crosswalk.R @@ -1,425 +1,501 @@ -#' Standardize Geography Names -#' -#' Internal helper function to convert various geography name spellings to standard codes. -#' -#' @param geography Character. Geography name in various formats. -#' @param context Character. Either "source" or "target" to determine valid options. -#' @return Character. Standardized geography code. 
-#' @keywords internal -standardize_geography <- function(geography, context = "source") { - # Convert to lowercase and remove extra whitespace - geography <- geography |> - stringr::str_to_lower() |> - stringr::str_squish() |> - stringr::str_trim() |> - stringr::str_replace_all("_", " ") - - # Define mapping for different spellings - geography_mapping <- list( - # Blocks - "blk" = "blk", - "block" = "blk", - "blocks" = "blk", - "census block" = "blk", - "census blocks" = "blk", - - # Block groups - "bg" = "bg", - "blockgroup" = "bg", - "block group" = "bg", - "blockgroups" = "bg", - "block groups" = "bg", - "census block group" = "bg", - "census block groups" = "bg", - - # Block group parts (source only) - "bgp" = "bgp", - "block group part" = "bgp", - "block group parts" = "bgp", - "blockgroup part" = "bgp", - "blockgroup parts" = "bgp", - "census block group part" = "bgp", - "census block group parts" = "bgp", - - # Tracts - "tr" = "tr", - "tract" = "tr", - "tracts" = "tr", - "census tract" = "tr", - "census tracts" = "tr", - - # Counties - "co" = "co", - "county" = "co", - "counties" = "co", - "cnty" = "co", - - # Places - "pl" = "pl", - "place" = "pl", - "places" = "pl", - - # CBSAs - "cbsa" = "cbsa", - "cbsas" = "cbsa", - "core based statistical area" = "cbsa", - "core based statistical areas" = "cbsa", - - # Urban Areas - "ua" = "ua", - "uas" = "ua", - "urban area" = "ua", - "urban areas" = "ua", - - # PUMAs - "puma" = "puma", - "pumas" = "puma", - "public use microdata area" = "puma", - "public use microdata areas" = "puma", - - # ZCTAs - "zcta" = "zcta", - "zctas" = "zcta", - "zip code" = "zcta", - "zip codes" = "zcta", - "zip code tabulation area" = "zcta", - "zip code tabulation areas" = "zcta") - - # Check if the geography is in our mapping - if (geography %in% names(geography_mapping)) { - standardized <- geography_mapping[[geography]] - - # Validate based on context (source vs target geographies have different options) - if (context == "source") { - valid_geogs = c("blk", "bg", "tr") - if (standardized %in% valid_geogs) { - return(standardized) - } - } else if (context == "target") { - valid_geogs = c("blk", "bg", "tr", "co", "ua", "zcta", "puma", "cbsa") - if (standardized %in% valid_geogs) { - return(standardized) - } - } - } - - stop( -"The provided geography is invalid. Use `list_nhgis_crosswalks()` to check -available crosswalks.") -} - -#' List Available NHGIS Crosswalks -#' -#' Returns a tibble of all available NHGIS geographic crosswalks with their -#' corresponding parameters that can be used with get_nhgis_crosswalk(). 
-#' -#' @return A tibble with columns: -#' \itemize{ -#' \item source_year: Year of the source geography -#' \item source_geography: Source geography name -#' \item target_year: Year of the target geography -#' \item target_geography: Target geography name -#' } -#' -#' @export -list_nhgis_crosswalks <- function() { - nhgis_crosswalks_vector = c( - ## from 1990 to 2010 - ## blk source - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_ua2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_zcta2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_puma2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_blk2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_pl2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_bg2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_tr2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_co2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_cbsa2010.zip", - ## bgp source - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bgp1990_bg2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bgp1990_tr2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bgp1990_co2010.zip", - ## tr source - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr1990_tr2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr1990_co2010.zip", - - ## from 2000 to 2010 - ## blk source - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_ua2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_zcta2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_puma2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_pl2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_blk2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_bg2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_tr2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_co2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_cbsa2010.zip", - ## bgp source - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bgp2000_bg2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bgp2000_tr2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bgp2000_co2010.zip", - ## tr source - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2000_tr2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2000_co2010.zip", - - ## from 2010 to 2020 - ## blk source - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_ua2020.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_zcta2020.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_puma2020.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_pl2020.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_cbsa2020.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_blk2020.zip", - 
"https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_bg2020.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_tr2020.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_co2020.zip", - ## bg source - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2010_bg2020.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2010_tr2020.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2010_co2020.zip", - ## tr source - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2010_tr2020.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2010_co2020.zip", - - ## from 2020 to 2010 - ## blk source - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_ua2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_zcta2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_puma2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_pl2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_cbsa2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_blk2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_bg2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_tr2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_co2010.zip", - ## bg source - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2020_bg2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2020_tr2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2020_co2010.zip", - ## tr source - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2020_tr2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2020_co2010.zip") - - ## for the time being, not supporting block group parts - nhgis_crosswalks_vector = nhgis_crosswalks_vector[!stringr::str_detect(nhgis_crosswalks_vector, "bgp")] - - nhgis_crosswalks = purrr::map_dfr( - nhgis_crosswalks_vector |> stringr::str_remove_all(".*nhgis_|\\.zip"), - function(path) { - path_parts = stringr::str_split(path, "_") |> _[[1]] - - tibble::tibble( - source_geography = path_parts[1] |> stringr::str_extract("[a-zA-Z]{1,10}"), - source_year = path_parts[1] |> stringr::str_extract("[0-9]{1,10}"), - target_geography = path_parts[2] |> stringr::str_extract("[a-zA-Z]{1,10}"), - target_year = path_parts[2] |> stringr::str_extract("[0-9]{1,10}")) |> - dplyr::mutate( - dplyr::across( - .cols = dplyr::matches("geography"), - .fns = ~ .x |> stringr::str_replace_all(c( - "blk" = "block", - "bgp" = "block_group_part", - "bg" = "block_group", - "tr" = "tract", - "co" = "county", - "ua" = "urban_area", - "cbsa" = "core_based_statistical_area")))) }) |> - dplyr::bind_cols(tibble::tibble(crosswalk_path = nhgis_crosswalks_vector)) - - return(nhgis_crosswalks) -} - -#' Get NHGIS Geographic Crosswalk -#' -#' Retrieves a geographic crosswalk from the IPUMS NHGIS API based on user-specified -#' source and target geographies and years. Use `list_nhgis_crosswalks()` to view valid -#' parameter combinations. -#' -#' @details Note: this function does not support block group part crosswalks at this time. -#' -#' @param source_year Character or numeric. Year of the source geography one of -#' c(1990, 2000, 2010, 2020). 
-#' @param source_geography Character. Source geography name. One of c("block", -#' "blockgroup", "tract"). -#' @param target_year Character or numeric. Year of the target geography, one of -#' c(1990, 2000, 2010, 2020). -#' @param target_geography Character. Target geography name. One of c("block", -#' "block group", "tract", "place", county", "urban_area", "zcta", "puma", -#' "core_based_statistical_area"). -#' @param cache Directory path. Where to download the crosswalk to. If NULL (default), -#' crosswalk is returned but not saved to disk. -#' -#' @return A data frame containing the crosswalk between the specified geographies. -#' Data are tidy-formatted, with each observation reflecting a unique -#' source-target-weighting factor combination. Note that all (typically two -#' or three) available weighting factors are returned. -#' -#'#' @return A dataframe representing the requested Geocorr22 crosswalk for all -#' 51 states and Puerto Rico. Depending on the desired geographies, some -#' fields may not be included. -#' \describe{ -#' \item{source_geoid}{A unique identifier for the source geography} -#' \item{target_geoid}{A unique identifier for the target geography} -#' \item{source_geography_name}{The name of the source geography} -#' \item{target_geography_name}{The name of the target geography} -#' \item{source_year}{The year of the source geography} -#' \item{target_year}{The year of the target geography} -#' \item{allocation_factor_source_to_target}{The weight to interpolate values -#' from the source geography to the target geography} -#' \item{weighting_factor}{The attribute used to calculate allocation factors} -#' } -#' @noRd -get_nhgis_crosswalk <- function( - source_year, - source_geography, - target_year, - target_geography, - cache = NULL, - api_key = NULL) { - - if (is.null(cache)) { cache_path = tempdir() } else {cache_path = cache} - - # Convert years to character for consistent processing - source_year = as.character(source_year) - target_year = as.character(target_year) - - # Standardize geography names - source_geography_standardized = standardize_geography(source_geography, "source") - target_geography_standardized = standardize_geography(target_geography, "target") - - crosswalk_sub_path = stringr::str_c(source_geography_standardized, source_year, "_", target_geography_standardized, target_year) - crosswalk_path <- paste0("https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_", crosswalk_sub_path, ".zip") - - ## identify the relevant file paths for potentially-cached crosswalks - csv_path = file.path( - cache_path, - stringr::str_c( - "crosswalk_nhgis_", source_year, "_to_", target_year, "_", - source_geography, "_to_", target_geography, ".csv")) - - ## if the file exists and cache == TRUE - if (file.exists(csv_path) & !is.null(cache)) { - result = readr::read_csv(csv_path) - - message( -"Use of NHGIS crosswalks is subject to the same conditions as for all NHGIS data. 
-See https://www.nhgis.org/citation-and-use-nhgis-data.") - message("Reading file from cache.") - - return(result) } - - # Validate inputs - valid_years = c("1990", "2000", "2010", "2020") - valid_source_geogs = c("blk", "bg", "tr") - valid_target_geogs = c("blk", "bg", "tr", "co", "ua", "zcta", "puma", "cbsa") - - if (source_year == "1990" & target_year == "2000") { - stop( -"There are no crosswalks from 1990 to 2000; 1990 source geography crosswalks are -available only to 2010 geographies.")} - - if (!source_year %in% valid_years) { - stop("source_year must be one of: ", paste(valid_years, collapse = ", "))} - - if (!target_year %in% valid_years) { - stop("target_year must be one of: ", paste(valid_years, collapse = ", "))} - - if (is.null(source_geography_standardized)) { - stop( -"source_geography '", source_geography, "' is not valid. Must be one of: blocks, -block group parts, or tracts (various spellings accepted).")} - - if (is.null(target_geography_standardized)) { - stop( -"target_geography '", target_geography, "' is not valid. Must be one of: blocks, -block groups, tracts, or counties (various spellings accepted)")} - - if (!(crosswalk_path %in% list_nhgis_crosswalks()$crosswalk_path)) { - stop(stringr::str_c( -"There is no available crosswalk between the specified geographies and years.")) } - - api_key = Sys.getenv("IPUMS_API_KEY") - if (api_key == "") { - stop( -"API key required. Save your API key to the IPUMS_API_KEY environment -variable. Get your key at https://account.ipums.org/api_keys") } - - crosswalk_df1 = tryCatch({ - - zip_path = file.path(cache_path, stringr::str_c(crosswalk_sub_path, ".zip")) - csv_path_temporary = file.path(cache_path, stringr::str_c("nhgis_", crosswalk_sub_path, ".csv")) - - ## if the specified directory doesn't yet exist, create it - if (!dir.exists(cache_path)) { dir.create(cache_path) } - - # Download the crosswalk file - response = httr::GET( - crosswalk_path, - httr::add_headers(Authorization = api_key), - httr::write_disk(zip_path, overwrite = TRUE), overwrite = TRUE) - - # Unzip the .zip - utils::unzip( - zipfile = zip_path, - exdir = file.path(cache_path)) - - crosswalk_df = readr::read_csv(csv_path_temporary) |> - janitor::clean_names() - - # Remove the zipped folder and the raw CSV file - file.remove(zip_path) - file.remove(csv_path_temporary) - - crosswalk_df - }, - error = function(e) { - stop("Failed to retrieve crosswalk: ", e$message) }) - - crosswalk_df = crosswalk_df1 |> - dplyr::select(-dplyr::matches("gj")) |> - dplyr::rename_with( - .cols = dplyr::everything(), - .fn = ~ .x |> stringr::str_replace_all(c( - ## for block-based crosswalks, there's only a single, combined weight - "^weight$" = "weight_housing_population", - "parea" = "weight_landarea", - "wt" = "weight", - "pop$" = "population", - "fam" = "family", - "hh" = "household", - "_hu" = "_housing_all", - "ownhu" = "housing_owned", - "renthu" = "housing_rented"))) |> - dplyr::rename( - source_geoid = !!(stringr::str_c(source_geography_standardized, source_year, "ge")), - target_geoid = !!(stringr::str_c(target_geography_standardized, target_year, "ge"))) |> - dplyr::mutate( - source_geography_name = source_geography_standardized, - target_geography_name = target_geography_standardized, - dplyr::across( - .cols = dplyr::matches("geography_name"), - .fns = ~ .x |> stringr::str_replace_all(c( - "blk" = "block", - "bgp" = "block_group_part", - "bg" = "block_group", - "tr" = "tract", - "co" = "county"))), - source_year = source_year, - target_year = target_year) |> - 
tidyr::pivot_longer( - cols = dplyr::matches("weight_"), - names_to = "weighting_factor", - values_to = "allocation_factor_source_to_target") - - ## if the file does not already exist and cache is not NULL - if (!file.exists(csv_path) & !is.null(cache) ) { - readr::write_csv(crosswalk_df, csv_path) } - -message( -"Use of NHGIS crosswalks is subject to the same conditions as for all NHGIS data. -See https://www.nhgis.org/citation-and-use-nhgis-data.") - - return(crosswalk_df) -} - - +#' Standardize Geography Names +#' +#' Internal helper function to convert various geography name spellings to standard codes. +#' +#' @param geography Character. Geography name in various formats. +#' @param context Character. Either "source" or "target" to determine valid options. +#' @return Character. Standardized geography code. +#' @keywords internal +standardize_geography <- function(geography, context = "source") { + # Convert to lowercase and remove extra whitespace + geography <- geography |> + stringr::str_to_lower() |> + stringr::str_squish() |> + stringr::str_trim() |> + stringr::str_replace_all("_", " ") + + # Define mapping for different spellings + geography_mapping <- list( + # Blocks + "blk" = "blk", + "block" = "blk", + "blocks" = "blk", + "census block" = "blk", + "census blocks" = "blk", + + # Block groups + "bg" = "bg", + "blockgroup" = "bg", + "block group" = "bg", + "blockgroups" = "bg", + "block groups" = "bg", + "census block group" = "bg", + "census block groups" = "bg", + + # Block group parts (source only) + "bgp" = "bgp", + "block group part" = "bgp", + "block group parts" = "bgp", + "blockgroup part" = "bgp", + "blockgroup parts" = "bgp", + "census block group part" = "bgp", + "census block group parts" = "bgp", + + # Tracts + "tr" = "tr", + "tract" = "tr", + "tracts" = "tr", + "census tract" = "tr", + "census tracts" = "tr", + + # Counties + "co" = "co", + "county" = "co", + "counties" = "co", + "cnty" = "co", + + # Places + "pl" = "pl", + "place" = "pl", + "places" = "pl", + + # CBSAs + "cbsa" = "cbsa", + "cbsas" = "cbsa", + "core based statistical area" = "cbsa", + "core based statistical areas" = "cbsa", + + # Urban Areas + "ua" = "ua", + "uas" = "ua", + "urban area" = "ua", + "urban areas" = "ua", + + # PUMAs + "puma" = "puma", + "pumas" = "puma", + "public use microdata area" = "puma", + "public use microdata areas" = "puma", + + # ZCTAs + "zcta" = "zcta", + "zctas" = "zcta", + "zip code" = "zcta", + "zip codes" = "zcta", + "zip code tabulation area" = "zcta", + "zip code tabulation areas" = "zcta") + + # Check if the geography is in our mapping + if (geography %in% names(geography_mapping)) { + standardized <- geography_mapping[[geography]] + + # Validate based on context (source vs target geographies have different options) + if (context == "source") { + valid_geogs = c("blk", "bg", "tr") + if (standardized %in% valid_geogs) { + return(standardized) + } + } else if (context == "target") { + valid_geogs = c("blk", "bg", "tr", "co", "ua", "zcta", "puma", "cbsa") + if (standardized %in% valid_geogs) { + return(standardized) + } + } + } + + stop( +"The provided geography is invalid. Use `list_nhgis_crosswalks()` to check +available crosswalks.") +} + +#' List Available NHGIS Crosswalks +#' +#' Returns a tibble of all available NHGIS geographic crosswalks with their +#' corresponding parameters that can be used with get_nhgis_crosswalk(). 
+#' +#' @return A tibble with columns: +#' \itemize{ +#' \item source_year: Year of the source geography +#' \item source_geography: Source geography name +#' \item target_year: Year of the target geography +#' \item target_geography: Target geography name +#' } +#' +#' @export +list_nhgis_crosswalks <- function() { + nhgis_crosswalks_vector = c( + ## from 1990 to 2010 + ## blk source + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_ua2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_zcta2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_puma2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_blk2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_pl2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_bg2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_tr2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_co2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_cbsa2010.zip", + ## bgp source + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bgp1990_bg2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bgp1990_tr2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bgp1990_co2010.zip", + ## tr source + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr1990_tr2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr1990_co2010.zip", + + ## from 2000 to 2010 + ## blk source + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_ua2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_zcta2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_puma2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_pl2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_blk2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_bg2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_tr2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_co2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_cbsa2010.zip", + ## bgp source + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bgp2000_bg2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bgp2000_tr2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bgp2000_co2010.zip", + ## tr source + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2000_tr2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2000_co2010.zip", + + ## from 2010 to 2020 + ## blk source + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_ua2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_zcta2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_puma2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_pl2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_cbsa2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_blk2020.zip", + 
"https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_bg2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_tr2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_co2020.zip", + ## bg source + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2010_bg2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2010_tr2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2010_co2020.zip", + ## tr source + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2010_tr2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2010_co2020.zip", + + ## from 2020 to 2010 + ## blk source + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_ua2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_zcta2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_puma2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_pl2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_cbsa2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_blk2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_bg2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_tr2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_co2010.zip", + ## bg source + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2020_bg2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2020_tr2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2020_co2010.zip", + ## tr source + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2020_tr2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2020_co2010.zip", + + ## ========================================================================= + ## NON-CENSUS YEAR CROSSWALKS + ## Available for block groups, tracts, and counties only + ## Years with boundary changes: 2011, 2012, 2014, 2015, 2022 + ## ========================================================================= + + ## from 1990 to non-census years (2011, 2012, 2014, 2015) + ## blk source to bg + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_bg2011.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_bg2012.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_bg2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_bg2015.zip", + ## blk source to tr + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_tr2011.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_tr2012.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_tr2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_tr2015.zip", + ## blk source to co + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_co2011.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_co2012.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_co2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_co2015.zip", + + ## from 2000 to non-census years (2011, 2012, 2014, 
2015) + ## blk source to bg + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_bg2011.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_bg2012.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_bg2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_bg2015.zip", + ## blk source to tr + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_tr2011.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_tr2012.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_tr2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_tr2015.zip", + ## blk source to co + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_co2011.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_co2012.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_co2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_co2015.zip", + + ## from 2010 to 2022 + ## blk source + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_bg2022.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_tr2022.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_co2022.zip", + + ## from 2020 to non-census years (2011, 2012, 2014, 2015) + ## blk source to bg + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_bg2011.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_bg2012.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_bg2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_bg2015.zip", + ## blk source to tr + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_tr2011.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_tr2012.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_tr2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_tr2015.zip", + ## blk source to co + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_co2011.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_co2012.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_co2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_co2015.zip") + + ## for the time being, not supporting block group parts + nhgis_crosswalks_vector = nhgis_crosswalks_vector[!stringr::str_detect(nhgis_crosswalks_vector, "bgp")] + + nhgis_crosswalks = purrr::map_dfr( + nhgis_crosswalks_vector |> stringr::str_remove_all(".*nhgis_|\\.zip"), + function(path) { + path_parts = stringr::str_split(path, "_") |> _[[1]] + + tibble::tibble( + source_geography = path_parts[1] |> stringr::str_extract("[a-zA-Z]{1,10}"), + source_year = path_parts[1] |> stringr::str_extract("[0-9]{1,10}"), + target_geography = path_parts[2] |> stringr::str_extract("[a-zA-Z]{1,10}"), + target_year = path_parts[2] |> stringr::str_extract("[0-9]{1,10}")) |> + dplyr::mutate( + dplyr::across( + .cols = dplyr::matches("geography"), + .fns = ~ .x |> stringr::str_replace_all(c( + "blk" = "block", + "bgp" = "block_group_part", + "bg" = "block_group", + "tr" = "tract", + "co" = "county", + "ua" = "urban_area", + "cbsa" = 
"core_based_statistical_area")))) }) |> + dplyr::bind_cols(tibble::tibble(crosswalk_path = nhgis_crosswalks_vector)) + + return(nhgis_crosswalks) +} + +#' Get NHGIS Geographic Crosswalk +#' +#' Retrieves a geographic crosswalk from the IPUMS NHGIS API based on user-specified +#' source and target geographies and years. Use `list_nhgis_crosswalks()` to view valid +#' parameter combinations. +#' +#' @details Note: this function does not support block group part crosswalks at this time. +#' +#' @param source_year Character or numeric. Year of the source geography one of +#' c(1990, 2000, 2010, 2020). +#' @param source_geography Character. Source geography name. One of c("block", +#' "blockgroup", "tract"). +#' @param target_year Character or numeric. Year of the target geography, one of +#' c(1990, 2000, 2010, 2020). +#' @param target_geography Character. Target geography name. One of c("block", +#' "block group", "tract", "place", county", "urban_area", "zcta", "puma", +#' "core_based_statistical_area"). +#' @param cache Directory path. Where to download the crosswalk to. If NULL (default), +#' crosswalk is returned but not saved to disk. +#' +#' @return A data frame containing the crosswalk between the specified geographies. +#' Data are tidy-formatted, with each observation reflecting a unique +#' source-target-weighting factor combination. Note that all (typically two +#' or three) available weighting factors are returned. +#' +#'#' @return A dataframe representing the requested Geocorr22 crosswalk for all +#' 51 states and Puerto Rico. Depending on the desired geographies, some +#' fields may not be included. +#' \describe{ +#' \item{source_geoid}{A unique identifier for the source geography} +#' \item{target_geoid}{A unique identifier for the target geography} +#' \item{source_geography_name}{The name of the source geography} +#' \item{target_geography_name}{The name of the target geography} +#' \item{source_year}{The year of the source geography} +#' \item{target_year}{The year of the target geography} +#' \item{allocation_factor_source_to_target}{The weight to interpolate values +#' from the source geography to the target geography} +#' \item{weighting_factor}{The attribute used to calculate allocation factors} +#' } +#' @noRd +get_nhgis_crosswalk <- function( + source_year, + source_geography, + target_year, + target_geography, + cache = NULL, + api_key = NULL) { + + if (is.null(cache)) { cache_path = tempdir() } else {cache_path = cache} + + # Convert years to character for consistent processing + source_year = as.character(source_year) + target_year = as.character(target_year) + + # Standardize geography names + source_geography_standardized = standardize_geography(source_geography, "source") + target_geography_standardized = standardize_geography(target_geography, "target") + + crosswalk_sub_path = stringr::str_c(source_geography_standardized, source_year, "_", target_geography_standardized, target_year) + crosswalk_path <- paste0("https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_", crosswalk_sub_path, ".zip") + + ## identify the relevant file paths for potentially-cached crosswalks + csv_path = file.path( + cache_path, + stringr::str_c( + "crosswalk_nhgis_", source_year, "_to_", target_year, "_", + source_geography, "_to_", target_geography, ".csv")) + + ## if the file exists and cache == TRUE + if (file.exists(csv_path) & !is.null(cache)) { + result = readr::read_csv(csv_path) + + message( +"Use of NHGIS crosswalks is subject to the same conditions as for all NHGIS 
data. +See https://www.nhgis.org/citation-and-use-nhgis-data.") + message("Reading file from cache.") + + return(result) } + + # Validate inputs + valid_decennial_years <- c("1990", "2000", "2010", "2020") + valid_noncensus_years <- c("2011", "2012", "2014", "2015", "2022") + valid_source_years <- valid_decennial_years + valid_target_years <- c(valid_decennial_years, valid_noncensus_years) + valid_source_geogs <- c("blk", "bg", "tr") + valid_target_geogs <- c("blk", "bg", "tr", "co", "ua", "zcta", "puma", "cbsa") + noncensus_target_geogs <- c("bg", "tr", "co") + + if (source_year == "1990" & target_year == "2000") { + stop( +"There are no crosswalks from 1990 to 2000; 1990 source geography crosswalks are +available only to 2010 geographies.")} + + if (!source_year %in% valid_source_years) { + stop("source_year must be one of: ", paste(valid_source_years, collapse = ", "))} + + if (!target_year %in% valid_target_years) { + stop("target_year must be one of: ", paste(valid_target_years, collapse = ", "))} + + if (target_year %in% valid_noncensus_years) { + if (!target_geography_standardized %in% noncensus_target_geogs) { + stop( +"Non-census year crosswalks (2011, 2012, 2014, 2015, 2022) are only available +for block groups, tracts, and counties. The requested geography '", +target_geography, "' is not supported for target year ", target_year, ".") + } + } + + if (is.null(source_geography_standardized)) { + stop( +"source_geography '", source_geography, "' is not valid. Must be one of: blocks, +block group parts, or tracts (various spellings accepted).")} + + if (is.null(target_geography_standardized)) { + stop( +"target_geography '", target_geography, "' is not valid. Must be one of: blocks, +block groups, tracts, or counties (various spellings accepted)")} + + if (!(crosswalk_path %in% list_nhgis_crosswalks()$crosswalk_path)) { + stop(stringr::str_c( +"There is no available crosswalk between the specified geographies and years.")) } + + api_key = Sys.getenv("IPUMS_API_KEY") + if (api_key == "") { + stop( +"API key required. Save your API key to the IPUMS_API_KEY environment +variable. 
Get your key at https://account.ipums.org/api_keys") } + + crosswalk_df1 = tryCatch({ + + zip_path = file.path(cache_path, stringr::str_c(crosswalk_sub_path, ".zip")) + csv_path_temporary = file.path(cache_path, stringr::str_c("nhgis_", crosswalk_sub_path, ".csv")) + + ## if the specified directory doesn't yet exist, create it + if (!dir.exists(cache_path)) { dir.create(cache_path) } + + # Download the crosswalk file + response = httr::GET( + crosswalk_path, + httr::add_headers(Authorization = api_key), + httr::write_disk(zip_path, overwrite = TRUE), overwrite = TRUE) + + # Unzip the .zip + utils::unzip( + zipfile = zip_path, + exdir = file.path(cache_path)) + + crosswalk_df = readr::read_csv(csv_path_temporary) |> + janitor::clean_names() + + # Remove the zipped folder and the raw CSV file + file.remove(zip_path) + file.remove(csv_path_temporary) + + crosswalk_df + }, + error = function(e) { + stop("Failed to retrieve crosswalk: ", e$message) }) + + crosswalk_df = crosswalk_df1 |> + dplyr::select(-dplyr::matches("gj")) |> + dplyr::rename_with( + .cols = dplyr::everything(), + .fn = ~ .x |> stringr::str_replace_all(c( + ## for block-based crosswalks, there's only a single, combined weight + "^weight$" = "weight_housing_population", + "parea" = "weight_landarea", + "wt" = "weight", + "pop$" = "population", + "fam" = "family", + "hh" = "household", + "_hu" = "_housing_all", + "ownhu" = "housing_owned", + "renthu" = "housing_rented"))) |> + dplyr::rename( + source_geoid = !!(stringr::str_c(source_geography_standardized, source_year, "ge")), + target_geoid = !!(stringr::str_c(target_geography_standardized, target_year, "ge"))) |> + dplyr::mutate( + source_geography_name = source_geography_standardized, + target_geography_name = target_geography_standardized, + dplyr::across( + .cols = dplyr::matches("geography_name"), + .fns = ~ .x |> stringr::str_replace_all(c( + "blk" = "block", + "bgp" = "block_group_part", + "bg" = "block_group", + "tr" = "tract", + "co" = "county"))), + source_year = source_year, + target_year = target_year) |> + tidyr::pivot_longer( + cols = dplyr::matches("weight_"), + names_to = "weighting_factor", + values_to = "allocation_factor_source_to_target") + + ## if the file does not already exist and cache is not NULL + if (!file.exists(csv_path) & !is.null(cache) ) { + readr::write_csv(crosswalk_df, csv_path) } + +message( +"Use of NHGIS crosswalks is subject to the same conditions as for all NHGIS data. +See https://www.nhgis.org/citation-and-use-nhgis-data.") + + return(crosswalk_df) +} + + diff --git a/README.md b/README.md index f33fca7..91bb023 100644 --- a/README.md +++ b/README.md @@ -1,99 +1,99 @@ -# crosswalk - -An R interface to inter-geography and inter-temporal crosswalks. - -## Overview - -This project provides a consistent API and standardized versions of -crosswalks to allow for programmatic interpolation over time and between -geographies. Say goodbye to manual crosswalk downloads and hello to -reproducible workflows. 
- -## Installation - -``` r -# Install dependencies -renv::install("UI-Research/crosswalks") -``` - -## Usage - -To get started with `library(crosswalk)`: - -``` r -# Load the package -library(crosswalk) - -## obtain a crosswalk to translate data in 2020-vintage place geographies -## to 2020-vintage county geographies, weighted by population -place_county_crosswalk = get_crosswalk( - source_geography = "place", - target_geography = "county", - weight = c("population"), - cache = here::here("data")) - -## obtain a crosswalk to translate data in 2000-vintage place geographies -## to 2010-vintage place geographies. all available weighting options are -## returned -get_crosswalk( - source_year = 2000, - target_year = 2010, - source_geography = "place", - target_geography = "place", - cache = here::here("data")) -``` - -## Why Use `library(crosswalk)`? - -Crosswalks are a critical component of conducting social sciences -research as they enable analysts to translate data from one geography -and/or temporal vintage to another. For example, if source data are only -available at the county level, a crosswalk can help to produce estimates -of those source data at the city level, enabling analysis at a geography -(the city) that may be either more relevant to target audience(s) and/or -may align with the geography of other data that form part of the -overarching analysis. - -There are excellent existing resources for crosswalks, including the -University of Missouri - Missouri Census Data Center's Geocorr 2022 -crosswalking application and the IPUMS National Historical Geographic -Information System (NHGIS). In fact, the crosswalks returned by using -`library(crosswalk)` are those from Geocorr and NHGIS. - -So why use this package at all? It provides: - -- A consistent, programmatic approach to acquire crosswalks, rather - than ad-hoc manual downloads; - -- Standardized and clear crosswalk variable names so that you can - easily work with multiple crosswalks using the same workflow; - -- Crosswalk metadata stored within the returned crosswalk–no more - commenting in your script with the 15 options you configured prior - to clicking "Download"; - -- The ability to easily "cache" crosswalks locally. - -In brief: this package facilitates a well documented and reproducible -analysis workflow, building on top of the robust underlying resources -already available for crosswalking. - -## Citations! - -The intellectual work and credit for the underlying crosswalks returned -by this package belongs to the original developers of those crosswalks. -You should (in the case of Geocorr crosswalks) and in some cases must -(in the case of NHGIS crosswalks) appropriately cite the developers when -you use these resources. - -**For NHGIS**, you should refer to the NHGIS website and terms of use, -including the recommended citations provided at: -. - -**For Geocorr**, the author of `library(crosswalk)` is unaware of a -required or suggested citation format. An example citation might look -like: - -> Missouri Census Data Center, University of Missouri. (2022). Geocorr -> 2022: Geographic Correspondence Engine. Retrieved [202X-XX-XX] from: -> . +# crosswalk + +An R interface to inter-geography and inter-temporal crosswalks. + +## Overview + +This project provides a consistent API and standardized versions of +crosswalks to allow for programmatic interpolation over time and between +geographies. Say goodbye to manual crosswalk downloads and hello to +reproducible workflows. 
+ +## Installation + +``` r +# Install dependencies +renv::install("UI-Research/crosswalks") +``` + +## Usage + +To get started with `library(crosswalk)`: + +``` r +# Load the package +library(crosswalk) + +## obtain a crosswalk to translate data in 2020-vintage place geographies +## to 2020-vintage county geographies, weighted by population +place_county_crosswalk = get_crosswalk( + source_geography = "place", + target_geography = "county", + weight = c("population"), + cache = here::here("data")) + +## obtain a crosswalk to translate data in 2000-vintage place geographies +## to 2010-vintage place geographies. all available weighting options are +## returned +get_crosswalk( + source_year = 2000, + target_year = 2010, + source_geography = "place", + target_geography = "place", + cache = here::here("data")) +``` + +## Why Use `library(crosswalk)`? + +Crosswalks are a critical component of conducting social sciences +research as they enable analysts to translate data from one geography +and/or temporal vintage to another. For example, if source data are only +available at the county level, a crosswalk can help to produce estimates +of those source data at the city level, enabling analysis at a geography +(the city) that may be either more relevant to target audience(s) and/or +may align with the geography of other data that form part of the +overarching analysis. + +There are excellent existing resources for crosswalks, including the +University of Missouri - Missouri Census Data Center's Geocorr 2022 +crosswalking application and the IPUMS National Historical Geographic +Information System (NHGIS). In fact, the crosswalks returned by using +`library(crosswalk)` are those from Geocorr and NHGIS. + +So why use this package at all? It provides: + +- A consistent, programmatic approach to acquire crosswalks, rather + than ad-hoc manual downloads; + +- Standardized and clear crosswalk variable names so that you can + easily work with multiple crosswalks using the same workflow; + +- Crosswalk metadata stored within the returned crosswalk–no more + commenting in your script with the 15 options you configured prior + to clicking "Download"; + +- The ability to easily "cache" crosswalks locally. + +In brief: this package facilitates a well documented and reproducible +analysis workflow, building on top of the robust underlying resources +already available for crosswalking. + +## Citations! + +The intellectual work and credit for the underlying crosswalks returned +by this package belongs to the original developers of those crosswalks. +You should (in the case of Geocorr crosswalks) and in some cases must +(in the case of NHGIS crosswalks) appropriately cite the developers when +you use these resources. + +**For NHGIS**, you should refer to the NHGIS website and terms of use, +including the recommended citations provided at: +. + +**For Geocorr**, the author of `library(crosswalk)` is unaware of a +required or suggested citation format. An example citation might look +like: + +> Missouri Census Data Center, University of Missouri. (2022). Geocorr +> 2022: Geographic Correspondence Engine. Retrieved [202X-XX-XX] from: +> . 
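+
+## Applying a Crosswalk
+
+Crosswalks are weights, not re-estimated data: to interpolate a count variable,
+join your source-geography data to the crosswalk and take an
+allocation-weighted sum by target geography. The sketch below is illustrative
+only; `county_data`, its `households` column, and the figures in it are
+hypothetical stand-ins for your own source data, while the crosswalk columns
+(`source_geoid`, `target_geoid`, `allocation_factor_source_to_target`,
+`target_geography_name`) use the standardized names documented for
+`get_crosswalk()`.
+
+``` r
+library(dplyr)
+library(crosswalk)
+
+## hypothetical source data: one row per county, with a count to interpolate
+county_data <- tibble::tibble(
+  county_geoid = c("24031", "24033"),
+  households = c(410000, 350000))
+
+## county-to-place crosswalk, weighted by housing units
+county_place_crosswalk <- get_crosswalk(
+  source_geography = "county",
+  target_geography = "place",
+  weight = "housing")
+
+place_estimates <- county_data |>
+  ## attach every place-sized piece of each county
+  left_join(county_place_crosswalk, by = c("county_geoid" = "source_geoid")) |>
+  ## apportion the county count to each piece, then total by place
+  mutate(households_allocated = households * allocation_factor_source_to_target) |>
+  group_by(target_geoid, target_geography_name) |>
+  summarise(households = sum(households_allocated), .groups = "drop")
+```
+
+An allocation-weighted sum like this is appropriate for counts. Rates, medians,
+and other non-additive measures need extra care, and inter-temporal (NHGIS)
+crosswalks return several weighting factors, so filter to a single
+`weighting_factor` before aggregating.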
diff --git a/_pkgdown.yml b/_pkgdown.yml index fe51c35..c7ff89e 100644 --- a/_pkgdown.yml +++ b/_pkgdown.yml @@ -1,4 +1,4 @@ -url: https://ui-research.github.io/crosswalk/ -template: - bootstrap: 5 - +url: https://ui-research.github.io/crosswalk/ +template: + bootstrap: 5 + diff --git a/crosswalk.Rproj b/crosswalk.Rproj index 270314b..6daccaa 100644 --- a/crosswalk.Rproj +++ b/crosswalk.Rproj @@ -1,21 +1,21 @@ -Version: 1.0 - -RestoreWorkspace: Default -SaveWorkspace: Default -AlwaysSaveHistory: Default - -EnableCodeIndexing: Yes -UseSpacesForTab: Yes -NumSpacesForTab: 2 -Encoding: UTF-8 - -RnwWeave: Sweave -LaTeX: pdfLaTeX - -AutoAppendNewline: Yes -StripTrailingWhitespace: Yes - -BuildType: Package -PackageUseDevtools: Yes -PackageInstallArgs: --no-multiarch --with-keep.source -PackageRoxygenize: rd,collate,namespace +Version: 1.0 + +RestoreWorkspace: Default +SaveWorkspace: Default +AlwaysSaveHistory: Default + +EnableCodeIndexing: Yes +UseSpacesForTab: Yes +NumSpacesForTab: 2 +Encoding: UTF-8 + +RnwWeave: Sweave +LaTeX: pdfLaTeX + +AutoAppendNewline: Yes +StripTrailingWhitespace: Yes + +BuildType: Package +PackageUseDevtools: Yes +PackageInstallArgs: --no-multiarch --with-keep.source +PackageRoxygenize: rd,collate,namespace diff --git a/man/get_crosswalk.Rd b/man/get_crosswalk.Rd index f62b1c4..d278fc4 100644 --- a/man/get_crosswalk.Rd +++ b/man/get_crosswalk.Rd @@ -15,33 +15,44 @@ get_crosswalk( } \arguments{ \item{source_geography}{Character. Source geography name. One of c("block", -"block group", "tract", "place", county", "urban_area", "zcta", "puma", "cd118", +"block group", "tract", "place", "county", "urban_area", "zcta", "puma", "cd118", "cd119", "urban_area", "core_based_statistical_area").} \item{target_geography}{Character. Target geography name. One of c("block", -"block group", "tract", "place", county", "urban_area", "zcta", "puma", "cd118", +"block group", "tract", "place", "county", "urban_area", "zcta", "puma", "cd118", "cd119", "urban_area", "core_based_statistical_area").} -\item{source_year}{Character or numeric. Year of the source geography one of +\item{source_year}{Character or numeric. Year of the source geography, one of c(1990, 2000, 2010, 2020).} \item{target_year}{Character or numeric. Year of the target geography, one of -c(1990, 2000, 2010, 2020).} +c(1990, 2000, 2010, 2020) for decennial crosswalks, or c(2011, 2012, 2014, +2015, 2022) for non-census year crosswalks (limited to block groups, tracts, +and counties).} \item{cache}{Directory path. Where to download the crosswalk to. If NULL (default), -crosswalk is returned but not saved to disk.} +crosswalk is returned but not saved to disk. Individual component crosswalks +are cached separately when provided.} -\item{weight}{Character. Weighting variable. One of c("population", "housing", "land").} +\item{weight}{Character. Weighting variable for Geocorr crosswalks. One of +c("population", "housing", "land").} } \value{ -A data frame containing the crosswalk between the specified geographies. +A tibble containing the crosswalk between the specified geographies. Data are tidy-formatted, with each observation reflecting a unique -source-target-weighting factor combination. Note that all (typically two -or three) available weighting factors are returned. +source-target-weighting factor combination. -A dataframe representing the requested crosswalk for all 51 states -and Puerto Rico. Depending on the desired geographies and the source of the -crosswalk (Geocorr vs. NHGIS), some fields may not be included. 
+The returned tibble includes an attribute \code{crosswalk_metadata} containing: +\describe{ +\item{source}{Character vector of data sources used (e.g., "nhgis", "ctdata")} +\item{source_year}{The source year} +\item{target_year}{The target year} +\item{source_geography}{The source geography} +\item{target_geography}{The target geography} +\item{notes}{Any relevant notes about the crosswalk construction} +} + +Columns in the returned dataframe (some may not be present depending on source): \describe{ \item{source_geoid}{A unique identifier for the source geography} \item{target_geoid}{A unique identifier for the target geography} @@ -57,6 +68,7 @@ from the target geography to the source geography} \item{housing_2020}{The estimated overlap in housing units, if applicable} \item{land_area_sqmi}{The overlap in land area, if applicable} \item{weighting_factor}{The attribute used to calculate allocation factors} +\item{state_fips}{Two-digit state FIPS code, if applicable} } } \description{ @@ -64,10 +76,19 @@ Retrieves a crosswalk with interpolation values from a source geography to a tar geography or from a source year to a target year. } \details{ -This function sources crosswalks from Geocorr 2022 and IPUMS NHGIS. -Crosswalk weights are from the original sources and have not been modified; -this function merely standardizes the format of the returned crosswalks and -enables easy programmatic access and cacheing. +This function sources crosswalks from Geocorr 2022, IPUMS NHGIS, and +CT Data Collaborative. Crosswalk weights are from the original sources and +have not been modified; this function merely standardizes the format of the +returned crosswalks and enables easy programmatic access and caching. + +\strong{Non-census year support}: For target years 2011, 2012, 2014, 2015, and 2022, +crosswalks are available only for block groups, tracts, and counties. These +years correspond to American Community Survey geography changes. + +\strong{2020 to 2022 crosswalks}: The 2022 geographic changes only affected +Connecticut (county-equivalent planning regions replaced historical counties). +For this case, the function combines CT Data Collaborative crosswalks for +Connecticut with identity mappings for other states. Note that an IPUMS NHGIS API key is required to access crosswalks from that source. Use \code{usethis::edit_r_environ(scope = "user")} to save your API key @@ -76,6 +97,27 @@ obtain a key from: https://account.ipums.org/api_keys. 
} \examples{ \dontrun{ +# Same-year crosswalk between geographies (uses Geocorr) +get_crosswalk( + source_geography = "zcta", + target_geography = "puma22", + weight = "population", + cache = here::here("crosswalks-cache")) + +# Inter-temporal crosswalk (uses NHGIS) +get_crosswalk( + source_geography = "tract", + target_geography = "tract", + source_year = 2010, + target_year = 2020, + cache = here::here("crosswalks-cache")) + +# Non-census year crosswalk (2020 to 2022, CT changes) get_crosswalk( + source_geography = "tract", + target_geography = "tract", + source_year = 2020, + target_year = 2022, + cache = here::here("crosswalks-cache")) } } diff --git a/renv.lock b/renv.lock index 1189a0f..6caeb86 100644 --- a/renv.lock +++ b/renv.lock @@ -35,35 +35,6 @@ "Maintainer": "Winston Chang ", "Repository": "CRAN" }, - "Rcpp": { - "Package": "Rcpp", - "Version": "1.1.0", - "Source": "Repository", - "Title": "Seamless R and C++ Integration", - "Date": "2025-07-01", - "Authors@R": "c(person(\"Dirk\", \"Eddelbuettel\", role = c(\"aut\", \"cre\"), email = \"edd@debian.org\", comment = c(ORCID = \"0000-0001-6419-907X\")), person(\"Romain\", \"Francois\", role = \"aut\", comment = c(ORCID = \"0000-0002-2444-4226\")), person(\"JJ\", \"Allaire\", role = \"aut\", comment = c(ORCID = \"0000-0003-0174-9868\")), person(\"Kevin\", \"Ushey\", role = \"aut\", comment = c(ORCID = \"0000-0003-2880-7407\")), person(\"Qiang\", \"Kou\", role = \"aut\", comment = c(ORCID = \"0000-0001-6786-5453\")), person(\"Nathan\", \"Russell\", role = \"aut\"), person(\"Iñaki\", \"Ucar\", role = \"aut\", comment = c(ORCID = \"0000-0001-6403-5550\")), person(\"Doug\", \"Bates\", role = \"aut\", comment = c(ORCID = \"0000-0001-8316-9503\")), person(\"John\", \"Chambers\", role = \"aut\"))", - "Description": "The 'Rcpp' package provides R functions as well as C++ classes which offer a seamless integration of R and C++. Many R data types and objects can be mapped back and forth to C++ equivalents which facilitates both writing of new code as well as easier integration of third-party libraries. 
Documentation about 'Rcpp' is provided by several vignettes included in this package, via the 'Rcpp Gallery' site at , the paper by Eddelbuettel and Francois (2011, ), the book by Eddelbuettel (2013, ) and the paper by Eddelbuettel and Balamuta (2018, ); see 'citation(\"Rcpp\")' for details.", - "Imports": [ - "methods", - "utils" - ], - "Suggests": [ - "tinytest", - "inline", - "rbenchmark", - "pkgKitten (>= 0.1.2)" - ], - "URL": "https://www.rcpp.org, https://dirk.eddelbuettel.com/code/rcpp.html, https://github.com/RcppCore/Rcpp", - "License": "GPL (>= 2)", - "BugReports": "https://github.com/RcppCore/Rcpp/issues", - "MailingList": "rcpp-devel@lists.r-forge.r-project.org", - "RoxygenNote": "6.1.1", - "Encoding": "UTF-8", - "NeedsCompilation": "yes", - "Author": "Dirk Eddelbuettel [aut, cre] (ORCID: ), Romain Francois [aut] (ORCID: ), JJ Allaire [aut] (ORCID: ), Kevin Ushey [aut] (ORCID: ), Qiang Kou [aut] (ORCID: ), Nathan Russell [aut], Iñaki Ucar [aut] (ORCID: ), Doug Bates [aut] (ORCID: ), John Chambers [aut]", - "Maintainer": "Dirk Eddelbuettel ", - "Repository": "CRAN" - }, "askpass": { "Package": "askpass", "Version": "1.2.1", @@ -89,193 +60,71 @@ "Maintainer": "Jeroen Ooms ", "Repository": "CRAN" }, - "base64enc": { - "Package": "base64enc", - "Version": "0.1-3", - "Source": "Repository", - "Title": "Tools for base64 encoding", - "Author": "Simon Urbanek ", - "Maintainer": "Simon Urbanek ", - "Depends": [ - "R (>= 2.9.0)" - ], - "Enhances": [ - "png" - ], - "Description": "This package provides tools for handling base64 encoding. It is more flexible than the orphaned base64 package.", - "License": "GPL-2 | GPL-3", - "URL": "http://www.rforge.net/base64enc", - "NeedsCompilation": "yes", - "Repository": "CRAN" - }, - "brew": { - "Package": "brew", - "Version": "1.0-10", - "Source": "Repository", - "Type": "Package", - "Title": "Templating Framework for Report Generation", - "Authors@R": "c( person(\"Jeffrey\", \"Horner\", role = c(\"aut\", \"cph\")), person(\"Greg\", \"Hunt\", , \"greg@firmansyah.com\", role = c(\"aut\", \"cre\", \"cph\")) )", - "Description": "Implements a templating framework for mixing text and R code for report generation. 
brew template syntax is similar to PHP, Ruby's erb module, Java Server Pages, and Python's psp module.", - "License": "GPL (>= 2)", - "URL": "https://github.com/gregfrog/brew", - "BugReports": "https://github.com/gregfrog/brew/issues", - "Suggests": [ - "testthat (>= 3.0.0)" - ], - "Config/testthat/edition": "3", - "Encoding": "UTF-8", - "Repository": "CRAN", - "NeedsCompilation": "no", - "Author": "Jeffrey Horner [aut, cph], Greg Hunt [aut, cre, cph]", - "Maintainer": "Greg Hunt " - }, - "brio": { - "Package": "brio", - "Version": "1.1.5", - "Source": "Repository", - "Title": "Basic R Input Output", - "Authors@R": "c( person(\"Jim\", \"Hester\", role = \"aut\", comment = c(ORCID = \"0000-0002-2739-7082\")), person(\"Gábor\", \"Csárdi\", , \"csardi.gabor@gmail.com\", role = c(\"aut\", \"cre\")), person(given = \"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "Functions to handle basic input output, these functions always read and write UTF-8 (8-bit Unicode Transformation Format) files and provide more explicit control over line endings.", - "License": "MIT + file LICENSE", - "URL": "https://brio.r-lib.org, https://github.com/r-lib/brio", - "BugReports": "https://github.com/r-lib/brio/issues", - "Depends": [ - "R (>= 3.6)" - ], - "Suggests": [ - "covr", - "testthat (>= 3.0.0)" - ], - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Encoding": "UTF-8", - "RoxygenNote": "7.2.3", - "NeedsCompilation": "yes", - "Author": "Jim Hester [aut] (), Gábor Csárdi [aut, cre], Posit Software, PBC [cph, fnd]", - "Maintainer": "Gábor Csárdi ", - "Repository": "CRAN" - }, - "bslib": { - "Package": "bslib", - "Version": "0.9.0", + "bit": { + "Package": "bit", + "Version": "4.6.0", "Source": "Repository", - "Title": "Custom 'Bootstrap' 'Sass' Themes for 'shiny' and 'rmarkdown'", - "Authors@R": "c( person(\"Carson\", \"Sievert\", , \"carson@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0002-4958-2844\")), person(\"Joe\", \"Cheng\", , \"joe@posit.co\", role = \"aut\"), person(\"Garrick\", \"Aden-Buie\", , \"garrick@posit.co\", role = \"aut\", comment = c(ORCID = \"0000-0002-7111-0077\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")), person(, \"Bootstrap contributors\", role = \"ctb\", comment = \"Bootstrap library\"), person(, \"Twitter, Inc\", role = \"cph\", comment = \"Bootstrap library\"), person(\"Javi\", \"Aguilar\", role = c(\"ctb\", \"cph\"), comment = \"Bootstrap colorpicker library\"), person(\"Thomas\", \"Park\", role = c(\"ctb\", \"cph\"), comment = \"Bootswatch library\"), person(, \"PayPal\", role = c(\"ctb\", \"cph\"), comment = \"Bootstrap accessibility plugin\") )", - "Description": "Simplifies custom 'CSS' styling of both 'shiny' and 'rmarkdown' via 'Bootstrap' 'Sass'. Supports 'Bootstrap' 3, 4 and 5 as well as their various 'Bootswatch' themes. 
An interactive widget is also provided for previewing themes in real time.", - "License": "MIT + file LICENSE", - "URL": "https://rstudio.github.io/bslib/, https://github.com/rstudio/bslib", - "BugReports": "https://github.com/rstudio/bslib/issues", + "Title": "Classes and Methods for Fast Memory-Efficient Boolean Selections", + "Authors@R": "c( person(\"Michael\", \"Chirico\", email = \"MichaelChirico4@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Jens\", \"Oehlschlägel\", role = \"aut\"), person(\"Brian\", \"Ripley\", role = \"ctb\") )", "Depends": [ - "R (>= 2.10)" - ], - "Imports": [ - "base64enc", - "cachem", - "fastmap (>= 1.1.1)", - "grDevices", - "htmltools (>= 0.5.8)", - "jquerylib (>= 0.1.3)", - "jsonlite", - "lifecycle", - "memoise (>= 2.0.1)", - "mime", - "rlang", - "sass (>= 0.4.9)" + "R (>= 3.4.0)" ], "Suggests": [ - "bsicons", - "curl", - "fontawesome", - "future", - "ggplot2", + "testthat (>= 3.0.0)", + "roxygen2", "knitr", - "magrittr", - "rappdirs", - "rmarkdown (>= 2.7)", - "shiny (> 1.8.1)", - "testthat", - "thematic", - "tools", - "utils", - "withr", - "yaml" + "markdown", + "rmarkdown", + "microbenchmark", + "bit64 (>= 4.0.0)", + "ff (>= 4.0.0)" ], - "Config/Needs/deploy": "BH, chiflights22, colourpicker, commonmark, cpp11, cpsievert/chiflights22, cpsievert/histoslider, dplyr, DT, ggplot2, ggridges, gt, hexbin, histoslider, htmlwidgets, lattice, leaflet, lubridate, markdown, modelr, plotly, reactable, reshape2, rprojroot, rsconnect, rstudio/shiny, scales, styler, tibble", - "Config/Needs/routine": "chromote, desc, renv", - "Config/Needs/website": "brio, crosstalk, dplyr, DT, ggplot2, glue, htmlwidgets, leaflet, lorem, palmerpenguins, plotly, purrr, rprojroot, rstudio/htmltools, scales, stringr, tidyr, webshot2", - "Config/testthat/edition": "3", - "Config/testthat/parallel": "true", - "Config/testthat/start-first": "zzzz-bs-sass, fonts, zzz-precompile, theme-*, rmd-*", + "Description": "Provided are classes for boolean and skewed boolean vectors, fast boolean methods, fast unique and non-unique integer sorting, fast set operations on sorted and unsorted sets of integers, and foundations for ff (range index, compression, chunked processing).", + "License": "GPL-2 | GPL-3", + "LazyLoad": "yes", + "ByteCompile": "yes", "Encoding": "UTF-8", + "URL": "https://github.com/r-lib/bit", + "VignetteBuilder": "knitr, rmarkdown", "RoxygenNote": "7.3.2", - "Collate": "'accordion.R' 'breakpoints.R' 'bs-current-theme.R' 'bs-dependencies.R' 'bs-global.R' 'bs-remove.R' 'bs-theme-layers.R' 'bs-theme-preset-bootswatch.R' 'bs-theme-preset-brand.R' 'bs-theme-preset-builtin.R' 'bs-theme-preset.R' 'utils.R' 'bs-theme-preview.R' 'bs-theme-update.R' 'bs-theme.R' 'bslib-package.R' 'buttons.R' 'card.R' 'deprecated.R' 'files.R' 'fill.R' 'imports.R' 'input-dark-mode.R' 'input-switch.R' 'layout.R' 'nav-items.R' 'nav-update.R' 'navbar_options.R' 'navs-legacy.R' 'navs.R' 'onLoad.R' 'page.R' 'popover.R' 'precompiled.R' 'print.R' 'shiny-devmode.R' 'sidebar.R' 'staticimports.R' 'tooltip.R' 'utils-deps.R' 'utils-shiny.R' 'utils-tags.R' 'value-box.R' 'version-default.R' 'versions.R'", - "NeedsCompilation": "no", - "Author": "Carson Sievert [aut, cre] (), Joe Cheng [aut], Garrick Aden-Buie [aut] (), Posit Software, PBC [cph, fnd], Bootstrap contributors [ctb] (Bootstrap library), Twitter, Inc [cph] (Bootstrap library), Javi Aguilar [ctb, cph] (Bootstrap colorpicker library), Thomas Park [ctb, cph] (Bootswatch library), PayPal [ctb, cph] (Bootstrap accessibility plugin)", - "Maintainer": "Carson 
Sievert ", - "Repository": "CRAN" - }, - "cachem": { - "Package": "cachem", - "Version": "1.1.0", - "Source": "Repository", - "Title": "Cache R Objects with Automatic Pruning", - "Description": "Key-value stores with automatic pruning. Caches can limit either their total size or the age of the oldest object (or both), automatically pruning objects to maintain the constraints.", - "Authors@R": "c( person(\"Winston\", \"Chang\", , \"winston@posit.co\", c(\"aut\", \"cre\")), person(family = \"Posit Software, PBC\", role = c(\"cph\", \"fnd\")))", - "License": "MIT + file LICENSE", - "Encoding": "UTF-8", - "ByteCompile": "true", - "URL": "https://cachem.r-lib.org/, https://github.com/r-lib/cachem", - "Imports": [ - "rlang", - "fastmap (>= 1.2.0)" - ], - "Suggests": [ - "testthat" - ], - "RoxygenNote": "7.2.3", - "Config/Needs/routine": "lobstr", - "Config/Needs/website": "pkgdown", + "Config/testthat/edition": "3", "NeedsCompilation": "yes", - "Author": "Winston Chang [aut, cre], Posit Software, PBC [cph, fnd]", - "Maintainer": "Winston Chang ", + "Author": "Michael Chirico [aut, cre], Jens Oehlschlägel [aut], Brian Ripley [ctb]", + "Maintainer": "Michael Chirico ", "Repository": "CRAN" }, - "callr": { - "Package": "callr", - "Version": "3.7.6", + "bit64": { + "Package": "bit64", + "Version": "4.6.0-1", "Source": "Repository", - "Title": "Call R from R", - "Authors@R": "c( person(\"Gábor\", \"Csárdi\", , \"csardi.gabor@gmail.com\", role = c(\"aut\", \"cre\", \"cph\"), comment = c(ORCID = \"0000-0001-7098-9676\")), person(\"Winston\", \"Chang\", role = \"aut\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")), person(\"Ascent Digital Services\", role = c(\"cph\", \"fnd\")) )", - "Description": "It is sometimes useful to perform a computation in a separate R process, without affecting the current R process at all. This packages does exactly that.", - "License": "MIT + file LICENSE", - "URL": "https://callr.r-lib.org, https://github.com/r-lib/callr", - "BugReports": "https://github.com/r-lib/callr/issues", + "Title": "A S3 Class for Vectors of 64bit Integers", + "Authors@R": "c( person(\"Michael\", \"Chirico\", email = \"michaelchirico4@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Jens\", \"Oehlschlägel\", role = \"aut\"), person(\"Leonardo\", \"Silvestri\", role = \"ctb\"), person(\"Ofek\", \"Shilon\", role = \"ctb\") )", "Depends": [ - "R (>= 3.4)" + "R (>= 3.4.0)", + "bit (>= 4.0.0)" ], + "Description": "Package 'bit64' provides serializable S3 atomic 64bit (signed) integers. These are useful for handling database keys and exact counting in +-2^63. WARNING: do not use them as replacement for 32bit integers, integer64 are not supported for subscripting by R-core and they have different semantics when combined with double, e.g. integer64 + double => integer64. Class integer64 can be used in vectors, matrices, arrays and data.frames. Methods are available for coercion from and to logicals, integers, doubles, characters and factors as well as many elementwise and summary functions. 
Many fast algorithmic operations such as 'match' and 'order' support inter- active data exploration and manipulation and optionally leverage caching.", + "License": "GPL-2 | GPL-3", + "LazyLoad": "yes", + "ByteCompile": "yes", + "URL": "https://github.com/r-lib/bit64", + "Encoding": "UTF-8", "Imports": [ - "processx (>= 3.6.1)", - "R6", + "graphics", + "methods", + "stats", "utils" ], "Suggests": [ - "asciicast (>= 2.3.1)", - "cli (>= 1.1.0)", - "mockery", - "ps", - "rprojroot", - "spelling", - "testthat (>= 3.2.0)", - "withr (>= 2.3.0)" + "testthat (>= 3.0.3)", + "withr" ], - "Config/Needs/website": "r-lib/asciicast, glue, htmlwidgets, igraph, tibble, tidyverse/tidytemplate", "Config/testthat/edition": "3", - "Encoding": "UTF-8", - "Language": "en-US", - "RoxygenNote": "7.3.1.9000", - "NeedsCompilation": "no", - "Author": "Gábor Csárdi [aut, cre, cph] (), Winston Chang [aut], Posit Software, PBC [cph, fnd], Ascent Digital Services [cph, fnd]", - "Maintainer": "Gábor Csárdi ", + "Config/needs/development": "testthat", + "RoxygenNote": "7.3.2", + "NeedsCompilation": "yes", + "Author": "Michael Chirico [aut, cre], Jens Oehlschlägel [aut], Leonardo Silvestri [ctb], Ofek Shilon [ctb]", + "Maintainer": "Michael Chirico ", "Repository": "CRAN" }, "cli": { @@ -356,30 +205,6 @@ "Maintainer": "Matthew Lincoln ", "Repository": "CRAN" }, - "commonmark": { - "Package": "commonmark", - "Version": "2.0.0", - "Source": "Repository", - "Type": "Package", - "Title": "High Performance CommonMark and Github Markdown Rendering in R", - "Authors@R": "c( person(\"Jeroen\", \"Ooms\", ,\"jeroenooms@gmail.com\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0002-4035-0289\")), person(\"John MacFarlane\", role = \"cph\", comment = \"Author of cmark\"))", - "Description": "The CommonMark specification defines a rationalized version of markdown syntax. This package uses the 'cmark' reference implementation for converting markdown text into various formats including html, latex and groff man. In addition it exposes the markdown parse tree in xml format. Also includes opt-in support for GFM extensions including tables, autolinks, and strikethrough text.", - "License": "BSD_2_clause + file LICENSE", - "URL": "https://docs.ropensci.org/commonmark/ https://ropensci.r-universe.dev/commonmark", - "BugReports": "https://github.com/r-lib/commonmark/issues", - "Suggests": [ - "curl", - "testthat", - "xml2" - ], - "RoxygenNote": "7.3.2", - "Language": "en-US", - "Encoding": "UTF-8", - "NeedsCompilation": "yes", - "Author": "Jeroen Ooms [aut, cre] (ORCID: ), John MacFarlane [cph] (Author of cmark)", - "Maintainer": "Jeroen Ooms ", - "Repository": "CRAN" - }, "cpp11": { "Package": "cpp11", "Version": "0.5.2", @@ -457,39 +282,6 @@ "Maintainer": "Gábor Csárdi ", "Repository": "CRAN" }, - "credentials": { - "Package": "credentials", - "Version": "2.0.2", - "Source": "Repository", - "Type": "Package", - "Title": "Tools for Managing SSH and Git Credentials", - "Authors@R": "person(\"Jeroen\", \"Ooms\", role = c(\"aut\", \"cre\"), email = \"jeroenooms@gmail.com\", comment = c(ORCID = \"0000-0002-4035-0289\"))", - "Description": "Setup and retrieve HTTPS and SSH credentials for use with 'git' and other services. For HTTPS remotes the package interfaces the 'git-credential' utility which 'git' uses to store HTTP usernames and passwords. For SSH remotes we provide convenient functions to find or generate appropriate SSH keys. 
The package both helps the user to setup a local git installation, and also provides a back-end for git/ssh client libraries to authenticate with existing user credentials.", - "License": "MIT + file LICENSE", - "SystemRequirements": "git (optional)", - "Encoding": "UTF-8", - "Imports": [ - "openssl (>= 1.3)", - "sys (>= 2.1)", - "curl", - "jsonlite", - "askpass" - ], - "Suggests": [ - "testthat", - "knitr", - "rmarkdown" - ], - "RoxygenNote": "7.2.1", - "VignetteBuilder": "knitr", - "Language": "en-US", - "URL": "https://docs.ropensci.org/credentials/ https://r-lib.r-universe.dev/credentials", - "BugReports": "https://github.com/r-lib/credentials/issues", - "NeedsCompilation": "no", - "Author": "Jeroen Ooms [aut, cre] ()", - "Maintainer": "Jeroen Ooms ", - "Repository": "CRAN" - }, "curl": { "Package": "curl", "Version": "6.4.0", @@ -524,2368 +316,898 @@ "Maintainer": "Jeroen Ooms ", "Repository": "CRAN" }, - "desc": { - "Package": "desc", - "Version": "1.4.3", + "dplyr": { + "Package": "dplyr", + "Version": "1.1.4", "Source": "Repository", - "Title": "Manipulate DESCRIPTION Files", - "Authors@R": "c( person(\"Gábor\", \"Csárdi\", , \"csardi.gabor@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Kirill\", \"Müller\", role = \"aut\"), person(\"Jim\", \"Hester\", , \"james.f.hester@gmail.com\", role = \"aut\"), person(\"Maëlle\", \"Salmon\", role = \"ctb\", comment = c(ORCID = \"0000-0002-2815-0399\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Maintainer": "Gábor Csárdi ", - "Description": "Tools to read, write, create, and manipulate DESCRIPTION files. It is intended for packages that create or manipulate other packages.", + "Type": "Package", + "Title": "A Grammar of Data Manipulation", + "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Romain\", \"François\", role = \"aut\", comment = c(ORCID = \"0000-0002-2444-4226\")), person(\"Lionel\", \"Henry\", role = \"aut\"), person(\"Kirill\", \"Müller\", role = \"aut\", comment = c(ORCID = \"0000-0002-1416-3412\")), person(\"Davis\", \"Vaughan\", , \"davis@posit.co\", role = \"aut\", comment = c(ORCID = \"0000-0003-4777-038X\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "A fast, consistent tool for working with data frame like objects, both in memory and out of memory.", "License": "MIT + file LICENSE", - "URL": "https://desc.r-lib.org/, https://github.com/r-lib/desc", - "BugReports": "https://github.com/r-lib/desc/issues", + "URL": "https://dplyr.tidyverse.org, https://github.com/tidyverse/dplyr", + "BugReports": "https://github.com/tidyverse/dplyr/issues", "Depends": [ - "R (>= 3.4)" + "R (>= 3.5.0)" ], "Imports": [ - "cli", + "cli (>= 3.4.0)", + "generics", + "glue (>= 1.3.2)", + "lifecycle (>= 1.0.3)", + "magrittr (>= 1.5)", + "methods", + "pillar (>= 1.9.0)", "R6", - "utils" + "rlang (>= 1.1.0)", + "tibble (>= 3.2.0)", + "tidyselect (>= 1.2.0)", + "utils", + "vctrs (>= 0.6.4)" ], "Suggests": [ + "bench", + "broom", "callr", "covr", - "gh", - "spelling", - "testthat", - "whoami", + "DBI", + "dbplyr (>= 2.2.1)", + "ggplot2", + "knitr", + "Lahman", + "lobstr", + "microbenchmark", + "nycflights13", + "purrr", + "rmarkdown", + "RMySQL", + "RPostgreSQL", + "RSQLite", + "stringi (>= 1.7.6)", + "testthat (>= 3.1.5)", + "tidyr (>= 1.3.0)", "withr" ], - "Config/Needs/website": "tidyverse/tidytemplate", + "VignetteBuilder": "knitr", + "Config/Needs/website": "tidyverse, shiny, 
pkgdown, tidyverse/tidytemplate", "Config/testthat/edition": "3", "Encoding": "UTF-8", - "Language": "en-US", + "LazyData": "true", "RoxygenNote": "7.2.3", - "Collate": "'assertions.R' 'authors-at-r.R' 'built.R' 'classes.R' 'collate.R' 'constants.R' 'deps.R' 'desc-package.R' 'description.R' 'encoding.R' 'find-package-root.R' 'latex.R' 'non-oo-api.R' 'package-archives.R' 'read.R' 'remotes.R' 'str.R' 'syntax_checks.R' 'urls.R' 'utils.R' 'validate.R' 'version.R'", - "NeedsCompilation": "no", - "Author": "Gábor Csárdi [aut, cre], Kirill Müller [aut], Jim Hester [aut], Maëlle Salmon [ctb] (), Posit Software, PBC [cph, fnd]", + "NeedsCompilation": "yes", + "Author": "Hadley Wickham [aut, cre] (), Romain François [aut] (), Lionel Henry [aut], Kirill Müller [aut] (), Davis Vaughan [aut] (), Posit Software, PBC [cph, fnd]", + "Maintainer": "Hadley Wickham ", "Repository": "CRAN" }, - "devtools": { - "Package": "devtools", - "Version": "2.4.5", + "generics": { + "Package": "generics", + "Version": "0.1.4", "Source": "Repository", - "Title": "Tools to Make Developing R Packages Easier", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", role = \"aut\"), person(\"Jim\", \"Hester\", role = \"aut\"), person(\"Winston\", \"Chang\", role = \"aut\"), person(\"Jennifer\", \"Bryan\", , \"jenny@rstudio.com\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0002-6983-2759\")), person(\"RStudio\", role = c(\"cph\", \"fnd\")) )", - "Description": "Collection of package development tools.", + "Title": "Common S3 Generics not Provided by Base R Methods Related to Model Fitting", + "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Max\", \"Kuhn\", , \"max@posit.co\", role = \"aut\"), person(\"Davis\", \"Vaughan\", , \"davis@posit.co\", role = \"aut\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\"), comment = c(ROR = \"https://ror.org/03wc8by49\")) )", + "Description": "In order to reduce potential package dependencies and conflicts, generics provides a number of commonly used S3 generics.", "License": "MIT + file LICENSE", - "URL": "https://devtools.r-lib.org/, https://github.com/r-lib/devtools", - "BugReports": "https://github.com/r-lib/devtools/issues", + "URL": "https://generics.r-lib.org, https://github.com/r-lib/generics", + "BugReports": "https://github.com/r-lib/generics/issues", "Depends": [ - "R (>= 3.0.2)", - "usethis (>= 2.1.6)" + "R (>= 3.6)" ], "Imports": [ - "cli (>= 3.3.0)", - "desc (>= 1.4.1)", - "ellipsis (>= 0.3.2)", - "fs (>= 1.5.2)", - "lifecycle (>= 1.0.1)", - "memoise (>= 2.0.1)", - "miniUI (>= 0.1.1.1)", - "pkgbuild (>= 1.3.1)", - "pkgdown (>= 2.0.6)", - "pkgload (>= 1.3.0)", - "profvis (>= 0.3.7)", - "rcmdcheck (>= 1.4.0)", - "remotes (>= 2.4.2)", - "rlang (>= 1.0.4)", - "roxygen2 (>= 7.2.1)", - "rversions (>= 2.1.1)", - "sessioninfo (>= 1.2.2)", - "stats", - "testthat (>= 3.1.5)", - "tools", - "urlchecker (>= 1.0.1)", - "utils", - "withr (>= 2.5.0)" + "methods" ], "Suggests": [ - "BiocManager (>= 1.30.18)", - "callr (>= 3.7.1)", - "covr (>= 3.5.1)", - "curl (>= 4.3.2)", - "digest (>= 0.6.29)", - "DT (>= 0.23)", - "foghorn (>= 1.4.2)", - "gh (>= 1.3.0)", - "gmailr (>= 1.0.1)", - "httr (>= 1.4.3)", - "knitr (>= 1.39)", - "lintr (>= 3.0.0)", - "MASS", - "mockery (>= 0.4.3)", - "pingr (>= 2.0.1)", - "rhub (>= 1.1.1)", - "rmarkdown (>= 2.14)", - "rstudioapi (>= 0.13)", - "spelling (>= 2.2)" + "covr", + "pkgload", + "testthat (>= 3.0.0)", + "tibble", + "withr" ], - 
"VignetteBuilder": "knitr", "Config/Needs/website": "tidyverse/tidytemplate", - "Encoding": "UTF-8", - "Language": "en-US", - "RoxygenNote": "7.2.1", "Config/testthat/edition": "3", + "Encoding": "UTF-8", + "RoxygenNote": "7.3.2", "NeedsCompilation": "no", - "Author": "Hadley Wickham [aut], Jim Hester [aut], Winston Chang [aut], Jennifer Bryan [aut, cre] (), RStudio [cph, fnd]", - "Maintainer": "Jennifer Bryan ", + "Author": "Hadley Wickham [aut, cre] (ORCID: ), Max Kuhn [aut], Davis Vaughan [aut], Posit Software, PBC [cph, fnd] (ROR: )", + "Maintainer": "Hadley Wickham ", "Repository": "CRAN" }, - "diffobj": { - "Package": "diffobj", - "Version": "0.3.6", + "glue": { + "Package": "glue", + "Version": "1.8.0", "Source": "Repository", - "Type": "Package", - "Title": "Diffs for R Objects", - "Description": "Generate a colorized diff of two R objects for an intuitive visualization of their differences.", - "Authors@R": "c( person( \"Brodie\", \"Gaslam\", email=\"brodie.gaslam@yahoo.com\", role=c(\"aut\", \"cre\")), person( \"Michael B.\", \"Allen\", email=\"ioplex@gmail.com\", role=c(\"ctb\", \"cph\"), comment=\"Original C implementation of Myers Diff Algorithm\"))", + "Title": "Interpreted String Literals", + "Authors@R": "c( person(\"Jim\", \"Hester\", role = \"aut\", comment = c(ORCID = \"0000-0002-2739-7082\")), person(\"Jennifer\", \"Bryan\", , \"jenny@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0002-6983-2759\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "An implementation of interpreted string literals, inspired by Python's Literal String Interpolation and Docstrings and Julia's Triple-Quoted String Literals .", + "License": "MIT + file LICENSE", + "URL": "https://glue.tidyverse.org/, https://github.com/tidyverse/glue", + "BugReports": "https://github.com/tidyverse/glue/issues", "Depends": [ - "R (>= 3.1.0)" + "R (>= 3.6)" ], - "License": "GPL-2 | GPL-3", - "URL": "https://github.com/brodieG/diffobj", - "BugReports": "https://github.com/brodieG/diffobj/issues", - "RoxygenNote": "7.2.3", - "VignetteBuilder": "knitr", - "Encoding": "UTF-8", - "Suggests": [ - "knitr", - "rmarkdown" - ], - "Collate": "'capt.R' 'options.R' 'pager.R' 'check.R' 'finalizer.R' 'misc.R' 'html.R' 'styles.R' 's4.R' 'core.R' 'diff.R' 'get.R' 'guides.R' 'hunks.R' 'layout.R' 'myerssimple.R' 'rdiff.R' 'rds.R' 'set.R' 'subset.R' 'summmary.R' 'system.R' 'text.R' 'tochar.R' 'trim.R' 'word.R'", "Imports": [ - "crayon (>= 1.3.2)", - "tools", - "methods", - "utils", - "stats" + "methods" + ], + "Suggests": [ + "crayon", + "DBI (>= 1.2.0)", + "dplyr", + "knitr", + "magrittr", + "rlang", + "rmarkdown", + "RSQLite", + "testthat (>= 3.2.0)", + "vctrs (>= 0.3.0)", + "waldo (>= 0.5.3)", + "withr" ], + "VignetteBuilder": "knitr", + "ByteCompile": "true", + "Config/Needs/website": "bench, forcats, ggbeeswarm, ggplot2, R.utils, rprintf, tidyr, tidyverse/tidytemplate", + "Config/testthat/edition": "3", + "Encoding": "UTF-8", + "RoxygenNote": "7.3.2", "NeedsCompilation": "yes", - "Author": "Brodie Gaslam [aut, cre], Michael B. 
Allen [ctb, cph] (Original C implementation of Myers Diff Algorithm)", - "Maintainer": "Brodie Gaslam ", + "Author": "Jim Hester [aut] (), Jennifer Bryan [aut, cre] (), Posit Software, PBC [cph, fnd]", + "Maintainer": "Jennifer Bryan ", "Repository": "CRAN" }, - "digest": { - "Package": "digest", - "Version": "0.6.37", + "hms": { + "Package": "hms", + "Version": "1.1.3", "Source": "Repository", - "Authors@R": "c(person(\"Dirk\", \"Eddelbuettel\", role = c(\"aut\", \"cre\"), email = \"edd@debian.org\", comment = c(ORCID = \"0000-0001-6419-907X\")), person(\"Antoine\", \"Lucas\", role=\"ctb\"), person(\"Jarek\", \"Tuszynski\", role=\"ctb\"), person(\"Henrik\", \"Bengtsson\", role=\"ctb\", comment = c(ORCID = \"0000-0002-7579-5165\")), person(\"Simon\", \"Urbanek\", role=\"ctb\", comment = c(ORCID = \"0000-0003-2297-1732\")), person(\"Mario\", \"Frasca\", role=\"ctb\"), person(\"Bryan\", \"Lewis\", role=\"ctb\"), person(\"Murray\", \"Stokely\", role=\"ctb\"), person(\"Hannes\", \"Muehleisen\", role=\"ctb\"), person(\"Duncan\", \"Murdoch\", role=\"ctb\"), person(\"Jim\", \"Hester\", role=\"ctb\"), person(\"Wush\", \"Wu\", role=\"ctb\", comment = c(ORCID = \"0000-0001-5180-0567\")), person(\"Qiang\", \"Kou\", role=\"ctb\", comment = c(ORCID = \"0000-0001-6786-5453\")), person(\"Thierry\", \"Onkelinx\", role=\"ctb\", comment = c(ORCID = \"0000-0001-8804-4216\")), person(\"Michel\", \"Lang\", role=\"ctb\", comment = c(ORCID = \"0000-0001-9754-0393\")), person(\"Viliam\", \"Simko\", role=\"ctb\"), person(\"Kurt\", \"Hornik\", role=\"ctb\", comment = c(ORCID = \"0000-0003-4198-9911\")), person(\"Radford\", \"Neal\", role=\"ctb\", comment = c(ORCID = \"0000-0002-2473-3407\")), person(\"Kendon\", \"Bell\", role=\"ctb\", comment = c(ORCID = \"0000-0002-9093-8312\")), person(\"Matthew\", \"de Queljoe\", role=\"ctb\"), person(\"Dmitry\", \"Selivanov\", role=\"ctb\"), person(\"Ion\", \"Suruceanu\", role=\"ctb\"), person(\"Bill\", \"Denney\", role=\"ctb\"), person(\"Dirk\", \"Schumacher\", role=\"ctb\"), person(\"András\", \"Svraka\", role=\"ctb\"), person(\"Sergey\", \"Fedorov\", role=\"ctb\"), person(\"Will\", \"Landau\", role=\"ctb\", comment = c(ORCID = \"0000-0003-1878-3253\")), person(\"Floris\", \"Vanderhaeghe\", role=\"ctb\", comment = c(ORCID = \"0000-0002-6378-6229\")), person(\"Kevin\", \"Tappe\", role=\"ctb\"), person(\"Harris\", \"McGehee\", role=\"ctb\"), person(\"Tim\", \"Mastny\", role=\"ctb\"), person(\"Aaron\", \"Peikert\", role=\"ctb\", comment = c(ORCID = \"0000-0001-7813-818X\")), person(\"Mark\", \"van der Loo\", role=\"ctb\", comment = c(ORCID = \"0000-0002-9807-4686\")), person(\"Chris\", \"Muir\", role=\"ctb\", comment = c(ORCID = \"0000-0003-2555-3878\")), person(\"Moritz\", \"Beller\", role=\"ctb\", comment = c(ORCID = \"0000-0003-4852-0526\")), person(\"Sebastian\", \"Campbell\", role=\"ctb\"), person(\"Winston\", \"Chang\", role=\"ctb\", comment = c(ORCID = \"0000-0002-1576-2126\")), person(\"Dean\", \"Attali\", role=\"ctb\", comment = c(ORCID = \"0000-0002-5645-3493\")), person(\"Michael\", \"Chirico\", role=\"ctb\", comment = c(ORCID = \"0000-0003-0787-087X\")), person(\"Kevin\", \"Ushey\", role=\"ctb\"))", - "Date": "2024-08-19", - "Title": "Create Compact Hash Digests of R Objects", - "Description": "Implementation of a function 'digest()' for the creation of hash digests of arbitrary R objects (using the 'md5', 'sha-1', 'sha-256', 'crc32', 'xxhash', 'murmurhash', 'spookyhash', 'blake3', 'crc32c', 'xxh3_64', and 'xxh3_128' algorithms) permitting easy comparison of R 
language objects, as well as functions such as'hmac()' to create hash-based message authentication code. Please note that this package is not meant to be deployed for cryptographic purposes for which more comprehensive (and widely tested) libraries such as 'OpenSSL' should be used.", - "URL": "https://github.com/eddelbuettel/digest, https://dirk.eddelbuettel.com/code/digest.html", - "BugReports": "https://github.com/eddelbuettel/digest/issues", - "Depends": [ - "R (>= 3.3.0)" - ], + "Title": "Pretty Time of Day", + "Date": "2023-03-21", + "Authors@R": "c( person(\"Kirill\", \"Müller\", role = c(\"aut\", \"cre\"), email = \"kirill@cynkra.com\", comment = c(ORCID = \"0000-0002-1416-3412\")), person(\"R Consortium\", role = \"fnd\"), person(\"RStudio\", role = \"fnd\") )", + "Description": "Implements an S3 class for storing and formatting time-of-day values, based on the 'difftime' class.", "Imports": [ - "utils" + "lifecycle", + "methods", + "pkgconfig", + "rlang (>= 1.0.2)", + "vctrs (>= 0.3.8)" ], - "License": "GPL (>= 2)", "Suggests": [ - "tinytest", - "simplermarkdown" + "crayon", + "lubridate", + "pillar (>= 1.1.0)", + "testthat (>= 3.0.0)" ], - "VignetteBuilder": "simplermarkdown", + "License": "MIT + file LICENSE", "Encoding": "UTF-8", - "NeedsCompilation": "yes", - "Author": "Dirk Eddelbuettel [aut, cre] (), Antoine Lucas [ctb], Jarek Tuszynski [ctb], Henrik Bengtsson [ctb] (), Simon Urbanek [ctb] (), Mario Frasca [ctb], Bryan Lewis [ctb], Murray Stokely [ctb], Hannes Muehleisen [ctb], Duncan Murdoch [ctb], Jim Hester [ctb], Wush Wu [ctb] (), Qiang Kou [ctb] (), Thierry Onkelinx [ctb] (), Michel Lang [ctb] (), Viliam Simko [ctb], Kurt Hornik [ctb] (), Radford Neal [ctb] (), Kendon Bell [ctb] (), Matthew de Queljoe [ctb], Dmitry Selivanov [ctb], Ion Suruceanu [ctb], Bill Denney [ctb], Dirk Schumacher [ctb], András Svraka [ctb], Sergey Fedorov [ctb], Will Landau [ctb] (), Floris Vanderhaeghe [ctb] (), Kevin Tappe [ctb], Harris McGehee [ctb], Tim Mastny [ctb], Aaron Peikert [ctb] (), Mark van der Loo [ctb] (), Chris Muir [ctb] (), Moritz Beller [ctb] (), Sebastian Campbell [ctb], Winston Chang [ctb] (), Dean Attali [ctb] (), Michael Chirico [ctb] (), Kevin Ushey [ctb]", - "Maintainer": "Dirk Eddelbuettel ", + "URL": "https://hms.tidyverse.org/, https://github.com/tidyverse/hms", + "BugReports": "https://github.com/tidyverse/hms/issues", + "RoxygenNote": "7.2.3", + "Config/testthat/edition": "3", + "Config/autostyle/scope": "line_breaks", + "Config/autostyle/strict": "false", + "Config/Needs/website": "tidyverse/tidytemplate", + "NeedsCompilation": "no", + "Author": "Kirill Müller [aut, cre] (), R Consortium [fnd], RStudio [fnd]", + "Maintainer": "Kirill Müller ", "Repository": "CRAN" }, - "downlit": { - "Package": "downlit", - "Version": "0.4.4", + "httr": { + "Package": "httr", + "Version": "1.4.7", "Source": "Repository", - "Title": "Syntax Highlighting and Automatic Linking", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "Syntax highlighting of R code, specifically designed for the needs of 'RMarkdown' packages like 'pkgdown', 'hugodown', and 'bookdown'. 
It includes linking of function calls to their documentation on the web, and automatic translation of ANSI escapes in output to the equivalent HTML.", + "Title": "Tools for Working with URLs and HTTP", + "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\")), person(\"Posit, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "Useful tools for working with HTTP organised by HTTP verbs (GET(), POST(), etc). Configuration functions make it easy to control additional request components (authenticate(), add_headers() and so on).", "License": "MIT + file LICENSE", - "URL": "https://downlit.r-lib.org/, https://github.com/r-lib/downlit", - "BugReports": "https://github.com/r-lib/downlit/issues", + "URL": "https://httr.r-lib.org/, https://github.com/r-lib/httr", + "BugReports": "https://github.com/r-lib/httr/issues", "Depends": [ - "R (>= 4.0.0)" + "R (>= 3.5)" ], "Imports": [ - "brio", - "desc", - "digest", - "evaluate", - "fansi", - "memoise", - "rlang", - "vctrs", - "withr", - "yaml" + "curl (>= 5.0.2)", + "jsonlite", + "mime", + "openssl (>= 0.8)", + "R6" ], "Suggests": [ "covr", - "htmltools", - "jsonlite", - "MASS", - "MassSpecWavelet", - "pkgload", + "httpuv", + "jpeg", + "knitr", + "png", + "readr", "rmarkdown", - "testthat (>= 3.0.0)", + "testthat (>= 0.8.0)", "xml2" ], + "VignetteBuilder": "knitr", "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", "Encoding": "UTF-8", - "RoxygenNote": "7.3.1", + "RoxygenNote": "7.2.3", "NeedsCompilation": "no", - "Author": "Hadley Wickham [aut, cre], Posit Software, PBC [cph, fnd]", + "Author": "Hadley Wickham [aut, cre], Posit, PBC [cph, fnd]", "Maintainer": "Hadley Wickham ", "Repository": "CRAN" }, - "dplyr": { - "Package": "dplyr", - "Version": "1.1.4", + "httr2": { + "Package": "httr2", + "Version": "1.2.0", "Source": "Repository", - "Type": "Package", - "Title": "A Grammar of Data Manipulation", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Romain\", \"François\", role = \"aut\", comment = c(ORCID = \"0000-0002-2444-4226\")), person(\"Lionel\", \"Henry\", role = \"aut\"), person(\"Kirill\", \"Müller\", role = \"aut\", comment = c(ORCID = \"0000-0002-1416-3412\")), person(\"Davis\", \"Vaughan\", , \"davis@posit.co\", role = \"aut\", comment = c(ORCID = \"0000-0003-4777-038X\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "A fast, consistent tool for working with data frame like objects, both in memory and out of memory.", + "Title": "Perform HTTP Requests and Process the Responses", + "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")), person(\"Maximilian\", \"Girlich\", role = \"ctb\") )", + "Description": "Tools for creating and modifying HTTP requests, then performing them and processing the results. 
'httr2' is a modern re-imagining of 'httr' that uses a pipe-based interface and solves more of the problems that API wrapping packages face.", "License": "MIT + file LICENSE", - "URL": "https://dplyr.tidyverse.org, https://github.com/tidyverse/dplyr", - "BugReports": "https://github.com/tidyverse/dplyr/issues", + "URL": "https://httr2.r-lib.org, https://github.com/r-lib/httr2", + "BugReports": "https://github.com/r-lib/httr2/issues", "Depends": [ - "R (>= 3.5.0)" + "R (>= 4.1)" ], "Imports": [ - "cli (>= 3.4.0)", - "generics", - "glue (>= 1.3.2)", - "lifecycle (>= 1.0.3)", - "magrittr (>= 1.5)", - "methods", - "pillar (>= 1.9.0)", + "cli (>= 3.0.0)", + "curl (>= 6.4.0)", + "glue", + "lifecycle", + "magrittr", + "openssl", "R6", + "rappdirs", "rlang (>= 1.1.0)", - "tibble (>= 3.2.0)", - "tidyselect (>= 1.2.0)", - "utils", - "vctrs (>= 0.6.4)" + "vctrs (>= 0.6.3)", + "withr" ], "Suggests": [ + "askpass", "bench", - "broom", - "callr", + "clipr", "covr", - "DBI", - "dbplyr (>= 2.2.1)", - "ggplot2", + "docopt", + "httpuv", + "jose", + "jsonlite", "knitr", - "Lahman", - "lobstr", - "microbenchmark", - "nycflights13", - "purrr", + "later (>= 1.4.0)", + "nanonext", + "paws.common", + "promises", "rmarkdown", - "RMySQL", - "RPostgreSQL", - "RSQLite", - "stringi (>= 1.7.6)", - "testthat (>= 3.1.5)", - "tidyr (>= 1.3.0)", - "withr" + "testthat (>= 3.1.8)", + "tibble", + "webfakes (>= 1.4.0)", + "xml2" ], "VignetteBuilder": "knitr", - "Config/Needs/website": "tidyverse, shiny, pkgdown, tidyverse/tidytemplate", + "Config/Needs/website": "tidyverse/tidytemplate", "Config/testthat/edition": "3", + "Config/testthat/parallel": "true", + "Config/testthat/start-first": "resp-stream, req-perform", "Encoding": "UTF-8", - "LazyData": "true", - "RoxygenNote": "7.2.3", - "NeedsCompilation": "yes", - "Author": "Hadley Wickham [aut, cre] (), Romain François [aut] (), Lionel Henry [aut], Kirill Müller [aut] (), Davis Vaughan [aut] (), Posit Software, PBC [cph, fnd]", + "RoxygenNote": "7.3.2", + "NeedsCompilation": "no", + "Author": "Hadley Wickham [aut, cre], Posit Software, PBC [cph, fnd], Maximilian Girlich [ctb]", "Maintainer": "Hadley Wickham ", "Repository": "CRAN" }, - "ellipsis": { - "Package": "ellipsis", - "Version": "0.3.2", + "janitor": { + "Package": "janitor", + "Version": "2.2.1", "Source": "Repository", - "Title": "Tools for Working with ...", - "Description": "The ellipsis is a powerful tool for extending functions. Unfortunately this power comes at a cost: misspelled arguments will be silently ignored. 
The ellipsis package provides a collection of functions to catch problems and alert the user.", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@rstudio.com\", role = c(\"aut\", \"cre\")), person(\"RStudio\", role = \"cph\") )", - "License": "MIT + file LICENSE", - "Encoding": "UTF-8", - "RoxygenNote": "7.1.1", - "URL": "https://ellipsis.r-lib.org, https://github.com/r-lib/ellipsis", - "BugReports": "https://github.com/r-lib/ellipsis/issues", + "Title": "Simple Tools for Examining and Cleaning Dirty Data", + "Authors@R": "c(person(\"Sam\", \"Firke\", email = \"samuel.firke@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Bill\", \"Denney\", email = \"wdenney@humanpredictions.com\", role = \"ctb\"), person(\"Chris\", \"Haid\", email = \"chrishaid@gmail.com\", role = \"ctb\"), person(\"Ryan\", \"Knight\", email = \"ryangknight@gmail.com\", role = \"ctb\"), person(\"Malte\", \"Grosser\", email = \"malte.grosser@gmail.com\", role = \"ctb\"), person(\"Jonathan\", \"Zadra\", email = \"jonathan.zadra@sorensonimpact.com\", role = \"ctb\"))", + "Description": "The main janitor functions can: perfectly format data.frame column names; provide quick counts of variable combinations (i.e., frequency tables and crosstabs); and explore duplicate records. Other janitor functions nicely format the tabulation results. These tabulate-and-report functions approximate popular features of SPSS and Microsoft Excel. This package follows the principles of the \"tidyverse\" and works well with the pipe function %>%. janitor was built with beginning-to-intermediate R users in mind and is optimized for user-friendliness.", + "URL": "https://github.com/sfirke/janitor, https://sfirke.github.io/janitor/", + "BugReports": "https://github.com/sfirke/janitor/issues", "Depends": [ - "R (>= 3.2)" + "R (>= 3.1.2)" ], "Imports": [ - "rlang (>= 0.3.0)" - ], - "Suggests": [ - "covr", - "testthat" + "dplyr (>= 1.0.0)", + "hms", + "lifecycle", + "lubridate", + "magrittr", + "purrr", + "rlang", + "stringi", + "stringr", + "snakecase (>= 0.9.2)", + "tidyselect (>= 1.0.0)", + "tidyr (>= 0.7.0)" ], - "NeedsCompilation": "yes", - "Author": "Hadley Wickham [aut, cre], RStudio [cph]", - "Maintainer": "Hadley Wickham ", - "Repository": "CRAN" - }, - "evaluate": { - "Package": "evaluate", - "Version": "1.0.4", - "Source": "Repository", - "Type": "Package", - "Title": "Parsing and Evaluation Tools that Provide More Details than the Default", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\")), person(\"Yihui\", \"Xie\", role = \"aut\", comment = c(ORCID = \"0000-0003-0645-5666\")), person(\"Michael\", \"Lawrence\", role = \"ctb\"), person(\"Thomas\", \"Kluyver\", role = \"ctb\"), person(\"Jeroen\", \"Ooms\", role = \"ctb\"), person(\"Barret\", \"Schloerke\", role = \"ctb\"), person(\"Adam\", \"Ryczkowski\", role = \"ctb\"), person(\"Hiroaki\", \"Yutani\", role = \"ctb\"), person(\"Michel\", \"Lang\", role = \"ctb\"), person(\"Karolis\", \"Koncevičius\", role = \"ctb\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "Parsing and evaluation tools that make it easy to recreate the command line behaviour of R.", "License": "MIT + file LICENSE", - "URL": "https://evaluate.r-lib.org/, https://github.com/r-lib/evaluate", - "BugReports": "https://github.com/r-lib/evaluate/issues", - "Depends": [ - "R (>= 3.6.0)" - ], + "RoxygenNote": "7.2.3", "Suggests": [ - "callr", - "covr", - "ggplot2 (>= 3.3.6)", - "lattice", - "methods", - "pkgload", - "ragg (>= 
1.4.0)", - "rlang (>= 1.1.5)", + "dbplyr", "knitr", + "rmarkdown", + "RSQLite", + "sf", "testthat (>= 3.0.0)", - "withr" + "tibble", + "tidygraph" ], - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", + "VignetteBuilder": "knitr", "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", + "Config/testthat/edition": "3", "NeedsCompilation": "no", - "Author": "Hadley Wickham [aut, cre], Yihui Xie [aut] (ORCID: ), Michael Lawrence [ctb], Thomas Kluyver [ctb], Jeroen Ooms [ctb], Barret Schloerke [ctb], Adam Ryczkowski [ctb], Hiroaki Yutani [ctb], Michel Lang [ctb], Karolis Koncevičius [ctb], Posit Software, PBC [cph, fnd]", - "Maintainer": "Hadley Wickham ", + "Author": "Sam Firke [aut, cre], Bill Denney [ctb], Chris Haid [ctb], Ryan Knight [ctb], Malte Grosser [ctb], Jonathan Zadra [ctb]", + "Maintainer": "Sam Firke ", "Repository": "CRAN" }, - "fansi": { - "Package": "fansi", - "Version": "1.0.6", + "jsonlite": { + "Package": "jsonlite", + "Version": "2.0.0", "Source": "Repository", - "Title": "ANSI Control Sequence Aware String Functions", - "Description": "Counterparts to R string manipulation functions that account for the effects of ANSI text formatting control sequences.", - "Authors@R": "c( person(\"Brodie\", \"Gaslam\", email=\"brodie.gaslam@yahoo.com\", role=c(\"aut\", \"cre\")), person(\"Elliott\", \"Sales De Andrade\", role=\"ctb\"), person(family=\"R Core Team\", email=\"R-core@r-project.org\", role=\"cph\", comment=\"UTF8 byte length calcs from src/util.c\" ))", + "Title": "A Simple and Robust JSON Parser and Generator for R", + "License": "MIT + file LICENSE", "Depends": [ - "R (>= 3.1.0)" + "methods" ], - "License": "GPL-2 | GPL-3", - "URL": "https://github.com/brodieG/fansi", - "BugReports": "https://github.com/brodieG/fansi/issues", - "VignetteBuilder": "knitr", + "Authors@R": "c( person(\"Jeroen\", \"Ooms\", role = c(\"aut\", \"cre\"), email = \"jeroenooms@gmail.com\", comment = c(ORCID = \"0000-0002-4035-0289\")), person(\"Duncan\", \"Temple Lang\", role = \"ctb\"), person(\"Lloyd\", \"Hilaiel\", role = \"cph\", comment=\"author of bundled libyajl\"))", + "URL": "https://jeroen.r-universe.dev/jsonlite https://arxiv.org/abs/1403.2805", + "BugReports": "https://github.com/jeroen/jsonlite/issues", + "Maintainer": "Jeroen Ooms ", + "VignetteBuilder": "knitr, R.rsp", + "Description": "A reasonably fast JSON parser and generator, optimized for statistical data and the web. Offers simple, flexible tools for working with JSON in R, and is particularly powerful for building pipelines and interacting with a web API. The implementation is based on the mapping described in the vignette (Ooms, 2014). In addition to converting JSON data from/to R objects, 'jsonlite' contains functions to stream, validate, and prettify JSON data. 
The unit tests included with the package verify that all edge cases are encoded and decoded consistently for use with dynamic data in systems and applications.", "Suggests": [ - "unitizer", - "knitr", - "rmarkdown" - ], - "Imports": [ - "grDevices", - "utils" - ], - "RoxygenNote": "7.2.3", - "Encoding": "UTF-8", - "Collate": "'constants.R' 'fansi-package.R' 'internal.R' 'load.R' 'misc.R' 'nchar.R' 'strwrap.R' 'strtrim.R' 'strsplit.R' 'substr2.R' 'trimws.R' 'tohtml.R' 'unhandled.R' 'normalize.R' 'sgr.R'", - "NeedsCompilation": "yes", - "Author": "Brodie Gaslam [aut, cre], Elliott Sales De Andrade [ctb], R Core Team [cph] (UTF8 byte length calcs from src/util.c)", - "Maintainer": "Brodie Gaslam ", - "Repository": "CRAN" - }, - "fastmap": { - "Package": "fastmap", - "Version": "1.2.0", - "Source": "Repository", - "Title": "Fast Data Structures", - "Authors@R": "c( person(\"Winston\", \"Chang\", email = \"winston@posit.co\", role = c(\"aut\", \"cre\")), person(given = \"Posit Software, PBC\", role = c(\"cph\", \"fnd\")), person(given = \"Tessil\", role = \"cph\", comment = \"hopscotch_map library\") )", - "Description": "Fast implementation of data structures, including a key-value store, stack, and queue. Environments are commonly used as key-value stores in R, but every time a new key is used, it is added to R's global symbol table, causing a small amount of memory leakage. This can be problematic in cases where many different keys are used. Fastmap avoids this memory leak issue by implementing the map using data structures in C++.", - "License": "MIT + file LICENSE", - "Encoding": "UTF-8", - "RoxygenNote": "7.2.3", - "Suggests": [ - "testthat (>= 2.1.1)" - ], - "URL": "https://r-lib.github.io/fastmap/, https://github.com/r-lib/fastmap", - "BugReports": "https://github.com/r-lib/fastmap/issues", - "NeedsCompilation": "yes", - "Author": "Winston Chang [aut, cre], Posit Software, PBC [cph, fnd], Tessil [cph] (hopscotch_map library)", - "Maintainer": "Winston Chang ", - "Repository": "CRAN" - }, - "fontawesome": { - "Package": "fontawesome", - "Version": "0.5.3", - "Source": "Repository", - "Type": "Package", - "Title": "Easily Work with 'Font Awesome' Icons", - "Description": "Easily and flexibly insert 'Font Awesome' icons into 'R Markdown' documents and 'Shiny' apps. These icons can be inserted into HTML content through inline 'SVG' tags or 'i' tags. 
There is also a utility function for exporting 'Font Awesome' icons as 'PNG' images for those situations where raster graphics are needed.", - "Authors@R": "c( person(\"Richard\", \"Iannone\", , \"rich@posit.co\", c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0003-3925-190X\")), person(\"Christophe\", \"Dervieux\", , \"cderv@posit.co\", role = \"ctb\", comment = c(ORCID = \"0000-0003-4474-2498\")), person(\"Winston\", \"Chang\", , \"winston@posit.co\", role = \"ctb\"), person(\"Dave\", \"Gandy\", role = c(\"ctb\", \"cph\"), comment = \"Font-Awesome font\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "License": "MIT + file LICENSE", - "URL": "https://github.com/rstudio/fontawesome, https://rstudio.github.io/fontawesome/", - "BugReports": "https://github.com/rstudio/fontawesome/issues", - "Encoding": "UTF-8", - "ByteCompile": "true", - "RoxygenNote": "7.3.2", - "Depends": [ - "R (>= 3.3.0)" - ], - "Imports": [ - "rlang (>= 1.0.6)", - "htmltools (>= 0.5.1.1)" - ], - "Suggests": [ - "covr", - "dplyr (>= 1.0.8)", - "gt (>= 0.9.0)", - "knitr (>= 1.31)", - "testthat (>= 3.0.0)", - "rsvg" - ], - "Config/testthat/edition": "3", - "NeedsCompilation": "no", - "Author": "Richard Iannone [aut, cre] (), Christophe Dervieux [ctb] (), Winston Chang [ctb], Dave Gandy [ctb, cph] (Font-Awesome font), Posit Software, PBC [cph, fnd]", - "Maintainer": "Richard Iannone ", - "Repository": "CRAN" - }, - "fs": { - "Package": "fs", - "Version": "1.6.6", - "Source": "Repository", - "Title": "Cross-Platform File System Operations Based on 'libuv'", - "Authors@R": "c( person(\"Jim\", \"Hester\", role = \"aut\"), person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = \"aut\"), person(\"Gábor\", \"Csárdi\", , \"csardi.gabor@gmail.com\", role = c(\"aut\", \"cre\")), person(\"libuv project contributors\", role = \"cph\", comment = \"libuv library\"), person(\"Joyent, Inc. and other Node contributors\", role = \"cph\", comment = \"libuv library\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "A cross-platform interface to file system operations, built on top of the 'libuv' C library.", - "License": "MIT + file LICENSE", - "URL": "https://fs.r-lib.org, https://github.com/r-lib/fs", - "BugReports": "https://github.com/r-lib/fs/issues", - "Depends": [ - "R (>= 3.6)" - ], - "Imports": [ - "methods" - ], - "Suggests": [ - "covr", - "crayon", - "knitr", - "pillar (>= 1.0.0)", - "rmarkdown", - "spelling", - "testthat (>= 3.0.0)", - "tibble (>= 1.1.0)", - "vctrs (>= 0.3.0)", - "withr" - ], - "VignetteBuilder": "knitr", - "ByteCompile": "true", - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Copyright": "file COPYRIGHTS", - "Encoding": "UTF-8", - "Language": "en-US", - "RoxygenNote": "7.2.3", - "SystemRequirements": "GNU make", - "NeedsCompilation": "yes", - "Author": "Jim Hester [aut], Hadley Wickham [aut], Gábor Csárdi [aut, cre], libuv project contributors [cph] (libuv library), Joyent, Inc. 
and other Node contributors [cph] (libuv library), Posit Software, PBC [cph, fnd]", - "Maintainer": "Gábor Csárdi ", - "Repository": "CRAN" - }, - "generics": { - "Package": "generics", - "Version": "0.1.4", - "Source": "Repository", - "Title": "Common S3 Generics not Provided by Base R Methods Related to Model Fitting", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Max\", \"Kuhn\", , \"max@posit.co\", role = \"aut\"), person(\"Davis\", \"Vaughan\", , \"davis@posit.co\", role = \"aut\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\"), comment = c(ROR = \"https://ror.org/03wc8by49\")) )", - "Description": "In order to reduce potential package dependencies and conflicts, generics provides a number of commonly used S3 generics.", - "License": "MIT + file LICENSE", - "URL": "https://generics.r-lib.org, https://github.com/r-lib/generics", - "BugReports": "https://github.com/r-lib/generics/issues", - "Depends": [ - "R (>= 3.6)" - ], - "Imports": [ - "methods" - ], - "Suggests": [ - "covr", - "pkgload", - "testthat (>= 3.0.0)", - "tibble", - "withr" - ], - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "NeedsCompilation": "no", - "Author": "Hadley Wickham [aut, cre] (ORCID: ), Max Kuhn [aut], Davis Vaughan [aut], Posit Software, PBC [cph, fnd] (ROR: )", - "Maintainer": "Hadley Wickham ", - "Repository": "CRAN" - }, - "gert": { - "Package": "gert", - "Version": "2.1.5", - "Source": "Repository", - "Type": "Package", - "Title": "Simple Git Client for R", - "Authors@R": "c( person(\"Jeroen\", \"Ooms\", role = c(\"aut\", \"cre\"), email = \"jeroenooms@gmail.com\", comment = c(ORCID = \"0000-0002-4035-0289\")), person(\"Jennifer\", \"Bryan\", role = \"ctb\", email = \"jenny@posit.co\", comment = c(ORCID = \"0000-0002-6983-2759\")))", - "Description": "Simple git client for R based on 'libgit2' with support for SSH and HTTPS remotes. All functions in 'gert' use basic R data types (such as vectors and data-frames) for their arguments and return values. 
User credentials are shared with command line 'git' through the git-credential store and ssh keys stored on disk or ssh-agent.", - "License": "MIT + file LICENSE", - "URL": "https://docs.ropensci.org/gert/, https://ropensci.r-universe.dev/gert", - "BugReports": "https://github.com/r-lib/gert/issues", - "Imports": [ - "askpass", - "credentials (>= 1.2.1)", - "openssl (>= 2.0.3)", - "rstudioapi (>= 0.11)", - "sys", - "zip (>= 2.1.0)" - ], - "Suggests": [ - "spelling", - "knitr", - "rmarkdown", - "testthat" - ], - "VignetteBuilder": "knitr", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2.9000", - "SystemRequirements": "libgit2 (>= 1.0): libgit2-devel (rpm) or libgit2-dev (deb)", - "Language": "en-US", - "NeedsCompilation": "yes", - "Author": "Jeroen Ooms [aut, cre] (), Jennifer Bryan [ctb] ()", - "Maintainer": "Jeroen Ooms ", - "Repository": "CRAN" - }, - "gh": { - "Package": "gh", - "Version": "1.5.0", - "Source": "Repository", - "Title": "'GitHub' 'API'", - "Authors@R": "c( person(\"Gábor\", \"Csárdi\", , \"csardi.gabor@gmail.com\", role = c(\"cre\", \"ctb\")), person(\"Jennifer\", \"Bryan\", role = \"aut\"), person(\"Hadley\", \"Wickham\", role = \"aut\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\"), comment = c(ROR = \"03wc8by49\")) )", - "Description": "Minimal client to access the 'GitHub' 'API'.", - "License": "MIT + file LICENSE", - "URL": "https://gh.r-lib.org/, https://github.com/r-lib/gh#readme", - "BugReports": "https://github.com/r-lib/gh/issues", - "Depends": [ - "R (>= 4.1)" - ], - "Imports": [ - "cli (>= 3.0.1)", - "gitcreds", - "glue", - "httr2 (>= 1.0.6)", - "ini", - "jsonlite", - "lifecycle", - "rlang (>= 1.0.0)" - ], - "Suggests": [ - "connectcreds", - "covr", - "knitr", - "rmarkdown", - "rprojroot", - "spelling", - "testthat (>= 3.0.0)", - "withr" - ], - "VignetteBuilder": "knitr", - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Config/usethis/last-upkeep": "2025-04-29", - "Encoding": "UTF-8", - "Language": "en-US", - "RoxygenNote": "7.3.2.9000", - "NeedsCompilation": "no", - "Author": "Gábor Csárdi [cre, ctb], Jennifer Bryan [aut], Hadley Wickham [aut], Posit Software, PBC [cph, fnd] (ROR: )", - "Maintainer": "Gábor Csárdi ", - "Repository": "CRAN" - }, - "gitcreds": { - "Package": "gitcreds", - "Version": "0.1.2", - "Source": "Repository", - "Title": "Query 'git' Credentials from 'R'", - "Authors@R": "c( person(\"Gábor\", \"Csárdi\", , \"csardi.gabor@gmail.com\", role = c(\"aut\", \"cre\")), person(\"RStudio\", role = c(\"cph\", \"fnd\")) )", - "Description": "Query, set, delete credentials from the 'git' credential store. Manage 'GitHub' tokens and other 'git' credentials. 
This package is to be used by other packages that need to authenticate to 'GitHub' and/or other 'git' repositories.", - "License": "MIT + file LICENSE", - "URL": "https://gitcreds.r-lib.org/, https://github.com/r-lib/gitcreds", - "BugReports": "https://github.com/r-lib/gitcreds/issues", - "Depends": [ - "R (>= 3.4)" - ], - "Suggests": [ - "codetools", - "covr", - "knitr", - "mockery", - "oskeyring", - "rmarkdown", - "testthat (>= 3.0.0)", - "withr" - ], - "VignetteBuilder": "knitr", - "Config/Needs/website": "tidyverse/tidytemplate", - "Encoding": "UTF-8", - "RoxygenNote": "7.2.1.9000", - "SystemRequirements": "git", - "Config/testthat/edition": "3", - "NeedsCompilation": "no", - "Author": "Gábor Csárdi [aut, cre], RStudio [cph, fnd]", - "Maintainer": "Gábor Csárdi ", - "Repository": "CRAN" - }, - "glue": { - "Package": "glue", - "Version": "1.8.0", - "Source": "Repository", - "Title": "Interpreted String Literals", - "Authors@R": "c( person(\"Jim\", \"Hester\", role = \"aut\", comment = c(ORCID = \"0000-0002-2739-7082\")), person(\"Jennifer\", \"Bryan\", , \"jenny@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0002-6983-2759\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "An implementation of interpreted string literals, inspired by Python's Literal String Interpolation and Docstrings and Julia's Triple-Quoted String Literals .", - "License": "MIT + file LICENSE", - "URL": "https://glue.tidyverse.org/, https://github.com/tidyverse/glue", - "BugReports": "https://github.com/tidyverse/glue/issues", - "Depends": [ - "R (>= 3.6)" - ], - "Imports": [ - "methods" - ], - "Suggests": [ - "crayon", - "DBI (>= 1.2.0)", - "dplyr", - "knitr", - "magrittr", - "rlang", - "rmarkdown", - "RSQLite", - "testthat (>= 3.2.0)", - "vctrs (>= 0.3.0)", - "waldo (>= 0.5.3)", - "withr" - ], - "VignetteBuilder": "knitr", - "ByteCompile": "true", - "Config/Needs/website": "bench, forcats, ggbeeswarm, ggplot2, R.utils, rprintf, tidyr, tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "NeedsCompilation": "yes", - "Author": "Jim Hester [aut] (), Jennifer Bryan [aut, cre] (), Posit Software, PBC [cph, fnd]", - "Maintainer": "Jennifer Bryan ", - "Repository": "CRAN" - }, - "here": { - "Package": "here", - "Version": "1.0.1", - "Source": "Repository", - "Title": "A Simpler Way to Find Your Files", - "Date": "2020-12-13", - "Authors@R": "c(person(given = \"Kirill\", family = \"M\\u00fcller\", role = c(\"aut\", \"cre\"), email = \"krlmlr+r@mailbox.org\", comment = c(ORCID = \"0000-0002-1416-3412\")), person(given = \"Jennifer\", family = \"Bryan\", role = \"ctb\", email = \"jenny@rstudio.com\", comment = c(ORCID = \"0000-0002-6983-2759\")))", - "Description": "Constructs paths to your project's files. Declare the relative path of a file within your project with 'i_am()'. 
Use the 'here()' function as a drop-in replacement for 'file.path()', it will always locate the files relative to your project root.", - "License": "MIT + file LICENSE", - "URL": "https://here.r-lib.org/, https://github.com/r-lib/here", - "BugReports": "https://github.com/r-lib/here/issues", - "Imports": [ - "rprojroot (>= 2.0.2)" - ], - "Suggests": [ - "conflicted", - "covr", - "fs", - "knitr", - "palmerpenguins", - "plyr", - "readr", - "rlang", - "rmarkdown", - "testthat", - "uuid", - "withr" - ], - "VignetteBuilder": "knitr", - "Encoding": "UTF-8", - "LazyData": "true", - "RoxygenNote": "7.1.1.9000", - "Config/testthat/edition": "3", - "NeedsCompilation": "no", - "Author": "Kirill Müller [aut, cre] (), Jennifer Bryan [ctb] ()", - "Maintainer": "Kirill Müller ", - "Repository": "CRAN" - }, - "highr": { - "Package": "highr", - "Version": "0.11", - "Source": "Repository", - "Type": "Package", - "Title": "Syntax Highlighting for R Source Code", - "Authors@R": "c( person(\"Yihui\", \"Xie\", role = c(\"aut\", \"cre\"), email = \"xie@yihui.name\", comment = c(ORCID = \"0000-0003-0645-5666\")), person(\"Yixuan\", \"Qiu\", role = \"aut\"), person(\"Christopher\", \"Gandrud\", role = \"ctb\"), person(\"Qiang\", \"Li\", role = \"ctb\") )", - "Description": "Provides syntax highlighting for R source code. Currently it supports LaTeX and HTML output. Source code of other languages is supported via Andre Simon's highlight package ().", - "Depends": [ - "R (>= 3.3.0)" - ], - "Imports": [ - "xfun (>= 0.18)" - ], - "Suggests": [ - "knitr", - "markdown", - "testit" - ], - "License": "GPL", - "URL": "https://github.com/yihui/highr", - "BugReports": "https://github.com/yihui/highr/issues", - "VignetteBuilder": "knitr", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.1", - "NeedsCompilation": "no", - "Author": "Yihui Xie [aut, cre] (), Yixuan Qiu [aut], Christopher Gandrud [ctb], Qiang Li [ctb]", - "Maintainer": "Yihui Xie ", - "Repository": "CRAN" - }, - "hms": { - "Package": "hms", - "Version": "1.1.3", - "Source": "Repository", - "Title": "Pretty Time of Day", - "Date": "2023-03-21", - "Authors@R": "c( person(\"Kirill\", \"Müller\", role = c(\"aut\", \"cre\"), email = \"kirill@cynkra.com\", comment = c(ORCID = \"0000-0002-1416-3412\")), person(\"R Consortium\", role = \"fnd\"), person(\"RStudio\", role = \"fnd\") )", - "Description": "Implements an S3 class for storing and formatting time-of-day values, based on the 'difftime' class.", - "Imports": [ - "lifecycle", - "methods", - "pkgconfig", - "rlang (>= 1.0.2)", - "vctrs (>= 0.3.8)" - ], - "Suggests": [ - "crayon", - "lubridate", - "pillar (>= 1.1.0)", - "testthat (>= 3.0.0)" - ], - "License": "MIT + file LICENSE", - "Encoding": "UTF-8", - "URL": "https://hms.tidyverse.org/, https://github.com/tidyverse/hms", - "BugReports": "https://github.com/tidyverse/hms/issues", - "RoxygenNote": "7.2.3", - "Config/testthat/edition": "3", - "Config/autostyle/scope": "line_breaks", - "Config/autostyle/strict": "false", - "Config/Needs/website": "tidyverse/tidytemplate", - "NeedsCompilation": "no", - "Author": "Kirill Müller [aut, cre] (), R Consortium [fnd], RStudio [fnd]", - "Maintainer": "Kirill Müller ", - "Repository": "CRAN" - }, - "htmltools": { - "Package": "htmltools", - "Version": "0.5.8.1", - "Source": "Repository", - "Type": "Package", - "Title": "Tools for HTML", - "Authors@R": "c( person(\"Joe\", \"Cheng\", , \"joe@posit.co\", role = \"aut\"), person(\"Carson\", \"Sievert\", , \"carson@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = 
\"0000-0002-4958-2844\")), person(\"Barret\", \"Schloerke\", , \"barret@posit.co\", role = \"aut\", comment = c(ORCID = \"0000-0001-9986-114X\")), person(\"Winston\", \"Chang\", , \"winston@posit.co\", role = \"aut\", comment = c(ORCID = \"0000-0002-1576-2126\")), person(\"Yihui\", \"Xie\", , \"yihui@posit.co\", role = \"aut\"), person(\"Jeff\", \"Allen\", role = \"aut\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "Tools for HTML generation and output.", - "License": "GPL (>= 2)", - "URL": "https://github.com/rstudio/htmltools, https://rstudio.github.io/htmltools/", - "BugReports": "https://github.com/rstudio/htmltools/issues", - "Depends": [ - "R (>= 2.14.1)" - ], - "Imports": [ - "base64enc", - "digest", - "fastmap (>= 1.1.0)", - "grDevices", - "rlang (>= 1.0.0)", - "utils" - ], - "Suggests": [ - "Cairo", - "markdown", - "ragg", - "shiny", - "testthat", - "withr" - ], - "Enhances": [ - "knitr" - ], - "Config/Needs/check": "knitr", - "Config/Needs/website": "rstudio/quillt, bench", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.1", - "Collate": "'colors.R' 'fill.R' 'html_dependency.R' 'html_escape.R' 'html_print.R' 'htmltools-package.R' 'images.R' 'known_tags.R' 'selector.R' 'staticimports.R' 'tag_query.R' 'utils.R' 'tags.R' 'template.R'", - "NeedsCompilation": "yes", - "Author": "Joe Cheng [aut], Carson Sievert [aut, cre] (), Barret Schloerke [aut] (), Winston Chang [aut] (), Yihui Xie [aut], Jeff Allen [aut], Posit Software, PBC [cph, fnd]", - "Maintainer": "Carson Sievert ", - "Repository": "CRAN" - }, - "htmlwidgets": { - "Package": "htmlwidgets", - "Version": "1.6.4", - "Source": "Repository", - "Type": "Package", - "Title": "HTML Widgets for R", - "Authors@R": "c( person(\"Ramnath\", \"Vaidyanathan\", role = c(\"aut\", \"cph\")), person(\"Yihui\", \"Xie\", role = \"aut\"), person(\"JJ\", \"Allaire\", role = \"aut\"), person(\"Joe\", \"Cheng\", , \"joe@posit.co\", role = \"aut\"), person(\"Carson\", \"Sievert\", , \"carson@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0002-4958-2844\")), person(\"Kenton\", \"Russell\", role = c(\"aut\", \"cph\")), person(\"Ellis\", \"Hughes\", role = \"ctb\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "A framework for creating HTML widgets that render in various contexts including the R console, 'R Markdown' documents, and 'Shiny' web applications.", - "License": "MIT + file LICENSE", - "URL": "https://github.com/ramnathv/htmlwidgets", - "BugReports": "https://github.com/ramnathv/htmlwidgets/issues", - "Imports": [ - "grDevices", - "htmltools (>= 0.5.7)", - "jsonlite (>= 0.9.16)", - "knitr (>= 1.8)", - "rmarkdown", - "yaml" - ], - "Suggests": [ - "testthat" - ], - "Enhances": [ - "shiny (>= 1.1)" - ], - "VignetteBuilder": "knitr", - "Encoding": "UTF-8", - "RoxygenNote": "7.2.3", - "NeedsCompilation": "no", - "Author": "Ramnath Vaidyanathan [aut, cph], Yihui Xie [aut], JJ Allaire [aut], Joe Cheng [aut], Carson Sievert [aut, cre] (), Kenton Russell [aut, cph], Ellis Hughes [ctb], Posit Software, PBC [cph, fnd]", - "Maintainer": "Carson Sievert ", - "Repository": "CRAN" - }, - "httpuv": { - "Package": "httpuv", - "Version": "1.6.16", - "Source": "Repository", - "Type": "Package", - "Title": "HTTP and WebSocket Server Library", - "Authors@R": "c( person(\"Joe\", \"Cheng\", , \"joe@posit.co\", role = \"aut\"), person(\"Winston\", \"Chang\", , \"winston@posit.co\", role = c(\"aut\", \"cre\")), person(\"Posit, PBC\", \"fnd\", role = \"cph\"), person(\"Hector\", 
\"Corrada Bravo\", role = \"ctb\"), person(\"Jeroen\", \"Ooms\", role = \"ctb\"), person(\"Andrzej\", \"Krzemienski\", role = \"cph\", comment = \"optional.hpp\"), person(\"libuv project contributors\", role = \"cph\", comment = \"libuv library, see src/libuv/AUTHORS file\"), person(\"Joyent, Inc. and other Node contributors\", role = \"cph\", comment = \"libuv library, see src/libuv/AUTHORS file; and http-parser library, see src/http-parser/AUTHORS file\"), person(\"Niels\", \"Provos\", role = \"cph\", comment = \"libuv subcomponent: tree.h\"), person(\"Internet Systems Consortium, Inc.\", role = \"cph\", comment = \"libuv subcomponent: inet_pton and inet_ntop, contained in src/libuv/src/inet.c\"), person(\"Alexander\", \"Chemeris\", role = \"cph\", comment = \"libuv subcomponent: stdint-msvc2008.h (from msinttypes)\"), person(\"Google, Inc.\", role = \"cph\", comment = \"libuv subcomponent: pthread-fixes.c\"), person(\"Sony Mobile Communcations AB\", role = \"cph\", comment = \"libuv subcomponent: pthread-fixes.c\"), person(\"Berkeley Software Design Inc.\", role = \"cph\", comment = \"libuv subcomponent: android-ifaddrs.h, android-ifaddrs.c\"), person(\"Kenneth\", \"MacKay\", role = \"cph\", comment = \"libuv subcomponent: android-ifaddrs.h, android-ifaddrs.c\"), person(\"Emergya (Cloud4all, FP7/2007-2013, grant agreement no 289016)\", role = \"cph\", comment = \"libuv subcomponent: android-ifaddrs.h, android-ifaddrs.c\"), person(\"Steve\", \"Reid\", role = \"aut\", comment = \"SHA-1 implementation\"), person(\"James\", \"Brown\", role = \"aut\", comment = \"SHA-1 implementation\"), person(\"Bob\", \"Trower\", role = \"aut\", comment = \"base64 implementation\"), person(\"Alexander\", \"Peslyak\", role = \"aut\", comment = \"MD5 implementation\"), person(\"Trantor Standard Systems\", role = \"cph\", comment = \"base64 implementation\"), person(\"Igor\", \"Sysoev\", role = \"cph\", comment = \"http-parser\") )", - "Description": "Provides low-level socket and protocol support for handling HTTP and WebSocket requests directly from within R. It is primarily intended as a building block for other packages, rather than making it particularly easy to create complete web applications using httpuv alone. httpuv is built on top of the libuv and http-parser C libraries, both of which were developed by Joyent, Inc. (See LICENSE file for libuv and http-parser license information.)", - "License": "GPL (>= 2) | file LICENSE", - "URL": "https://github.com/rstudio/httpuv", - "BugReports": "https://github.com/rstudio/httpuv/issues", - "Depends": [ - "R (>= 2.15.1)" - ], - "Imports": [ - "later (>= 0.8.0)", - "promises", - "R6", - "Rcpp (>= 1.0.7)", - "utils" - ], - "Suggests": [ - "callr", - "curl", - "jsonlite", - "testthat", - "websocket" - ], - "LinkingTo": [ - "later", - "Rcpp" - ], - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "SystemRequirements": "GNU make, zlib", - "Collate": "'RcppExports.R' 'httpuv.R' 'random_port.R' 'server.R' 'staticServer.R' 'static_paths.R' 'utils.R'", - "NeedsCompilation": "yes", - "Author": "Joe Cheng [aut], Winston Chang [aut, cre], Posit, PBC fnd [cph], Hector Corrada Bravo [ctb], Jeroen Ooms [ctb], Andrzej Krzemienski [cph] (optional.hpp), libuv project contributors [cph] (libuv library, see src/libuv/AUTHORS file), Joyent, Inc. and other Node contributors [cph] (libuv library, see src/libuv/AUTHORS file; and http-parser library, see src/http-parser/AUTHORS file), Niels Provos [cph] (libuv subcomponent: tree.h), Internet Systems Consortium, Inc. 
[cph] (libuv subcomponent: inet_pton and inet_ntop, contained in src/libuv/src/inet.c), Alexander Chemeris [cph] (libuv subcomponent: stdint-msvc2008.h (from msinttypes)), Google, Inc. [cph] (libuv subcomponent: pthread-fixes.c), Sony Mobile Communcations AB [cph] (libuv subcomponent: pthread-fixes.c), Berkeley Software Design Inc. [cph] (libuv subcomponent: android-ifaddrs.h, android-ifaddrs.c), Kenneth MacKay [cph] (libuv subcomponent: android-ifaddrs.h, android-ifaddrs.c), Emergya (Cloud4all, FP7/2007-2013, grant agreement no 289016) [cph] (libuv subcomponent: android-ifaddrs.h, android-ifaddrs.c), Steve Reid [aut] (SHA-1 implementation), James Brown [aut] (SHA-1 implementation), Bob Trower [aut] (base64 implementation), Alexander Peslyak [aut] (MD5 implementation), Trantor Standard Systems [cph] (base64 implementation), Igor Sysoev [cph] (http-parser)", - "Maintainer": "Winston Chang ", - "Repository": "CRAN" - }, - "httr": { - "Package": "httr", - "Version": "1.4.7", - "Source": "Repository", - "Title": "Tools for Working with URLs and HTTP", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\")), person(\"Posit, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "Useful tools for working with HTTP organised by HTTP verbs (GET(), POST(), etc). Configuration functions make it easy to control additional request components (authenticate(), add_headers() and so on).", - "License": "MIT + file LICENSE", - "URL": "https://httr.r-lib.org/, https://github.com/r-lib/httr", - "BugReports": "https://github.com/r-lib/httr/issues", - "Depends": [ - "R (>= 3.5)" - ], - "Imports": [ - "curl (>= 5.0.2)", - "jsonlite", - "mime", - "openssl (>= 0.8)", - "R6" - ], - "Suggests": [ - "covr", - "httpuv", - "jpeg", - "knitr", - "png", - "readr", - "rmarkdown", - "testthat (>= 0.8.0)", - "xml2" - ], - "VignetteBuilder": "knitr", - "Config/Needs/website": "tidyverse/tidytemplate", - "Encoding": "UTF-8", - "RoxygenNote": "7.2.3", - "NeedsCompilation": "no", - "Author": "Hadley Wickham [aut, cre], Posit, PBC [cph, fnd]", - "Maintainer": "Hadley Wickham ", - "Repository": "CRAN" - }, - "httr2": { - "Package": "httr2", - "Version": "1.2.0", - "Source": "Repository", - "Title": "Perform HTTP Requests and Process the Responses", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")), person(\"Maximilian\", \"Girlich\", role = \"ctb\") )", - "Description": "Tools for creating and modifying HTTP requests, then performing them and processing the results. 
'httr2' is a modern re-imagining of 'httr' that uses a pipe-based interface and solves more of the problems that API wrapping packages face.", - "License": "MIT + file LICENSE", - "URL": "https://httr2.r-lib.org, https://github.com/r-lib/httr2", - "BugReports": "https://github.com/r-lib/httr2/issues", - "Depends": [ - "R (>= 4.1)" - ], - "Imports": [ - "cli (>= 3.0.0)", - "curl (>= 6.4.0)", - "glue", - "lifecycle", - "magrittr", - "openssl", - "R6", - "rappdirs", - "rlang (>= 1.1.0)", - "vctrs (>= 0.6.3)", - "withr" - ], - "Suggests": [ - "askpass", - "bench", - "clipr", - "covr", - "docopt", - "httpuv", - "jose", - "jsonlite", - "knitr", - "later (>= 1.4.0)", - "nanonext", - "paws.common", - "promises", - "rmarkdown", - "testthat (>= 3.1.8)", - "tibble", - "webfakes (>= 1.4.0)", - "xml2" - ], - "VignetteBuilder": "knitr", - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Config/testthat/parallel": "true", - "Config/testthat/start-first": "resp-stream, req-perform", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "NeedsCompilation": "no", - "Author": "Hadley Wickham [aut, cre], Posit Software, PBC [cph, fnd], Maximilian Girlich [ctb]", - "Maintainer": "Hadley Wickham ", - "Repository": "CRAN" - }, - "ini": { - "Package": "ini", - "Version": "0.3.1", - "Source": "Repository", - "Type": "Package", - "Title": "Read and Write '.ini' Files", - "Date": "2018-05-19", - "Author": "David Valentim Dias", - "Maintainer": "David Valentim Dias ", - "Description": "Parse simple '.ini' configuration files to an structured list. Users can manipulate this resulting list with lapply() functions. This same structured list can be used to write back to file after modifications.", - "License": "GPL-3", - "URL": "https://github.com/dvdscripter/ini", - "BugReports": "https://github.com/dvdscripter/ini/issues", - "LazyData": "FALSE", - "RoxygenNote": "6.0.1", - "Suggests": [ - "testthat" - ], - "NeedsCompilation": "no", - "Repository": "CRAN", - "Encoding": "UTF-8" - }, - "janitor": { - "Package": "janitor", - "Version": "2.2.1", - "Source": "Repository", - "Title": "Simple Tools for Examining and Cleaning Dirty Data", - "Authors@R": "c(person(\"Sam\", \"Firke\", email = \"samuel.firke@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Bill\", \"Denney\", email = \"wdenney@humanpredictions.com\", role = \"ctb\"), person(\"Chris\", \"Haid\", email = \"chrishaid@gmail.com\", role = \"ctb\"), person(\"Ryan\", \"Knight\", email = \"ryangknight@gmail.com\", role = \"ctb\"), person(\"Malte\", \"Grosser\", email = \"malte.grosser@gmail.com\", role = \"ctb\"), person(\"Jonathan\", \"Zadra\", email = \"jonathan.zadra@sorensonimpact.com\", role = \"ctb\"))", - "Description": "The main janitor functions can: perfectly format data.frame column names; provide quick counts of variable combinations (i.e., frequency tables and crosstabs); and explore duplicate records. Other janitor functions nicely format the tabulation results. These tabulate-and-report functions approximate popular features of SPSS and Microsoft Excel. This package follows the principles of the \"tidyverse\" and works well with the pipe function %>%. 
janitor was built with beginning-to-intermediate R users in mind and is optimized for user-friendliness.", - "URL": "https://github.com/sfirke/janitor, https://sfirke.github.io/janitor/", - "BugReports": "https://github.com/sfirke/janitor/issues", - "Depends": [ - "R (>= 3.1.2)" - ], - "Imports": [ - "dplyr (>= 1.0.0)", - "hms", - "lifecycle", - "lubridate", - "magrittr", - "purrr", - "rlang", - "stringi", - "stringr", - "snakecase (>= 0.9.2)", - "tidyselect (>= 1.0.0)", - "tidyr (>= 0.7.0)" - ], - "License": "MIT + file LICENSE", - "RoxygenNote": "7.2.3", - "Suggests": [ - "dbplyr", - "knitr", - "rmarkdown", - "RSQLite", - "sf", - "testthat (>= 3.0.0)", - "tibble", - "tidygraph" - ], - "VignetteBuilder": "knitr", - "Encoding": "UTF-8", - "Config/testthat/edition": "3", - "NeedsCompilation": "no", - "Author": "Sam Firke [aut, cre], Bill Denney [ctb], Chris Haid [ctb], Ryan Knight [ctb], Malte Grosser [ctb], Jonathan Zadra [ctb]", - "Maintainer": "Sam Firke ", - "Repository": "CRAN" - }, - "jquerylib": { - "Package": "jquerylib", - "Version": "0.1.4", - "Source": "Repository", - "Title": "Obtain 'jQuery' as an HTML Dependency Object", - "Authors@R": "c( person(\"Carson\", \"Sievert\", role = c(\"aut\", \"cre\"), email = \"carson@rstudio.com\", comment = c(ORCID = \"0000-0002-4958-2844\")), person(\"Joe\", \"Cheng\", role = \"aut\", email = \"joe@rstudio.com\"), person(family = \"RStudio\", role = \"cph\"), person(family = \"jQuery Foundation\", role = \"cph\", comment = \"jQuery library and jQuery UI library\"), person(family = \"jQuery contributors\", role = c(\"ctb\", \"cph\"), comment = \"jQuery library; authors listed in inst/lib/jquery-AUTHORS.txt\") )", - "Description": "Obtain any major version of 'jQuery' () and use it in any webpage generated by 'htmltools' (e.g. 'shiny', 'htmlwidgets', and 'rmarkdown'). Most R users don't need to use this package directly, but other R packages (e.g. 'shiny', 'rmarkdown', etc.) depend on this package to avoid bundling redundant copies of 'jQuery'.", - "License": "MIT + file LICENSE", - "Encoding": "UTF-8", - "Config/testthat/edition": "3", - "RoxygenNote": "7.0.2", - "Imports": [ - "htmltools" - ], - "Suggests": [ - "testthat" - ], - "NeedsCompilation": "no", - "Author": "Carson Sievert [aut, cre] (), Joe Cheng [aut], RStudio [cph], jQuery Foundation [cph] (jQuery library and jQuery UI library), jQuery contributors [ctb, cph] (jQuery library; authors listed in inst/lib/jquery-AUTHORS.txt)", - "Maintainer": "Carson Sievert ", - "Repository": "CRAN" - }, - "jsonlite": { - "Package": "jsonlite", - "Version": "2.0.0", - "Source": "Repository", - "Title": "A Simple and Robust JSON Parser and Generator for R", - "License": "MIT + file LICENSE", - "Depends": [ - "methods" - ], - "Authors@R": "c( person(\"Jeroen\", \"Ooms\", role = c(\"aut\", \"cre\"), email = \"jeroenooms@gmail.com\", comment = c(ORCID = \"0000-0002-4035-0289\")), person(\"Duncan\", \"Temple Lang\", role = \"ctb\"), person(\"Lloyd\", \"Hilaiel\", role = \"cph\", comment=\"author of bundled libyajl\"))", - "URL": "https://jeroen.r-universe.dev/jsonlite https://arxiv.org/abs/1403.2805", - "BugReports": "https://github.com/jeroen/jsonlite/issues", - "Maintainer": "Jeroen Ooms ", - "VignetteBuilder": "knitr, R.rsp", - "Description": "A reasonably fast JSON parser and generator, optimized for statistical data and the web. Offers simple, flexible tools for working with JSON in R, and is particularly powerful for building pipelines and interacting with a web API. 
The implementation is based on the mapping described in the vignette (Ooms, 2014). In addition to converting JSON data from/to R objects, 'jsonlite' contains functions to stream, validate, and prettify JSON data. The unit tests included with the package verify that all edge cases are encoded and decoded consistently for use with dynamic data in systems and applications.", - "Suggests": [ - "httr", - "vctrs", - "testthat", - "knitr", - "rmarkdown", - "R.rsp", - "sf" - ], - "RoxygenNote": "7.3.2", - "Encoding": "UTF-8", - "NeedsCompilation": "yes", - "Author": "Jeroen Ooms [aut, cre] (), Duncan Temple Lang [ctb], Lloyd Hilaiel [cph] (author of bundled libyajl)", - "Repository": "CRAN" - }, - "knitr": { - "Package": "knitr", - "Version": "1.50", - "Source": "Repository", - "Type": "Package", - "Title": "A General-Purpose Package for Dynamic Report Generation in R", - "Authors@R": "c( person(\"Yihui\", \"Xie\", role = c(\"aut\", \"cre\"), email = \"xie@yihui.name\", comment = c(ORCID = \"0000-0003-0645-5666\", URL = \"https://yihui.org\")), person(\"Abhraneel\", \"Sarma\", role = \"ctb\"), person(\"Adam\", \"Vogt\", role = \"ctb\"), person(\"Alastair\", \"Andrew\", role = \"ctb\"), person(\"Alex\", \"Zvoleff\", role = \"ctb\"), person(\"Amar\", \"Al-Zubaidi\", role = \"ctb\"), person(\"Andre\", \"Simon\", role = \"ctb\", comment = \"the CSS files under inst/themes/ were derived from the Highlight package http://www.andre-simon.de\"), person(\"Aron\", \"Atkins\", role = \"ctb\"), person(\"Aaron\", \"Wolen\", role = \"ctb\"), person(\"Ashley\", \"Manton\", role = \"ctb\"), person(\"Atsushi\", \"Yasumoto\", role = \"ctb\", comment = c(ORCID = \"0000-0002-8335-495X\")), person(\"Ben\", \"Baumer\", role = \"ctb\"), person(\"Brian\", \"Diggs\", role = \"ctb\"), person(\"Brian\", \"Zhang\", role = \"ctb\"), person(\"Bulat\", \"Yapparov\", role = \"ctb\"), person(\"Cassio\", \"Pereira\", role = \"ctb\"), person(\"Christophe\", \"Dervieux\", role = \"ctb\"), person(\"David\", \"Hall\", role = \"ctb\"), person(\"David\", \"Hugh-Jones\", role = \"ctb\"), person(\"David\", \"Robinson\", role = \"ctb\"), person(\"Doug\", \"Hemken\", role = \"ctb\"), person(\"Duncan\", \"Murdoch\", role = \"ctb\"), person(\"Elio\", \"Campitelli\", role = \"ctb\"), person(\"Ellis\", \"Hughes\", role = \"ctb\"), person(\"Emily\", \"Riederer\", role = \"ctb\"), person(\"Fabian\", \"Hirschmann\", role = \"ctb\"), person(\"Fitch\", \"Simeon\", role = \"ctb\"), person(\"Forest\", \"Fang\", role = \"ctb\"), person(c(\"Frank\", \"E\", \"Harrell\", \"Jr\"), role = \"ctb\", comment = \"the Sweavel package at inst/misc/Sweavel.sty\"), person(\"Garrick\", \"Aden-Buie\", role = \"ctb\"), person(\"Gregoire\", \"Detrez\", role = \"ctb\"), person(\"Hadley\", \"Wickham\", role = \"ctb\"), person(\"Hao\", \"Zhu\", role = \"ctb\"), person(\"Heewon\", \"Jeon\", role = \"ctb\"), person(\"Henrik\", \"Bengtsson\", role = \"ctb\"), person(\"Hiroaki\", \"Yutani\", role = \"ctb\"), person(\"Ian\", \"Lyttle\", role = \"ctb\"), person(\"Hodges\", \"Daniel\", role = \"ctb\"), person(\"Jacob\", \"Bien\", role = \"ctb\"), person(\"Jake\", \"Burkhead\", role = \"ctb\"), person(\"James\", \"Manton\", role = \"ctb\"), person(\"Jared\", \"Lander\", role = \"ctb\"), person(\"Jason\", \"Punyon\", role = \"ctb\"), person(\"Javier\", \"Luraschi\", role = \"ctb\"), person(\"Jeff\", \"Arnold\", role = \"ctb\"), person(\"Jenny\", \"Bryan\", role = \"ctb\"), person(\"Jeremy\", \"Ashkenas\", role = c(\"ctb\", \"cph\"), comment = \"the CSS file at 
inst/misc/docco-classic.css\"), person(\"Jeremy\", \"Stephens\", role = \"ctb\"), person(\"Jim\", \"Hester\", role = \"ctb\"), person(\"Joe\", \"Cheng\", role = \"ctb\"), person(\"Johannes\", \"Ranke\", role = \"ctb\"), person(\"John\", \"Honaker\", role = \"ctb\"), person(\"John\", \"Muschelli\", role = \"ctb\"), person(\"Jonathan\", \"Keane\", role = \"ctb\"), person(\"JJ\", \"Allaire\", role = \"ctb\"), person(\"Johan\", \"Toloe\", role = \"ctb\"), person(\"Jonathan\", \"Sidi\", role = \"ctb\"), person(\"Joseph\", \"Larmarange\", role = \"ctb\"), person(\"Julien\", \"Barnier\", role = \"ctb\"), person(\"Kaiyin\", \"Zhong\", role = \"ctb\"), person(\"Kamil\", \"Slowikowski\", role = \"ctb\"), person(\"Karl\", \"Forner\", role = \"ctb\"), person(c(\"Kevin\", \"K.\"), \"Smith\", role = \"ctb\"), person(\"Kirill\", \"Mueller\", role = \"ctb\"), person(\"Kohske\", \"Takahashi\", role = \"ctb\"), person(\"Lorenz\", \"Walthert\", role = \"ctb\"), person(\"Lucas\", \"Gallindo\", role = \"ctb\"), person(\"Marius\", \"Hofert\", role = \"ctb\"), person(\"Martin\", \"Modrák\", role = \"ctb\"), person(\"Michael\", \"Chirico\", role = \"ctb\"), person(\"Michael\", \"Friendly\", role = \"ctb\"), person(\"Michal\", \"Bojanowski\", role = \"ctb\"), person(\"Michel\", \"Kuhlmann\", role = \"ctb\"), person(\"Miller\", \"Patrick\", role = \"ctb\"), person(\"Nacho\", \"Caballero\", role = \"ctb\"), person(\"Nick\", \"Salkowski\", role = \"ctb\"), person(\"Niels Richard\", \"Hansen\", role = \"ctb\"), person(\"Noam\", \"Ross\", role = \"ctb\"), person(\"Obada\", \"Mahdi\", role = \"ctb\"), person(\"Pavel N.\", \"Krivitsky\", role = \"ctb\", comment=c(ORCID = \"0000-0002-9101-3362\")), person(\"Pedro\", \"Faria\", role = \"ctb\"), person(\"Qiang\", \"Li\", role = \"ctb\"), person(\"Ramnath\", \"Vaidyanathan\", role = \"ctb\"), person(\"Richard\", \"Cotton\", role = \"ctb\"), person(\"Robert\", \"Krzyzanowski\", role = \"ctb\"), person(\"Rodrigo\", \"Copetti\", role = \"ctb\"), person(\"Romain\", \"Francois\", role = \"ctb\"), person(\"Ruaridh\", \"Williamson\", role = \"ctb\"), person(\"Sagiru\", \"Mati\", role = \"ctb\", comment = c(ORCID = \"0000-0003-1413-3974\")), person(\"Scott\", \"Kostyshak\", role = \"ctb\"), person(\"Sebastian\", \"Meyer\", role = \"ctb\"), person(\"Sietse\", \"Brouwer\", role = \"ctb\"), person(c(\"Simon\", \"de\"), \"Bernard\", role = \"ctb\"), person(\"Sylvain\", \"Rousseau\", role = \"ctb\"), person(\"Taiyun\", \"Wei\", role = \"ctb\"), person(\"Thibaut\", \"Assus\", role = \"ctb\"), person(\"Thibaut\", \"Lamadon\", role = \"ctb\"), person(\"Thomas\", \"Leeper\", role = \"ctb\"), person(\"Tim\", \"Mastny\", role = \"ctb\"), person(\"Tom\", \"Torsney-Weir\", role = \"ctb\"), person(\"Trevor\", \"Davis\", role = \"ctb\"), person(\"Viktoras\", \"Veitas\", role = \"ctb\"), person(\"Weicheng\", \"Zhu\", role = \"ctb\"), person(\"Wush\", \"Wu\", role = \"ctb\"), person(\"Zachary\", \"Foster\", role = \"ctb\"), person(\"Zhian N.\", \"Kamvar\", role = \"ctb\", comment = c(ORCID = \"0000-0003-1458-7108\")), person(given = \"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "Provides a general-purpose tool for dynamic report generation in R using Literate Programming techniques.", - "Depends": [ - "R (>= 3.6.0)" - ], - "Imports": [ - "evaluate (>= 0.15)", - "highr (>= 0.11)", - "methods", - "tools", - "xfun (>= 0.51)", - "yaml (>= 2.1.19)" - ], - "Suggests": [ - "bslib", - "codetools", - "DBI (>= 0.4-1)", - "digest", - "formatR", - "gifski", - "gridSVG", - 
"htmlwidgets (>= 0.7)", - "jpeg", - "JuliaCall (>= 0.11.1)", - "magick", - "litedown", - "markdown (>= 1.3)", - "png", - "ragg", - "reticulate (>= 1.4)", - "rgl (>= 0.95.1201)", - "rlang", - "rmarkdown", - "sass", - "showtext", - "styler (>= 1.2.0)", - "targets (>= 0.6.0)", - "testit", - "tibble", - "tikzDevice (>= 0.10)", - "tinytex (>= 0.56)", - "webshot", - "rstudioapi", - "svglite" - ], - "License": "GPL", - "URL": "https://yihui.org/knitr/", - "BugReports": "https://github.com/yihui/knitr/issues", - "Encoding": "UTF-8", - "VignetteBuilder": "litedown, knitr", - "SystemRequirements": "Package vignettes based on R Markdown v2 or reStructuredText require Pandoc (http://pandoc.org). The function rst2pdf() requires rst2pdf (https://github.com/rst2pdf/rst2pdf).", - "Collate": "'block.R' 'cache.R' 'citation.R' 'hooks-html.R' 'plot.R' 'utils.R' 'defaults.R' 'concordance.R' 'engine.R' 'highlight.R' 'themes.R' 'header.R' 'hooks-asciidoc.R' 'hooks-chunk.R' 'hooks-extra.R' 'hooks-latex.R' 'hooks-md.R' 'hooks-rst.R' 'hooks-textile.R' 'hooks.R' 'output.R' 'package.R' 'pandoc.R' 'params.R' 'parser.R' 'pattern.R' 'rocco.R' 'spin.R' 'table.R' 'template.R' 'utils-conversion.R' 'utils-rd2html.R' 'utils-string.R' 'utils-sweave.R' 'utils-upload.R' 'utils-vignettes.R' 'zzz.R'", - "RoxygenNote": "7.3.2", - "NeedsCompilation": "no", - "Author": "Yihui Xie [aut, cre] (, https://yihui.org), Abhraneel Sarma [ctb], Adam Vogt [ctb], Alastair Andrew [ctb], Alex Zvoleff [ctb], Amar Al-Zubaidi [ctb], Andre Simon [ctb] (the CSS files under inst/themes/ were derived from the Highlight package http://www.andre-simon.de), Aron Atkins [ctb], Aaron Wolen [ctb], Ashley Manton [ctb], Atsushi Yasumoto [ctb] (), Ben Baumer [ctb], Brian Diggs [ctb], Brian Zhang [ctb], Bulat Yapparov [ctb], Cassio Pereira [ctb], Christophe Dervieux [ctb], David Hall [ctb], David Hugh-Jones [ctb], David Robinson [ctb], Doug Hemken [ctb], Duncan Murdoch [ctb], Elio Campitelli [ctb], Ellis Hughes [ctb], Emily Riederer [ctb], Fabian Hirschmann [ctb], Fitch Simeon [ctb], Forest Fang [ctb], Frank E Harrell Jr [ctb] (the Sweavel package at inst/misc/Sweavel.sty), Garrick Aden-Buie [ctb], Gregoire Detrez [ctb], Hadley Wickham [ctb], Hao Zhu [ctb], Heewon Jeon [ctb], Henrik Bengtsson [ctb], Hiroaki Yutani [ctb], Ian Lyttle [ctb], Hodges Daniel [ctb], Jacob Bien [ctb], Jake Burkhead [ctb], James Manton [ctb], Jared Lander [ctb], Jason Punyon [ctb], Javier Luraschi [ctb], Jeff Arnold [ctb], Jenny Bryan [ctb], Jeremy Ashkenas [ctb, cph] (the CSS file at inst/misc/docco-classic.css), Jeremy Stephens [ctb], Jim Hester [ctb], Joe Cheng [ctb], Johannes Ranke [ctb], John Honaker [ctb], John Muschelli [ctb], Jonathan Keane [ctb], JJ Allaire [ctb], Johan Toloe [ctb], Jonathan Sidi [ctb], Joseph Larmarange [ctb], Julien Barnier [ctb], Kaiyin Zhong [ctb], Kamil Slowikowski [ctb], Karl Forner [ctb], Kevin K. Smith [ctb], Kirill Mueller [ctb], Kohske Takahashi [ctb], Lorenz Walthert [ctb], Lucas Gallindo [ctb], Marius Hofert [ctb], Martin Modrák [ctb], Michael Chirico [ctb], Michael Friendly [ctb], Michal Bojanowski [ctb], Michel Kuhlmann [ctb], Miller Patrick [ctb], Nacho Caballero [ctb], Nick Salkowski [ctb], Niels Richard Hansen [ctb], Noam Ross [ctb], Obada Mahdi [ctb], Pavel N. 
Krivitsky [ctb] (), Pedro Faria [ctb], Qiang Li [ctb], Ramnath Vaidyanathan [ctb], Richard Cotton [ctb], Robert Krzyzanowski [ctb], Rodrigo Copetti [ctb], Romain Francois [ctb], Ruaridh Williamson [ctb], Sagiru Mati [ctb] (), Scott Kostyshak [ctb], Sebastian Meyer [ctb], Sietse Brouwer [ctb], Simon de Bernard [ctb], Sylvain Rousseau [ctb], Taiyun Wei [ctb], Thibaut Assus [ctb], Thibaut Lamadon [ctb], Thomas Leeper [ctb], Tim Mastny [ctb], Tom Torsney-Weir [ctb], Trevor Davis [ctb], Viktoras Veitas [ctb], Weicheng Zhu [ctb], Wush Wu [ctb], Zachary Foster [ctb], Zhian N. Kamvar [ctb] (), Posit Software, PBC [cph, fnd]", - "Maintainer": "Yihui Xie ", - "Repository": "CRAN" - }, - "later": { - "Package": "later", - "Version": "1.4.2", - "Source": "Repository", - "Type": "Package", - "Title": "Utilities for Scheduling Functions to Execute Later with Event Loops", - "Authors@R": "c( person(\"Winston\", \"Chang\", role = c(\"aut\", \"cre\"), email = \"winston@posit.co\"), person(\"Joe\", \"Cheng\", role = c(\"aut\"), email = \"joe@posit.co\"), person(\"Charlie\", \"Gao\", role = c(\"aut\"), email = \"charlie.gao@shikokuchuo.net\", comment = c(ORCID = \"0000-0002-0750-061X\")), person(family = \"Posit Software, PBC\", role = \"cph\"), person(\"Marcus\", \"Geelnard\", role = c(\"ctb\", \"cph\"), comment = \"TinyCThread library, https://tinycthread.github.io/\"), person(\"Evan\", \"Nemerson\", role = c(\"ctb\", \"cph\"), comment = \"TinyCThread library, https://tinycthread.github.io/\") )", - "Description": "Executes arbitrary R or C functions some time after the current time, after the R execution stack has emptied. The functions are scheduled in an event loop.", - "URL": "https://r-lib.github.io/later/, https://github.com/r-lib/later", - "BugReports": "https://github.com/r-lib/later/issues", - "License": "MIT + file LICENSE", - "Imports": [ - "Rcpp (>= 0.12.9)", - "rlang" - ], - "LinkingTo": [ - "Rcpp" - ], - "RoxygenNote": "7.3.2", - "Suggests": [ - "knitr", - "nanonext", - "R6", - "rmarkdown", - "testthat (>= 2.1.0)" - ], - "VignetteBuilder": "knitr", - "Encoding": "UTF-8", - "NeedsCompilation": "yes", - "Author": "Winston Chang [aut, cre], Joe Cheng [aut], Charlie Gao [aut] (), Posit Software, PBC [cph], Marcus Geelnard [ctb, cph] (TinyCThread library, https://tinycthread.github.io/), Evan Nemerson [ctb, cph] (TinyCThread library, https://tinycthread.github.io/)", - "Maintainer": "Winston Chang ", - "Repository": "CRAN" - }, - "lifecycle": { - "Package": "lifecycle", - "Version": "1.0.4", - "Source": "Repository", - "Title": "Manage the Life Cycle of your Package Functions", - "Authors@R": "c( person(\"Lionel\", \"Henry\", , \"lionel@posit.co\", role = c(\"aut\", \"cre\")), person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = \"aut\", comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "Manage the life cycle of your exported functions with shared conventions, documentation badges, and user-friendly deprecation warnings.", - "License": "MIT + file LICENSE", - "URL": "https://lifecycle.r-lib.org/, https://github.com/r-lib/lifecycle", - "BugReports": "https://github.com/r-lib/lifecycle/issues", - "Depends": [ - "R (>= 3.6)" - ], - "Imports": [ - "cli (>= 3.4.0)", - "glue", - "rlang (>= 1.1.0)" - ], - "Suggests": [ - "covr", - "crayon", - "knitr", - "lintr", - "rmarkdown", - "testthat (>= 3.0.1)", - "tibble", - "tidyverse", - "tools", - "vctrs", - "withr" - ], - "VignetteBuilder": "knitr", - "Config/Needs/website": 
"tidyverse/tidytemplate, usethis", - "Config/testthat/edition": "3", - "Encoding": "UTF-8", - "RoxygenNote": "7.2.1", - "NeedsCompilation": "no", - "Author": "Lionel Henry [aut, cre], Hadley Wickham [aut] (), Posit Software, PBC [cph, fnd]", - "Maintainer": "Lionel Henry ", - "Repository": "CRAN" - }, - "lubridate": { - "Package": "lubridate", - "Version": "1.9.4", - "Source": "Repository", - "Type": "Package", - "Title": "Make Dealing with Dates a Little Easier", - "Authors@R": "c( person(\"Vitalie\", \"Spinu\", , \"spinuvit@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Garrett\", \"Grolemund\", role = \"aut\"), person(\"Hadley\", \"Wickham\", role = \"aut\"), person(\"Davis\", \"Vaughan\", role = \"ctb\"), person(\"Ian\", \"Lyttle\", role = \"ctb\"), person(\"Imanuel\", \"Costigan\", role = \"ctb\"), person(\"Jason\", \"Law\", role = \"ctb\"), person(\"Doug\", \"Mitarotonda\", role = \"ctb\"), person(\"Joseph\", \"Larmarange\", role = \"ctb\"), person(\"Jonathan\", \"Boiser\", role = \"ctb\"), person(\"Chel Hee\", \"Lee\", role = \"ctb\") )", - "Maintainer": "Vitalie Spinu ", - "Description": "Functions to work with date-times and time-spans: fast and user friendly parsing of date-time data, extraction and updating of components of a date-time (years, months, days, hours, minutes, and seconds), algebraic manipulation on date-time and time-span objects. The 'lubridate' package has a consistent and memorable syntax that makes working with dates easy and fun.", - "License": "GPL (>= 2)", - "URL": "https://lubridate.tidyverse.org, https://github.com/tidyverse/lubridate", - "BugReports": "https://github.com/tidyverse/lubridate/issues", - "Depends": [ - "methods", - "R (>= 3.2)" - ], - "Imports": [ - "generics", - "timechange (>= 0.3.0)" - ], - "Suggests": [ - "covr", - "knitr", - "rmarkdown", - "testthat (>= 2.1.0)", - "vctrs (>= 0.6.5)" - ], - "Enhances": [ - "chron", - "data.table", - "timeDate", - "tis", - "zoo" - ], - "VignetteBuilder": "knitr", - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Encoding": "UTF-8", - "LazyData": "true", - "RoxygenNote": "7.2.3", - "SystemRequirements": "C++11, A system with zoneinfo data (e.g. /usr/share/zoneinfo). 
On Windows the zoneinfo included with R is used.", - "Collate": "'Dates.r' 'POSIXt.r' 'util.r' 'parse.r' 'timespans.r' 'intervals.r' 'difftimes.r' 'durations.r' 'periods.r' 'accessors-date.R' 'accessors-day.r' 'accessors-dst.r' 'accessors-hour.r' 'accessors-minute.r' 'accessors-month.r' 'accessors-quarter.r' 'accessors-second.r' 'accessors-tz.r' 'accessors-week.r' 'accessors-year.r' 'am-pm.r' 'time-zones.r' 'numeric.r' 'coercion.r' 'constants.r' 'cyclic_encoding.r' 'data.r' 'decimal-dates.r' 'deprecated.r' 'format_ISO8601.r' 'guess.r' 'hidden.r' 'instants.r' 'leap-years.r' 'ops-addition.r' 'ops-compare.r' 'ops-division.r' 'ops-integer-division.r' 'ops-m+.r' 'ops-modulo.r' 'ops-multiplication.r' 'ops-subtraction.r' 'package.r' 'pretty.r' 'round.r' 'stamp.r' 'tzdir.R' 'update.r' 'vctrs.R' 'zzz.R'", - "NeedsCompilation": "yes", - "Author": "Vitalie Spinu [aut, cre], Garrett Grolemund [aut], Hadley Wickham [aut], Davis Vaughan [ctb], Ian Lyttle [ctb], Imanuel Costigan [ctb], Jason Law [ctb], Doug Mitarotonda [ctb], Joseph Larmarange [ctb], Jonathan Boiser [ctb], Chel Hee Lee [ctb]", - "Repository": "CRAN" - }, - "magrittr": { - "Package": "magrittr", - "Version": "2.0.3", - "Source": "Repository", - "Type": "Package", - "Title": "A Forward-Pipe Operator for R", - "Authors@R": "c( person(\"Stefan Milton\", \"Bache\", , \"stefan@stefanbache.dk\", role = c(\"aut\", \"cph\"), comment = \"Original author and creator of magrittr\"), person(\"Hadley\", \"Wickham\", , \"hadley@rstudio.com\", role = \"aut\"), person(\"Lionel\", \"Henry\", , \"lionel@rstudio.com\", role = \"cre\"), person(\"RStudio\", role = c(\"cph\", \"fnd\")) )", - "Description": "Provides a mechanism for chaining commands with a new forward-pipe operator, %>%. This operator will forward a value, or the result of an expression, into the next function call/expression. There is flexible support for the type of right-hand side expressions. For more information, see package vignette. 
To quote Rene Magritte, \"Ceci n'est pas un pipe.\"", - "License": "MIT + file LICENSE", - "URL": "https://magrittr.tidyverse.org, https://github.com/tidyverse/magrittr", - "BugReports": "https://github.com/tidyverse/magrittr/issues", - "Depends": [ - "R (>= 3.4.0)" - ], - "Suggests": [ - "covr", - "knitr", - "rlang", - "rmarkdown", - "testthat" - ], - "VignetteBuilder": "knitr", - "ByteCompile": "Yes", - "Config/Needs/website": "tidyverse/tidytemplate", - "Encoding": "UTF-8", - "RoxygenNote": "7.1.2", - "NeedsCompilation": "yes", - "Author": "Stefan Milton Bache [aut, cph] (Original author and creator of magrittr), Hadley Wickham [aut], Lionel Henry [cre], RStudio [cph, fnd]", - "Maintainer": "Lionel Henry ", - "Repository": "CRAN" - }, - "memoise": { - "Package": "memoise", - "Version": "2.0.1", - "Source": "Repository", - "Title": "'Memoisation' of Functions", - "Authors@R": "c(person(given = \"Hadley\", family = \"Wickham\", role = \"aut\", email = \"hadley@rstudio.com\"), person(given = \"Jim\", family = \"Hester\", role = \"aut\"), person(given = \"Winston\", family = \"Chang\", role = c(\"aut\", \"cre\"), email = \"winston@rstudio.com\"), person(given = \"Kirill\", family = \"Müller\", role = \"aut\", email = \"krlmlr+r@mailbox.org\"), person(given = \"Daniel\", family = \"Cook\", role = \"aut\", email = \"danielecook@gmail.com\"), person(given = \"Mark\", family = \"Edmondson\", role = \"ctb\", email = \"r@sunholo.com\"))", - "Description": "Cache the results of a function so that when you call it again with the same arguments it returns the previously computed value.", - "License": "MIT + file LICENSE", - "URL": "https://memoise.r-lib.org, https://github.com/r-lib/memoise", - "BugReports": "https://github.com/r-lib/memoise/issues", - "Imports": [ - "rlang (>= 0.4.10)", - "cachem" - ], - "Suggests": [ - "digest", - "aws.s3", - "covr", - "googleAuthR", - "googleCloudStorageR", - "httr", - "testthat" - ], - "Encoding": "UTF-8", - "RoxygenNote": "7.1.2", - "NeedsCompilation": "no", - "Author": "Hadley Wickham [aut], Jim Hester [aut], Winston Chang [aut, cre], Kirill Müller [aut], Daniel Cook [aut], Mark Edmondson [ctb]", - "Maintainer": "Winston Chang ", - "Repository": "CRAN" - }, - "mime": { - "Package": "mime", - "Version": "0.13", - "Source": "Repository", - "Type": "Package", - "Title": "Map Filenames to MIME Types", - "Authors@R": "c( person(\"Yihui\", \"Xie\", role = c(\"aut\", \"cre\"), email = \"xie@yihui.name\", comment = c(ORCID = \"0000-0003-0645-5666\", URL = \"https://yihui.org\")), person(\"Jeffrey\", \"Horner\", role = \"ctb\"), person(\"Beilei\", \"Bian\", role = \"ctb\") )", - "Description": "Guesses the MIME type from a filename extension using the data derived from /etc/mime.types in UNIX-type systems.", - "Imports": [ - "tools" - ], - "License": "GPL", - "URL": "https://github.com/yihui/mime", - "BugReports": "https://github.com/yihui/mime/issues", - "RoxygenNote": "7.3.2", - "Encoding": "UTF-8", - "NeedsCompilation": "yes", - "Author": "Yihui Xie [aut, cre] (, https://yihui.org), Jeffrey Horner [ctb], Beilei Bian [ctb]", - "Maintainer": "Yihui Xie ", - "Repository": "CRAN" - }, - "miniUI": { - "Package": "miniUI", - "Version": "0.1.2", - "Source": "Repository", - "Type": "Package", - "Title": "Shiny UI Widgets for Small Screens", - "Authors@R": "c( person(\"Joe\", \"Cheng\", role = c(\"cre\", \"aut\"), email = \"joe@posit.co\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\"), comment = c(ROR = \"03wc8by49\")) )", - "Description": "Provides UI widget 
and layout functions for writing Shiny apps that work well on small screens.", - "License": "GPL-3", - "URL": "https://github.com/rstudio/miniUI", - "BugReports": "https://github.com/rstudio/miniUI/issues", - "Imports": [ - "shiny (>= 0.13)", - "htmltools (>= 0.3)", - "utils" - ], - "RoxygenNote": "7.3.2", - "Encoding": "UTF-8", - "NeedsCompilation": "no", - "Author": "Joe Cheng [cre, aut], Posit Software, PBC [cph, fnd] (03wc8by49)", - "Maintainer": "Joe Cheng ", - "Repository": "CRAN" - }, - "openssl": { - "Package": "openssl", - "Version": "2.3.3", - "Source": "Repository", - "Type": "Package", - "Title": "Toolkit for Encryption, Signatures and Certificates Based on OpenSSL", - "Authors@R": "c(person(\"Jeroen\", \"Ooms\", role = c(\"aut\", \"cre\"), email = \"jeroenooms@gmail.com\", comment = c(ORCID = \"0000-0002-4035-0289\")), person(\"Oliver\", \"Keyes\", role = \"ctb\"))", - "Description": "Bindings to OpenSSL libssl and libcrypto, plus custom SSH key parsers. Supports RSA, DSA and EC curves P-256, P-384, P-521, and curve25519. Cryptographic signatures can either be created and verified manually or via x509 certificates. AES can be used in cbc, ctr or gcm mode for symmetric encryption; RSA for asymmetric (public key) encryption or EC for Diffie Hellman. High-level envelope functions combine RSA and AES for encrypting arbitrary sized data. Other utilities include key generators, hash functions (md5, sha1, sha256, etc), base64 encoder, a secure random number generator, and 'bignum' math methods for manually performing crypto calculations on large multibyte integers.", - "License": "MIT + file LICENSE", - "URL": "https://jeroen.r-universe.dev/openssl", - "BugReports": "https://github.com/jeroen/openssl/issues", - "SystemRequirements": "OpenSSL >= 1.0.2", - "VignetteBuilder": "knitr", - "Imports": [ - "askpass" - ], - "Suggests": [ - "curl", - "testthat (>= 2.1.0)", - "digest", - "knitr", - "rmarkdown", - "jsonlite", - "jose", - "sodium" - ], - "RoxygenNote": "7.3.2", - "Encoding": "UTF-8", - "NeedsCompilation": "yes", - "Author": "Jeroen Ooms [aut, cre] (ORCID: ), Oliver Keyes [ctb]", - "Maintainer": "Jeroen Ooms ", - "Repository": "CRAN" - }, - "pillar": { - "Package": "pillar", - "Version": "1.11.0", - "Source": "Repository", - "Title": "Coloured Formatting for Columns", - "Authors@R": "c(person(given = \"Kirill\", family = \"M\\u00fcller\", role = c(\"aut\", \"cre\"), email = \"kirill@cynkra.com\", comment = c(ORCID = \"0000-0002-1416-3412\")), person(given = \"Hadley\", family = \"Wickham\", role = \"aut\"), person(given = \"RStudio\", role = \"cph\"))", - "Description": "Provides 'pillar' and 'colonnade' generics designed for formatting columns of data using the full range of colours provided by modern terminals.", - "License": "MIT + file LICENSE", - "URL": "https://pillar.r-lib.org/, https://github.com/r-lib/pillar", - "BugReports": "https://github.com/r-lib/pillar/issues", - "Imports": [ - "cli (>= 2.3.0)", - "glue", - "lifecycle", - "rlang (>= 1.0.2)", - "utf8 (>= 1.1.0)", - "utils", - "vctrs (>= 0.5.0)" - ], - "Suggests": [ - "bit64", - "DBI", - "debugme", - "DiagrammeR", - "dplyr", - "formattable", - "ggplot2", - "knitr", - "lubridate", - "nanotime", - "nycflights13", - "palmerpenguins", - "rmarkdown", - "scales", - "stringi", - "survival", - "testthat (>= 3.1.1)", - "tibble", - "units (>= 0.7.2)", - "vdiffr", - "withr" - ], - "VignetteBuilder": "knitr", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2.9000", - "Config/testthat/edition": "3", - "Config/testthat/parallel": 
"true", - "Config/testthat/start-first": "format_multi_fuzz, format_multi_fuzz_2, format_multi, ctl_colonnade, ctl_colonnade_1, ctl_colonnade_2", - "Config/autostyle/scope": "line_breaks", - "Config/autostyle/strict": "true", - "Config/gha/extra-packages": "units=?ignore-before-r=4.3.0", - "Config/Needs/website": "tidyverse/tidytemplate", - "NeedsCompilation": "no", - "Author": "Kirill Müller [aut, cre] (ORCID: ), Hadley Wickham [aut], RStudio [cph]", - "Maintainer": "Kirill Müller ", - "Repository": "CRAN" - }, - "pkgbuild": { - "Package": "pkgbuild", - "Version": "1.4.8", - "Source": "Repository", - "Title": "Find Tools Needed to Build R Packages", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", role = \"aut\"), person(\"Jim\", \"Hester\", role = \"aut\"), person(\"Gábor\", \"Csárdi\", , \"csardi.gabor@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\"), comment = c(ROR = \"03wc8by49\")) )", - "Description": "Provides functions used to build R packages. Locates compilers needed to build R packages on various platforms and ensures the PATH is configured appropriately so R can use them.", - "License": "MIT + file LICENSE", - "URL": "https://github.com/r-lib/pkgbuild, https://pkgbuild.r-lib.org", - "BugReports": "https://github.com/r-lib/pkgbuild/issues", - "Depends": [ - "R (>= 3.5)" - ], - "Imports": [ - "callr (>= 3.2.0)", - "cli (>= 3.4.0)", - "desc", - "processx", - "R6" - ], - "Suggests": [ - "covr", - "cpp11", - "knitr", - "Rcpp", - "rmarkdown", - "testthat (>= 3.2.0)", - "withr (>= 2.3.0)" - ], - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Config/usethis/last-upkeep": "2025-04-30", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "NeedsCompilation": "no", - "Author": "Hadley Wickham [aut], Jim Hester [aut], Gábor Csárdi [aut, cre], Posit Software, PBC [cph, fnd] (ROR: )", - "Maintainer": "Gábor Csárdi ", - "Repository": "CRAN" - }, - "pkgconfig": { - "Package": "pkgconfig", - "Version": "2.0.3", - "Source": "Repository", - "Title": "Private Configuration for 'R' Packages", - "Author": "Gábor Csárdi", - "Maintainer": "Gábor Csárdi ", - "Description": "Set configuration options on a per-package basis. Options set by a given package only apply to that package, other packages are unaffected.", - "License": "MIT + file LICENSE", - "LazyData": "true", - "Imports": [ - "utils" - ], - "Suggests": [ - "covr", - "testthat", - "disposables (>= 1.0.3)" - ], - "URL": "https://github.com/r-lib/pkgconfig#readme", - "BugReports": "https://github.com/r-lib/pkgconfig/issues", - "Encoding": "UTF-8", - "NeedsCompilation": "no", - "Repository": "CRAN" - }, - "pkgdown": { - "Package": "pkgdown", - "Version": "2.1.3", - "Source": "Repository", - "Title": "Make Static HTML Documentation for a Package", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Jay\", \"Hesselberth\", role = \"aut\", comment = c(ORCID = \"0000-0002-6299-179X\")), person(\"Maëlle\", \"Salmon\", role = \"aut\", comment = c(ORCID = \"0000-0002-2815-0399\")), person(\"Olivier\", \"Roy\", role = \"aut\"), person(\"Salim\", \"Brüggemann\", role = \"aut\", comment = c(ORCID = \"0000-0002-5329-5987\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\"), comment = c(ROR = \"03wc8by49\")) )", - "Description": "Generate an attractive and useful website from a source package. 
'pkgdown' converts your documentation, vignettes, 'README', and more to 'HTML' making it easy to share information about your package online.", - "License": "MIT + file LICENSE", - "URL": "https://pkgdown.r-lib.org/, https://github.com/r-lib/pkgdown", - "BugReports": "https://github.com/r-lib/pkgdown/issues", - "Depends": [ - "R (>= 4.0.0)" - ], - "Imports": [ - "bslib (>= 0.5.1)", - "callr (>= 3.7.3)", - "cli (>= 3.6.1)", - "desc (>= 1.4.0)", - "downlit (>= 0.4.4)", - "fontawesome", - "fs (>= 1.4.0)", - "httr2 (>= 1.0.2)", - "jsonlite", - "openssl", - "purrr (>= 1.0.0)", - "ragg (>= 1.4.0)", - "rlang (>= 1.1.4)", - "rmarkdown (>= 2.27)", - "tibble", - "whisker", - "withr (>= 2.4.3)", - "xml2 (>= 1.3.1)", - "yaml" - ], - "Suggests": [ - "covr", - "diffviewer", - "evaluate (>= 0.24.0)", - "gert", - "gt", - "htmltools", - "htmlwidgets", - "knitr (>= 1.50)", - "lifecycle", - "magick", - "methods", - "pkgload (>= 1.0.2)", - "quarto", - "rsconnect", - "rstudioapi", - "rticles", - "sass", - "testthat (>= 3.1.3)", - "tools" - ], - "VignetteBuilder": "knitr, quarto", - "Config/Needs/website": "usethis, servr", - "Config/potools/style": "explicit", - "Config/testthat/edition": "3", - "Config/testthat/parallel": "true", - "Config/testthat/start-first": "build-article, build-quarto-article, build-reference", - "Encoding": "UTF-8", - "SystemRequirements": "pandoc", - "RoxygenNote": "7.3.2", - "NeedsCompilation": "no", - "Author": "Hadley Wickham [aut, cre] (ORCID: ), Jay Hesselberth [aut] (ORCID: ), Maëlle Salmon [aut] (ORCID: ), Olivier Roy [aut], Salim Brüggemann [aut] (ORCID: ), Posit Software, PBC [cph, fnd] (ROR: )", - "Maintainer": "Hadley Wickham ", - "Repository": "CRAN" - }, - "pkgload": { - "Package": "pkgload", - "Version": "1.4.0", - "Source": "Repository", - "Title": "Simulate Package Installation and Attach", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", role = \"aut\"), person(\"Winston\", \"Chang\", role = \"aut\"), person(\"Jim\", \"Hester\", role = \"aut\"), person(\"Lionel\", \"Henry\", , \"lionel@posit.co\", role = c(\"aut\", \"cre\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")), person(\"R Core team\", role = \"ctb\", comment = \"Some namespace and vignette code extracted from base R\") )", - "Description": "Simulates the process of installing a package and then attaching it. 
This is a key part of the 'devtools' package as it allows you to rapidly iterate while developing a package.", - "License": "GPL-3", - "URL": "https://github.com/r-lib/pkgload, https://pkgload.r-lib.org", - "BugReports": "https://github.com/r-lib/pkgload/issues", - "Depends": [ - "R (>= 3.4.0)" - ], - "Imports": [ - "cli (>= 3.3.0)", - "desc", - "fs", - "glue", - "lifecycle", - "methods", - "pkgbuild", - "processx", - "rlang (>= 1.1.1)", - "rprojroot", - "utils", - "withr (>= 2.4.3)" - ], - "Suggests": [ - "bitops", - "jsonlite", - "mathjaxr", - "pak", - "Rcpp", - "remotes", - "rstudioapi", - "testthat (>= 3.2.1.1)", - "usethis" - ], - "Config/Needs/website": "tidyverse/tidytemplate, ggplot2", - "Config/testthat/edition": "3", - "Config/testthat/parallel": "TRUE", - "Config/testthat/start-first": "dll", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.1", - "NeedsCompilation": "no", - "Author": "Hadley Wickham [aut], Winston Chang [aut], Jim Hester [aut], Lionel Henry [aut, cre], Posit Software, PBC [cph, fnd], R Core team [ctb] (Some namespace and vignette code extracted from base R)", - "Maintainer": "Lionel Henry ", - "Repository": "CRAN" - }, - "praise": { - "Package": "praise", - "Version": "1.0.0", - "Source": "Repository", - "Title": "Praise Users", - "Author": "Gabor Csardi, Sindre Sorhus", - "Maintainer": "Gabor Csardi ", - "Description": "Build friendly R packages that praise their users if they have done something good, or they just need it to feel better.", - "License": "MIT + file LICENSE", - "LazyData": "true", - "URL": "https://github.com/gaborcsardi/praise", - "BugReports": "https://github.com/gaborcsardi/praise/issues", - "Suggests": [ - "testthat" - ], - "Collate": "'adjective.R' 'adverb.R' 'exclamation.R' 'verb.R' 'rpackage.R' 'package.R'", - "NeedsCompilation": "no", - "Repository": "CRAN", - "Encoding": "UTF-8" - }, - "prettyunits": { - "Package": "prettyunits", - "Version": "1.2.0", - "Source": "Repository", - "Title": "Pretty, Human Readable Formatting of Quantities", - "Authors@R": "c( person(\"Gabor\", \"Csardi\", email=\"csardi.gabor@gmail.com\", role=c(\"aut\", \"cre\")), person(\"Bill\", \"Denney\", email=\"wdenney@humanpredictions.com\", role=c(\"ctb\"), comment=c(ORCID=\"0000-0002-5759-428X\")), person(\"Christophe\", \"Regouby\", email=\"christophe.regouby@free.fr\", role=c(\"ctb\")) )", - "Description": "Pretty, human readable formatting of quantities. Time intervals: '1337000' -> '15d 11h 23m 20s'. Vague time intervals: '2674000' -> 'about a month ago'. Bytes: '1337' -> '1.34 kB'. Rounding: '99' with 3 significant digits -> '99.0' p-values: '0.00001' -> '<0.0001'. Colors: '#FF0000' -> 'red'. 
Quantities: '1239437' -> '1.24 M'.", - "License": "MIT + file LICENSE", - "URL": "https://github.com/r-lib/prettyunits", - "BugReports": "https://github.com/r-lib/prettyunits/issues", - "Depends": [ - "R(>= 2.10)" - ], - "Suggests": [ - "codetools", - "covr", - "testthat" - ], - "RoxygenNote": "7.2.3", - "Encoding": "UTF-8", - "NeedsCompilation": "no", - "Author": "Gabor Csardi [aut, cre], Bill Denney [ctb] (), Christophe Regouby [ctb]", - "Maintainer": "Gabor Csardi ", - "Repository": "CRAN" - }, - "processx": { - "Package": "processx", - "Version": "3.8.6", - "Source": "Repository", - "Title": "Execute and Control System Processes", - "Authors@R": "c( person(\"Gábor\", \"Csárdi\", , \"csardi.gabor@gmail.com\", role = c(\"aut\", \"cre\", \"cph\"), comment = c(ORCID = \"0000-0001-7098-9676\")), person(\"Winston\", \"Chang\", role = \"aut\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")), person(\"Ascent Digital Services\", role = c(\"cph\", \"fnd\")) )", - "Description": "Tools to run system processes in the background. It can check if a background process is running; wait on a background process to finish; get the exit status of finished processes; kill background processes. It can read the standard output and error of the processes, using non-blocking connections. 'processx' can poll a process for standard output or error, with a timeout. It can also poll several processes at once.", - "License": "MIT + file LICENSE", - "URL": "https://processx.r-lib.org, https://github.com/r-lib/processx", - "BugReports": "https://github.com/r-lib/processx/issues", - "Depends": [ - "R (>= 3.4.0)" - ], - "Imports": [ - "ps (>= 1.2.0)", - "R6", - "utils" - ], - "Suggests": [ - "callr (>= 3.7.3)", - "cli (>= 3.3.0)", - "codetools", - "covr", - "curl", - "debugme", - "parallel", - "rlang (>= 1.0.2)", - "testthat (>= 3.0.0)", - "webfakes", - "withr" - ], - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.1.9000", - "NeedsCompilation": "yes", - "Author": "Gábor Csárdi [aut, cre, cph] (), Winston Chang [aut], Posit Software, PBC [cph, fnd], Ascent Digital Services [cph, fnd]", - "Maintainer": "Gábor Csárdi ", - "Repository": "CRAN" - }, - "profvis": { - "Package": "profvis", - "Version": "0.4.0", - "Source": "Repository", - "Title": "Interactive Visualizations for Profiling R Code", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\")), person(\"Winston\", \"Chang\", role = \"aut\"), person(\"Javier\", \"Luraschi\", role = \"aut\"), person(\"Timothy\", \"Mastny\", role = \"aut\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")), person(, \"jQuery Foundation\", role = \"cph\", comment = \"jQuery library\"), person(, \"jQuery contributors\", role = c(\"ctb\", \"cph\"), comment = \"jQuery library; authors listed in inst/htmlwidgets/lib/jquery/AUTHORS.txt\"), person(\"Mike\", \"Bostock\", role = c(\"ctb\", \"cph\"), comment = \"D3 library\"), person(, \"D3 contributors\", role = \"ctb\", comment = \"D3 library\"), person(\"Ivan\", \"Sagalaev\", role = c(\"ctb\", \"cph\"), comment = \"highlight.js library\") )", - "Description": "Interactive visualizations for profiling R code.", - "License": "MIT + file LICENSE", - "URL": "https://profvis.r-lib.org, https://github.com/r-lib/profvis", - "BugReports": "https://github.com/r-lib/profvis/issues", - "Depends": [ - "R (>= 4.0)" - ], - "Imports": [ - "htmlwidgets (>= 0.3.2)", - "rlang (>= 1.1.0)", - "vctrs" - ], - 
"Suggests": [ - "htmltools", + "httr", + "vctrs", + "testthat", "knitr", "rmarkdown", - "shiny", - "testthat (>= 3.0.0)" + "R.rsp", + "sf" ], - "VignetteBuilder": "knitr", - "Config/Needs/website": "tidyverse/tidytemplate, rmarkdown", - "Config/testthat/edition": "3", - "Encoding": "UTF-8", "RoxygenNote": "7.3.2", - "NeedsCompilation": "yes", - "Author": "Hadley Wickham [aut, cre], Winston Chang [aut], Javier Luraschi [aut], Timothy Mastny [aut], Posit Software, PBC [cph, fnd], jQuery Foundation [cph] (jQuery library), jQuery contributors [ctb, cph] (jQuery library; authors listed in inst/htmlwidgets/lib/jquery/AUTHORS.txt), Mike Bostock [ctb, cph] (D3 library), D3 contributors [ctb] (D3 library), Ivan Sagalaev [ctb, cph] (highlight.js library)", - "Maintainer": "Hadley Wickham ", - "Repository": "CRAN" - }, - "promises": { - "Package": "promises", - "Version": "1.3.3", - "Source": "Repository", - "Type": "Package", - "Title": "Abstractions for Promise-Based Asynchronous Programming", - "Authors@R": "c( person(\"Joe\", \"Cheng\", , \"joe@posit.co\", role = c(\"aut\", \"cre\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\"), comment = c(ROR = \"03wc8by49\")) )", - "Description": "Provides fundamental abstractions for doing asynchronous programming in R using promises. Asynchronous programming is useful for allowing a single R process to orchestrate multiple tasks in the background while also attending to something else. Semantics are similar to 'JavaScript' promises, but with a syntax that is idiomatic R.", - "License": "MIT + file LICENSE", - "URL": "https://rstudio.github.io/promises/, https://github.com/rstudio/promises", - "BugReports": "https://github.com/rstudio/promises/issues", - "Imports": [ - "fastmap (>= 1.1.0)", - "later", - "magrittr (>= 1.5)", - "R6", - "Rcpp", - "rlang", - "stats" - ], - "Suggests": [ - "future (>= 1.21.0)", - "knitr", - "purrr", - "rmarkdown", - "spelling", - "testthat (>= 3.0.0)", - "vembedr" - ], - "LinkingTo": [ - "later", - "Rcpp" - ], - "VignetteBuilder": "knitr", - "Config/Needs/website": "rsconnect, tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Config/usethis/last-upkeep": "2025-05-27", "Encoding": "UTF-8", - "Language": "en-US", - "RoxygenNote": "7.3.2", "NeedsCompilation": "yes", - "Author": "Joe Cheng [aut, cre], Posit Software, PBC [cph, fnd] (ROR: )", - "Maintainer": "Joe Cheng ", + "Author": "Jeroen Ooms [aut, cre] (), Duncan Temple Lang [ctb], Lloyd Hilaiel [cph] (author of bundled libyajl)", "Repository": "CRAN" }, - "ps": { - "Package": "ps", - "Version": "1.9.1", + "lifecycle": { + "Package": "lifecycle", + "Version": "1.0.4", "Source": "Repository", - "Title": "List, Query, Manipulate System Processes", - "Authors@R": "c( person(\"Jay\", \"Loden\", role = \"aut\"), person(\"Dave\", \"Daeschler\", role = \"aut\"), person(\"Giampaolo\", \"Rodola'\", role = \"aut\"), person(\"Gábor\", \"Csárdi\", , \"csardi.gabor@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "List, query and manipulate all system processes, on 'Windows', 'Linux' and 'macOS'.", + "Title": "Manage the Life Cycle of your Package Functions", + "Authors@R": "c( person(\"Lionel\", \"Henry\", , \"lionel@posit.co\", role = c(\"aut\", \"cre\")), person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = \"aut\", comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "Manage the life cycle of your exported 
functions with shared conventions, documentation badges, and user-friendly deprecation warnings.", "License": "MIT + file LICENSE", - "URL": "https://github.com/r-lib/ps, https://ps.r-lib.org/", - "BugReports": "https://github.com/r-lib/ps/issues", + "URL": "https://lifecycle.r-lib.org/, https://github.com/r-lib/lifecycle", + "BugReports": "https://github.com/r-lib/lifecycle/issues", "Depends": [ - "R (>= 3.4)" + "R (>= 3.6)" ], "Imports": [ - "utils" + "cli (>= 3.4.0)", + "glue", + "rlang (>= 1.1.0)" ], "Suggests": [ - "callr", "covr", - "curl", - "pillar", - "pingr", - "processx (>= 3.1.0)", - "R6", - "rlang", - "testthat (>= 3.0.0)", - "webfakes", + "crayon", + "knitr", + "lintr", + "rmarkdown", + "testthat (>= 3.0.1)", + "tibble", + "tidyverse", + "tools", + "vctrs", "withr" ], - "Biarch": "true", - "Config/Needs/website": "tidyverse/tidytemplate", + "VignetteBuilder": "knitr", + "Config/Needs/website": "tidyverse/tidytemplate, usethis", "Config/testthat/edition": "3", "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "NeedsCompilation": "yes", - "Author": "Jay Loden [aut], Dave Daeschler [aut], Giampaolo Rodola' [aut], Gábor Csárdi [aut, cre], Posit Software, PBC [cph, fnd]", - "Maintainer": "Gábor Csárdi ", + "RoxygenNote": "7.2.1", + "NeedsCompilation": "no", + "Author": "Lionel Henry [aut, cre], Hadley Wickham [aut] (), Posit Software, PBC [cph, fnd]", + "Maintainer": "Lionel Henry ", "Repository": "CRAN" }, - "purrr": { - "Package": "purrr", - "Version": "1.1.0", + "lubridate": { + "Package": "lubridate", + "Version": "1.9.4", "Source": "Repository", - "Title": "Functional Programming Tools", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Lionel\", \"Henry\", , \"lionel@posit.co\", role = \"aut\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\"), comment = c(ROR = \"https://ror.org/03wc8by49\")) )", - "Description": "A complete and consistent functional programming toolkit for R.", - "License": "MIT + file LICENSE", - "URL": "https://purrr.tidyverse.org/, https://github.com/tidyverse/purrr", - "BugReports": "https://github.com/tidyverse/purrr/issues", + "Type": "Package", + "Title": "Make Dealing with Dates a Little Easier", + "Authors@R": "c( person(\"Vitalie\", \"Spinu\", , \"spinuvit@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Garrett\", \"Grolemund\", role = \"aut\"), person(\"Hadley\", \"Wickham\", role = \"aut\"), person(\"Davis\", \"Vaughan\", role = \"ctb\"), person(\"Ian\", \"Lyttle\", role = \"ctb\"), person(\"Imanuel\", \"Costigan\", role = \"ctb\"), person(\"Jason\", \"Law\", role = \"ctb\"), person(\"Doug\", \"Mitarotonda\", role = \"ctb\"), person(\"Joseph\", \"Larmarange\", role = \"ctb\"), person(\"Jonathan\", \"Boiser\", role = \"ctb\"), person(\"Chel Hee\", \"Lee\", role = \"ctb\") )", + "Maintainer": "Vitalie Spinu ", + "Description": "Functions to work with date-times and time-spans: fast and user friendly parsing of date-time data, extraction and updating of components of a date-time (years, months, days, hours, minutes, and seconds), algebraic manipulation on date-time and time-span objects. 
The 'lubridate' package has a consistent and memorable syntax that makes working with dates easy and fun.", + "License": "GPL (>= 2)", + "URL": "https://lubridate.tidyverse.org, https://github.com/tidyverse/lubridate", + "BugReports": "https://github.com/tidyverse/lubridate/issues", "Depends": [ - "R (>= 4.1)" + "methods", + "R (>= 3.2)" ], "Imports": [ - "cli (>= 3.6.1)", - "lifecycle (>= 1.0.3)", - "magrittr (>= 1.5.0)", - "rlang (>= 1.1.1)", - "vctrs (>= 0.6.3)" + "generics", + "timechange (>= 0.3.0)" ], "Suggests": [ - "carrier (>= 0.2.0)", "covr", - "dplyr (>= 0.7.8)", - "httr", "knitr", - "lubridate", - "mirai (>= 2.4.0)", "rmarkdown", - "testthat (>= 3.0.0)", - "tibble", - "tidyselect" + "testthat (>= 2.1.0)", + "vctrs (>= 0.6.5)" ], - "LinkingTo": [ - "cli" + "Enhances": [ + "chron", + "data.table", + "timeDate", + "tis", + "zoo" ], "VignetteBuilder": "knitr", - "Biarch": "true", - "Config/build/compilation-database": "true", - "Config/Needs/website": "tidyverse/tidytemplate, tidyr", + "Config/Needs/website": "tidyverse/tidytemplate", "Config/testthat/edition": "3", - "Config/testthat/parallel": "TRUE", "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", + "LazyData": "true", + "RoxygenNote": "7.2.3", + "SystemRequirements": "C++11, A system with zoneinfo data (e.g. /usr/share/zoneinfo). On Windows the zoneinfo included with R is used.", + "Collate": "'Dates.r' 'POSIXt.r' 'util.r' 'parse.r' 'timespans.r' 'intervals.r' 'difftimes.r' 'durations.r' 'periods.r' 'accessors-date.R' 'accessors-day.r' 'accessors-dst.r' 'accessors-hour.r' 'accessors-minute.r' 'accessors-month.r' 'accessors-quarter.r' 'accessors-second.r' 'accessors-tz.r' 'accessors-week.r' 'accessors-year.r' 'am-pm.r' 'time-zones.r' 'numeric.r' 'coercion.r' 'constants.r' 'cyclic_encoding.r' 'data.r' 'decimal-dates.r' 'deprecated.r' 'format_ISO8601.r' 'guess.r' 'hidden.r' 'instants.r' 'leap-years.r' 'ops-addition.r' 'ops-compare.r' 'ops-division.r' 'ops-integer-division.r' 'ops-m+.r' 'ops-modulo.r' 'ops-multiplication.r' 'ops-subtraction.r' 'package.r' 'pretty.r' 'round.r' 'stamp.r' 'tzdir.R' 'update.r' 'vctrs.R' 'zzz.R'", "NeedsCompilation": "yes", - "Author": "Hadley Wickham [aut, cre] (ORCID: ), Lionel Henry [aut], Posit Software, PBC [cph, fnd] (ROR: )", - "Maintainer": "Hadley Wickham ", + "Author": "Vitalie Spinu [aut, cre], Garrett Grolemund [aut], Hadley Wickham [aut], Davis Vaughan [ctb], Ian Lyttle [ctb], Imanuel Costigan [ctb], Jason Law [ctb], Doug Mitarotonda [ctb], Joseph Larmarange [ctb], Jonathan Boiser [ctb], Chel Hee Lee [ctb]", "Repository": "CRAN" }, - "ragg": { - "Package": "ragg", - "Version": "1.4.0", + "magrittr": { + "Package": "magrittr", + "Version": "2.0.3", "Source": "Repository", "Type": "Package", - "Title": "Graphic Devices Based on AGG", - "Authors@R": "c( person(\"Thomas Lin\", \"Pedersen\", , \"thomas.pedersen@posit.co\", role = c(\"cre\", \"aut\"), comment = c(ORCID = \"0000-0002-5147-4711\")), person(\"Maxim\", \"Shemanarev\", role = c(\"aut\", \"cph\"), comment = \"Author of AGG\"), person(\"Tony\", \"Juricic\", , \"tonygeek@yahoo.com\", role = c(\"ctb\", \"cph\"), comment = \"Contributor to AGG\"), person(\"Milan\", \"Marusinec\", , \"milan@marusinec.sk\", role = c(\"ctb\", \"cph\"), comment = \"Contributor to AGG\"), person(\"Spencer\", \"Garrett\", role = \"ctb\", comment = \"Contributor to AGG\"), person(\"Posit, PBC\", role = c(\"cph\", \"fnd\")) )", - "Maintainer": "Thomas Lin Pedersen ", - "Description": "Anti-Grain Geometry (AGG) is a high-quality and high-performance 2D drawing 
library. The 'ragg' package provides a set of graphic devices based on AGG to use as alternative to the raster devices provided through the 'grDevices' package.", + "Title": "A Forward-Pipe Operator for R", + "Authors@R": "c( person(\"Stefan Milton\", \"Bache\", , \"stefan@stefanbache.dk\", role = c(\"aut\", \"cph\"), comment = \"Original author and creator of magrittr\"), person(\"Hadley\", \"Wickham\", , \"hadley@rstudio.com\", role = \"aut\"), person(\"Lionel\", \"Henry\", , \"lionel@rstudio.com\", role = \"cre\"), person(\"RStudio\", role = c(\"cph\", \"fnd\")) )", + "Description": "Provides a mechanism for chaining commands with a new forward-pipe operator, %>%. This operator will forward a value, or the result of an expression, into the next function call/expression. There is flexible support for the type of right-hand side expressions. For more information, see package vignette. To quote Rene Magritte, \"Ceci n'est pas un pipe.\"", "License": "MIT + file LICENSE", - "URL": "https://ragg.r-lib.org, https://github.com/r-lib/ragg", - "BugReports": "https://github.com/r-lib/ragg/issues", - "Imports": [ - "systemfonts (>= 1.0.3)", - "textshaping (>= 0.3.0)" + "URL": "https://magrittr.tidyverse.org, https://github.com/tidyverse/magrittr", + "BugReports": "https://github.com/tidyverse/magrittr/issues", + "Depends": [ + "R (>= 3.4.0)" ], "Suggests": [ "covr", - "graphics", - "grid", - "testthat (>= 3.0.0)" - ], - "LinkingTo": [ - "systemfonts", - "textshaping" + "knitr", + "rlang", + "rmarkdown", + "testthat" ], - "Config/Needs/website": "ggplot2, devoid, magick, bench, tidyr, ggridges, hexbin, sessioninfo, pkgdown, tidyverse/tidytemplate", + "VignetteBuilder": "knitr", + "ByteCompile": "Yes", + "Config/Needs/website": "tidyverse/tidytemplate", "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "SystemRequirements": "freetype2, libpng, libtiff, libjpeg", - "Config/testthat/edition": "3", - "Config/build/compilation-database": "true", + "RoxygenNote": "7.1.2", "NeedsCompilation": "yes", - "Author": "Thomas Lin Pedersen [cre, aut] (), Maxim Shemanarev [aut, cph] (Author of AGG), Tony Juricic [ctb, cph] (Contributor to AGG), Milan Marusinec [ctb, cph] (Contributor to AGG), Spencer Garrett [ctb] (Contributor to AGG), Posit, PBC [cph, fnd]", + "Author": "Stefan Milton Bache [aut, cph] (Original author and creator of magrittr), Hadley Wickham [aut], Lionel Henry [cre], RStudio [cph, fnd]", + "Maintainer": "Lionel Henry ", "Repository": "CRAN" }, - "rappdirs": { - "Package": "rappdirs", - "Version": "0.3.3", + "mime": { + "Package": "mime", + "Version": "0.13", "Source": "Repository", "Type": "Package", - "Title": "Application Directories: Determine Where to Save Data, Caches, and Logs", - "Authors@R": "c(person(given = \"Hadley\", family = \"Wickham\", role = c(\"trl\", \"cre\", \"cph\"), email = \"hadley@rstudio.com\"), person(given = \"RStudio\", role = \"cph\"), person(given = \"Sridhar\", family = \"Ratnakumar\", role = \"aut\"), person(given = \"Trent\", family = \"Mick\", role = \"aut\"), person(given = \"ActiveState\", role = \"cph\", comment = \"R/appdir.r, R/cache.r, R/data.r, R/log.r translated from appdirs\"), person(given = \"Eddy\", family = \"Petrisor\", role = \"ctb\"), person(given = \"Trevor\", family = \"Davis\", role = c(\"trl\", \"aut\")), person(given = \"Gabor\", family = \"Csardi\", role = \"ctb\"), person(given = \"Gregory\", family = \"Jefferis\", role = \"ctb\"))", - "Description": "An easy way to determine which directories on the users computer you should use to save 
data, caches and logs. A port of Python's 'Appdirs' () to R.", - "License": "MIT + file LICENSE", - "URL": "https://rappdirs.r-lib.org, https://github.com/r-lib/rappdirs", - "BugReports": "https://github.com/r-lib/rappdirs/issues", - "Depends": [ - "R (>= 3.2)" - ], - "Suggests": [ - "roxygen2", - "testthat (>= 3.0.0)", - "covr", - "withr" + "Title": "Map Filenames to MIME Types", + "Authors@R": "c( person(\"Yihui\", \"Xie\", role = c(\"aut\", \"cre\"), email = \"xie@yihui.name\", comment = c(ORCID = \"0000-0003-0645-5666\", URL = \"https://yihui.org\")), person(\"Jeffrey\", \"Horner\", role = \"ctb\"), person(\"Beilei\", \"Bian\", role = \"ctb\") )", + "Description": "Guesses the MIME type from a filename extension using the data derived from /etc/mime.types in UNIX-type systems.", + "Imports": [ + "tools" ], - "Copyright": "Original python appdirs module copyright (c) 2010 ActiveState Software Inc. R port copyright Hadley Wickham, RStudio. See file LICENSE for details.", + "License": "GPL", + "URL": "https://github.com/yihui/mime", + "BugReports": "https://github.com/yihui/mime/issues", + "RoxygenNote": "7.3.2", "Encoding": "UTF-8", - "RoxygenNote": "7.1.1", - "Config/testthat/edition": "3", "NeedsCompilation": "yes", - "Author": "Hadley Wickham [trl, cre, cph], RStudio [cph], Sridhar Ratnakumar [aut], Trent Mick [aut], ActiveState [cph] (R/appdir.r, R/cache.r, R/data.r, R/log.r translated from appdirs), Eddy Petrisor [ctb], Trevor Davis [trl, aut], Gabor Csardi [ctb], Gregory Jefferis [ctb]", - "Maintainer": "Hadley Wickham ", + "Author": "Yihui Xie [aut, cre] (, https://yihui.org), Jeffrey Horner [ctb], Beilei Bian [ctb]", + "Maintainer": "Yihui Xie ", "Repository": "CRAN" }, - "rcmdcheck": { - "Package": "rcmdcheck", - "Version": "1.4.0", + "openssl": { + "Package": "openssl", + "Version": "2.3.3", "Source": "Repository", - "Title": "Run 'R CMD check' from 'R' and Capture Results", - "Authors@R": "person(given = \"Gábor\", family = \"Csárdi\", role = c(\"cre\", \"aut\"), email = \"csardi.gabor@gmail.com\")", - "Description": "Run 'R CMD check' from 'R' and capture the results of the individual checks. Supports running checks in the background, timeouts, pretty printing and comparing check results.", + "Type": "Package", + "Title": "Toolkit for Encryption, Signatures and Certificates Based on OpenSSL", + "Authors@R": "c(person(\"Jeroen\", \"Ooms\", role = c(\"aut\", \"cre\"), email = \"jeroenooms@gmail.com\", comment = c(ORCID = \"0000-0002-4035-0289\")), person(\"Oliver\", \"Keyes\", role = \"ctb\"))", + "Description": "Bindings to OpenSSL libssl and libcrypto, plus custom SSH key parsers. Supports RSA, DSA and EC curves P-256, P-384, P-521, and curve25519. Cryptographic signatures can either be created and verified manually or via x509 certificates. AES can be used in cbc, ctr or gcm mode for symmetric encryption; RSA for asymmetric (public key) encryption or EC for Diffie Hellman. High-level envelope functions combine RSA and AES for encrypting arbitrary sized data. 
Other utilities include key generators, hash functions (md5, sha1, sha256, etc), base64 encoder, a secure random number generator, and 'bignum' math methods for manually performing crypto calculations on large multibyte integers.", "License": "MIT + file LICENSE", - "URL": "https://r-lib.github.io/rcmdcheck/, https://github.com/r-Lib/rcmdcheck#readme", - "BugReports": "https://github.com/r-Lib/rcmdcheck/issues", + "URL": "https://jeroen.r-universe.dev/openssl", + "BugReports": "https://github.com/jeroen/openssl/issues", + "SystemRequirements": "OpenSSL >= 1.0.2", + "VignetteBuilder": "knitr", "Imports": [ - "callr (>= 3.1.1.9000)", - "cli (>= 3.0.0)", - "curl", - "desc (>= 1.2.0)", - "digest", - "pkgbuild", - "prettyunits", - "R6", - "rprojroot", - "sessioninfo (>= 1.1.1)", - "utils", - "withr", - "xopen" + "askpass" ], "Suggests": [ - "covr", + "curl", + "testthat (>= 2.1.0)", + "digest", "knitr", - "mockery", - "processx", - "ps", "rmarkdown", - "svglite", - "testthat", - "webfakes" + "jsonlite", + "jose", + "sodium" ], + "RoxygenNote": "7.3.2", "Encoding": "UTF-8", - "RoxygenNote": "7.1.2", - "Config/testthat/edition": "3", - "NeedsCompilation": "no", - "Author": "Gábor Csárdi [cre, aut]", - "Maintainer": "Gábor Csárdi ", + "NeedsCompilation": "yes", + "Author": "Jeroen Ooms [aut, cre] (ORCID: ), Oliver Keyes [ctb]", + "Maintainer": "Jeroen Ooms ", "Repository": "CRAN" }, - "remotes": { - "Package": "remotes", - "Version": "2.5.0", + "pillar": { + "Package": "pillar", + "Version": "1.11.0", "Source": "Repository", - "Title": "R Package Installation from Remote Repositories, Including 'GitHub'", - "Authors@R": "c( person(\"Gábor\", \"Csárdi\", , \"csardi.gabor@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Jim\", \"Hester\", role = \"aut\"), person(\"Hadley\", \"Wickham\", role = \"aut\"), person(\"Winston\", \"Chang\", role = \"aut\"), person(\"Martin\", \"Morgan\", role = \"aut\"), person(\"Dan\", \"Tenenbaum\", role = \"aut\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")), person(\"Ascent Digital Services\", role = \"cph\") )", - "Description": "Download and install R packages stored in 'GitHub', 'GitLab', 'Bitbucket', 'Bioconductor', or plain 'subversion' or 'git' repositories. This package provides the 'install_*' functions in 'devtools'. 
Indeed most of the code was copied over from 'devtools'.", + "Title": "Coloured Formatting for Columns", + "Authors@R": "c(person(given = \"Kirill\", family = \"M\\u00fcller\", role = c(\"aut\", \"cre\"), email = \"kirill@cynkra.com\", comment = c(ORCID = \"0000-0002-1416-3412\")), person(given = \"Hadley\", family = \"Wickham\", role = \"aut\"), person(given = \"RStudio\", role = \"cph\"))", + "Description": "Provides 'pillar' and 'colonnade' generics designed for formatting columns of data using the full range of colours provided by modern terminals.", "License": "MIT + file LICENSE", - "URL": "https://remotes.r-lib.org, https://github.com/r-lib/remotes#readme", - "BugReports": "https://github.com/r-lib/remotes/issues", - "Depends": [ - "R (>= 3.0.0)" - ], + "URL": "https://pillar.r-lib.org/, https://github.com/r-lib/pillar", + "BugReports": "https://github.com/r-lib/pillar/issues", "Imports": [ - "methods", - "stats", - "tools", - "utils" + "cli (>= 2.3.0)", + "glue", + "lifecycle", + "rlang (>= 1.0.2)", + "utf8 (>= 1.1.0)", + "utils", + "vctrs (>= 0.5.0)" ], "Suggests": [ - "brew", - "callr", - "codetools", - "covr", - "curl", - "git2r (>= 0.23.0)", + "bit64", + "DBI", + "debugme", + "DiagrammeR", + "dplyr", + "formattable", + "ggplot2", "knitr", - "mockery", - "pingr", - "pkgbuild (>= 1.0.1)", + "lubridate", + "nanotime", + "nycflights13", + "palmerpenguins", "rmarkdown", - "rprojroot", - "testthat (>= 3.0.0)", - "webfakes", + "scales", + "stringi", + "survival", + "testthat (>= 3.1.1)", + "tibble", + "units (>= 0.7.2)", + "vdiffr", "withr" ], "VignetteBuilder": "knitr", + "Encoding": "UTF-8", + "RoxygenNote": "7.3.2.9000", + "Config/testthat/edition": "3", + "Config/testthat/parallel": "true", + "Config/testthat/start-first": "format_multi_fuzz, format_multi_fuzz_2, format_multi, ctl_colonnade, ctl_colonnade_1, ctl_colonnade_2", + "Config/autostyle/scope": "line_breaks", + "Config/autostyle/strict": "true", + "Config/gha/extra-packages": "units=?ignore-before-r=4.3.0", "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Encoding": "UTF-8", - "RoxygenNote": "7.2.3", - "SystemRequirements": "Subversion for install_svn, git for install_git", "NeedsCompilation": "no", - "Author": "Gábor Csárdi [aut, cre], Jim Hester [aut], Hadley Wickham [aut], Winston Chang [aut], Martin Morgan [aut], Dan Tenenbaum [aut], Posit Software, PBC [cph, fnd], Ascent Digital Services [cph]", - "Maintainer": "Gábor Csárdi ", + "Author": "Kirill Müller [aut, cre] (ORCID: ), Hadley Wickham [aut], RStudio [cph]", + "Maintainer": "Kirill Müller ", "Repository": "CRAN" }, - "renv": { - "Package": "renv", - "Version": "1.1.4", + "pkgconfig": { + "Package": "pkgconfig", + "Version": "2.0.3", "Source": "Repository", - "Type": "Package", - "Title": "Project Environments", - "Authors@R": "c( person(\"Kevin\", \"Ushey\", role = c(\"aut\", \"cre\"), email = \"kevin@rstudio.com\", comment = c(ORCID = \"0000-0003-2880-7407\")), person(\"Hadley\", \"Wickham\", role = c(\"aut\"), email = \"hadley@rstudio.com\", comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "A dependency management toolkit for R. Using 'renv', you can create and manage project-local R libraries, save the state of these libraries to a 'lockfile', and later restore your library as required. 
Together, these tools can help make your projects more isolated, portable, and reproducible.", + "Title": "Private Configuration for 'R' Packages", + "Author": "Gábor Csárdi", + "Maintainer": "Gábor Csárdi ", + "Description": "Set configuration options on a per-package basis. Options set by a given package only apply to that package, other packages are unaffected.", "License": "MIT + file LICENSE", - "URL": "https://rstudio.github.io/renv/, https://github.com/rstudio/renv", - "BugReports": "https://github.com/rstudio/renv/issues", + "LazyData": "true", "Imports": [ "utils" ], "Suggests": [ - "BiocManager", - "cli", - "compiler", "covr", - "cpp11", - "devtools", - "gitcreds", - "jsonlite", - "jsonvalidate", - "knitr", - "miniUI", - "modules", - "packrat", - "pak", - "R6", - "remotes", - "reticulate", - "rmarkdown", - "rstudioapi", - "shiny", "testthat", - "uuid", - "waldo", - "yaml", - "webfakes" + "disposables (>= 1.0.3)" ], + "URL": "https://github.com/r-lib/pkgconfig#readme", + "BugReports": "https://github.com/r-lib/pkgconfig/issues", "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "VignetteBuilder": "knitr", - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Config/testthat/parallel": "true", - "Config/testthat/start-first": "bioconductor,python,install,restore,snapshot,retrieve,remotes", "NeedsCompilation": "no", - "Author": "Kevin Ushey [aut, cre] (), Hadley Wickham [aut] (), Posit Software, PBC [cph, fnd]", - "Maintainer": "Kevin Ushey ", "Repository": "CRAN" }, - "rlang": { - "Package": "rlang", - "Version": "1.1.6", + "prettyunits": { + "Package": "prettyunits", + "Version": "1.2.0", "Source": "Repository", - "Title": "Functions for Base Types and Core R and 'Tidyverse' Features", - "Description": "A toolbox for working with base types, core R features like the condition system, and core 'Tidyverse' features like tidy evaluation.", - "Authors@R": "c( person(\"Lionel\", \"Henry\", ,\"lionel@posit.co\", c(\"aut\", \"cre\")), person(\"Hadley\", \"Wickham\", ,\"hadley@posit.co\", \"aut\"), person(given = \"mikefc\", email = \"mikefc@coolbutuseless.com\", role = \"cph\", comment = \"Hash implementation based on Mike's xxhashlite\"), person(given = \"Yann\", family = \"Collet\", role = \"cph\", comment = \"Author of the embedded xxHash library\"), person(given = \"Posit, PBC\", role = c(\"cph\", \"fnd\")) )", + "Title": "Pretty, Human Readable Formatting of Quantities", + "Authors@R": "c( person(\"Gabor\", \"Csardi\", email=\"csardi.gabor@gmail.com\", role=c(\"aut\", \"cre\")), person(\"Bill\", \"Denney\", email=\"wdenney@humanpredictions.com\", role=c(\"ctb\"), comment=c(ORCID=\"0000-0002-5759-428X\")), person(\"Christophe\", \"Regouby\", email=\"christophe.regouby@free.fr\", role=c(\"ctb\")) )", + "Description": "Pretty, human readable formatting of quantities. Time intervals: '1337000' -> '15d 11h 23m 20s'. Vague time intervals: '2674000' -> 'about a month ago'. Bytes: '1337' -> '1.34 kB'. Rounding: '99' with 3 significant digits -> '99.0' p-values: '0.00001' -> '<0.0001'. Colors: '#FF0000' -> 'red'. 
Quantities: '1239437' -> '1.24 M'.", "License": "MIT + file LICENSE", - "ByteCompile": "true", - "Biarch": "true", + "URL": "https://github.com/r-lib/prettyunits", + "BugReports": "https://github.com/r-lib/prettyunits/issues", "Depends": [ - "R (>= 3.5.0)" - ], - "Imports": [ - "utils" + "R(>= 2.10)" ], "Suggests": [ - "cli (>= 3.1.0)", + "codetools", "covr", - "crayon", - "desc", - "fs", - "glue", - "knitr", - "magrittr", - "methods", - "pillar", - "pkgload", - "rmarkdown", - "stats", - "testthat (>= 3.2.0)", - "tibble", - "usethis", - "vctrs (>= 0.2.3)", - "withr" - ], - "Enhances": [ - "winch" + "testthat" ], + "RoxygenNote": "7.2.3", "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "URL": "https://rlang.r-lib.org, https://github.com/r-lib/rlang", - "BugReports": "https://github.com/r-lib/rlang/issues", - "Config/build/compilation-database": "true", - "Config/testthat/edition": "3", - "Config/Needs/website": "dplyr, tidyverse/tidytemplate", - "NeedsCompilation": "yes", - "Author": "Lionel Henry [aut, cre], Hadley Wickham [aut], mikefc [cph] (Hash implementation based on Mike's xxhashlite), Yann Collet [cph] (Author of the embedded xxHash library), Posit, PBC [cph, fnd]", - "Maintainer": "Lionel Henry ", + "NeedsCompilation": "no", + "Author": "Gabor Csardi [aut, cre], Bill Denney [ctb] (), Christophe Regouby [ctb]", + "Maintainer": "Gabor Csardi ", "Repository": "CRAN" }, - "rmarkdown": { - "Package": "rmarkdown", - "Version": "2.29", + "progress": { + "Package": "progress", + "Version": "1.2.3", "Source": "Repository", - "Type": "Package", - "Title": "Dynamic Documents for R", - "Authors@R": "c( person(\"JJ\", \"Allaire\", , \"jj@posit.co\", role = \"aut\"), person(\"Yihui\", \"Xie\", , \"xie@yihui.name\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0003-0645-5666\")), person(\"Christophe\", \"Dervieux\", , \"cderv@posit.co\", role = \"aut\", comment = c(ORCID = \"0000-0003-4474-2498\")), person(\"Jonathan\", \"McPherson\", , \"jonathan@posit.co\", role = \"aut\"), person(\"Javier\", \"Luraschi\", role = \"aut\"), person(\"Kevin\", \"Ushey\", , \"kevin@posit.co\", role = \"aut\"), person(\"Aron\", \"Atkins\", , \"aron@posit.co\", role = \"aut\"), person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = \"aut\"), person(\"Joe\", \"Cheng\", , \"joe@posit.co\", role = \"aut\"), person(\"Winston\", \"Chang\", , \"winston@posit.co\", role = \"aut\"), person(\"Richard\", \"Iannone\", , \"rich@posit.co\", role = \"aut\", comment = c(ORCID = \"0000-0003-3925-190X\")), person(\"Andrew\", \"Dunning\", role = \"ctb\", comment = c(ORCID = \"0000-0003-0464-5036\")), person(\"Atsushi\", \"Yasumoto\", role = c(\"ctb\", \"cph\"), comment = c(ORCID = \"0000-0002-8335-495X\", cph = \"Number sections Lua filter\")), person(\"Barret\", \"Schloerke\", role = \"ctb\"), person(\"Carson\", \"Sievert\", role = \"ctb\", comment = c(ORCID = \"0000-0002-4958-2844\")), person(\"Devon\", \"Ryan\", , \"dpryan79@gmail.com\", role = \"ctb\", comment = c(ORCID = \"0000-0002-8549-0971\")), person(\"Frederik\", \"Aust\", , \"frederik.aust@uni-koeln.de\", role = \"ctb\", comment = c(ORCID = \"0000-0003-4900-788X\")), person(\"Jeff\", \"Allen\", , \"jeff@posit.co\", role = \"ctb\"), person(\"JooYoung\", \"Seo\", role = \"ctb\", comment = c(ORCID = \"0000-0002-4064-6012\")), person(\"Malcolm\", \"Barrett\", role = \"ctb\"), person(\"Rob\", \"Hyndman\", , \"Rob.Hyndman@monash.edu\", role = \"ctb\"), person(\"Romain\", \"Lesur\", role = \"ctb\"), person(\"Roy\", \"Storey\", role = \"ctb\"), person(\"Ruben\", 
\"Arslan\", , \"ruben.arslan@uni-goettingen.de\", role = \"ctb\"), person(\"Sergio\", \"Oller\", role = \"ctb\"), person(given = \"Posit Software, PBC\", role = c(\"cph\", \"fnd\")), person(, \"jQuery UI contributors\", role = c(\"ctb\", \"cph\"), comment = \"jQuery UI library; authors listed in inst/rmd/h/jqueryui/AUTHORS.txt\"), person(\"Mark\", \"Otto\", role = \"ctb\", comment = \"Bootstrap library\"), person(\"Jacob\", \"Thornton\", role = \"ctb\", comment = \"Bootstrap library\"), person(, \"Bootstrap contributors\", role = \"ctb\", comment = \"Bootstrap library\"), person(, \"Twitter, Inc\", role = \"cph\", comment = \"Bootstrap library\"), person(\"Alexander\", \"Farkas\", role = c(\"ctb\", \"cph\"), comment = \"html5shiv library\"), person(\"Scott\", \"Jehl\", role = c(\"ctb\", \"cph\"), comment = \"Respond.js library\"), person(\"Ivan\", \"Sagalaev\", role = c(\"ctb\", \"cph\"), comment = \"highlight.js library\"), person(\"Greg\", \"Franko\", role = c(\"ctb\", \"cph\"), comment = \"tocify library\"), person(\"John\", \"MacFarlane\", role = c(\"ctb\", \"cph\"), comment = \"Pandoc templates\"), person(, \"Google, Inc.\", role = c(\"ctb\", \"cph\"), comment = \"ioslides library\"), person(\"Dave\", \"Raggett\", role = \"ctb\", comment = \"slidy library\"), person(, \"W3C\", role = \"cph\", comment = \"slidy library\"), person(\"Dave\", \"Gandy\", role = c(\"ctb\", \"cph\"), comment = \"Font-Awesome\"), person(\"Ben\", \"Sperry\", role = \"ctb\", comment = \"Ionicons\"), person(, \"Drifty\", role = \"cph\", comment = \"Ionicons\"), person(\"Aidan\", \"Lister\", role = c(\"ctb\", \"cph\"), comment = \"jQuery StickyTabs\"), person(\"Benct Philip\", \"Jonsson\", role = c(\"ctb\", \"cph\"), comment = \"pagebreak Lua filter\"), person(\"Albert\", \"Krewinkel\", role = c(\"ctb\", \"cph\"), comment = \"pagebreak Lua filter\") )", - "Description": "Convert R Markdown documents into a variety of formats.", - "License": "GPL-3", - "URL": "https://github.com/rstudio/rmarkdown, https://pkgs.rstudio.com/rmarkdown/", - "BugReports": "https://github.com/rstudio/rmarkdown/issues", + "Title": "Terminal Progress Bars", + "Authors@R": "c( person(\"Gábor\", \"Csárdi\", , \"csardi.gabor@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Rich\", \"FitzJohn\", role = \"aut\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "Configurable Progress bars, they may include percentage, elapsed time, and/or the estimated completion time. They work in terminals, in 'Emacs' 'ESS', 'RStudio', 'Windows' 'Rgui' and the 'macOS' 'R.app'. 
The package also provides a 'C++' 'API', that works with or without 'Rcpp'.", + "License": "MIT + file LICENSE", + "URL": "https://github.com/r-lib/progress#readme, http://r-lib.github.io/progress/", + "BugReports": "https://github.com/r-lib/progress/issues", "Depends": [ - "R (>= 3.0)" + "R (>= 3.6)" ], "Imports": [ - "bslib (>= 0.2.5.1)", - "evaluate (>= 0.13)", - "fontawesome (>= 0.5.0)", - "htmltools (>= 0.5.1)", - "jquerylib", - "jsonlite", - "knitr (>= 1.43)", - "methods", - "tinytex (>= 0.31)", - "tools", - "utils", - "xfun (>= 0.36)", - "yaml (>= 2.1.19)" + "crayon", + "hms", + "prettyunits", + "R6" ], "Suggests": [ - "digest", - "dygraphs", - "fs", - "rsconnect", - "downlit (>= 0.4.0)", - "katex (>= 1.4.0)", - "sass (>= 0.4.0)", - "shiny (>= 1.6.0)", - "testthat (>= 3.0.3)", - "tibble", - "vctrs", - "cleanrmd", - "withr (>= 2.4.2)", - "xml2" + "Rcpp", + "testthat (>= 3.0.0)", + "withr" ], - "VignetteBuilder": "knitr", - "Config/Needs/website": "rstudio/quillt, pkgdown", + "Config/Needs/website": "tidyverse/tidytemplate", "Config/testthat/edition": "3", "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "SystemRequirements": "pandoc (>= 1.14) - http://pandoc.org", + "RoxygenNote": "7.2.3", "NeedsCompilation": "no", - "Author": "JJ Allaire [aut], Yihui Xie [aut, cre] (), Christophe Dervieux [aut] (), Jonathan McPherson [aut], Javier Luraschi [aut], Kevin Ushey [aut], Aron Atkins [aut], Hadley Wickham [aut], Joe Cheng [aut], Winston Chang [aut], Richard Iannone [aut] (), Andrew Dunning [ctb] (), Atsushi Yasumoto [ctb, cph] (, Number sections Lua filter), Barret Schloerke [ctb], Carson Sievert [ctb] (), Devon Ryan [ctb] (), Frederik Aust [ctb] (), Jeff Allen [ctb], JooYoung Seo [ctb] (), Malcolm Barrett [ctb], Rob Hyndman [ctb], Romain Lesur [ctb], Roy Storey [ctb], Ruben Arslan [ctb], Sergio Oller [ctb], Posit Software, PBC [cph, fnd], jQuery UI contributors [ctb, cph] (jQuery UI library; authors listed in inst/rmd/h/jqueryui/AUTHORS.txt), Mark Otto [ctb] (Bootstrap library), Jacob Thornton [ctb] (Bootstrap library), Bootstrap contributors [ctb] (Bootstrap library), Twitter, Inc [cph] (Bootstrap library), Alexander Farkas [ctb, cph] (html5shiv library), Scott Jehl [ctb, cph] (Respond.js library), Ivan Sagalaev [ctb, cph] (highlight.js library), Greg Franko [ctb, cph] (tocify library), John MacFarlane [ctb, cph] (Pandoc templates), Google, Inc. 
[ctb, cph] (ioslides library), Dave Raggett [ctb] (slidy library), W3C [cph] (slidy library), Dave Gandy [ctb, cph] (Font-Awesome), Ben Sperry [ctb] (Ionicons), Drifty [cph] (Ionicons), Aidan Lister [ctb, cph] (jQuery StickyTabs), Benct Philip Jonsson [ctb, cph] (pagebreak Lua filter), Albert Krewinkel [ctb, cph] (pagebreak Lua filter)", - "Maintainer": "Yihui Xie ", + "Author": "Gábor Csárdi [aut, cre], Rich FitzJohn [aut], Posit Software, PBC [cph, fnd]", + "Maintainer": "Gábor Csárdi ", "Repository": "CRAN" }, - "roxygen2": { - "Package": "roxygen2", - "Version": "7.3.2", + "purrr": { + "Package": "purrr", + "Version": "1.1.0", "Source": "Repository", - "Title": "In-Line Documentation for R", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\", \"cph\"), comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Peter\", \"Danenberg\", , \"pcd@roxygen.org\", role = c(\"aut\", \"cph\")), person(\"Gábor\", \"Csárdi\", , \"csardi.gabor@gmail.com\", role = \"aut\"), person(\"Manuel\", \"Eugster\", role = c(\"aut\", \"cph\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "Generate your Rd documentation, 'NAMESPACE' file, and collation field using specially formatted comments. Writing documentation in-line with code makes it easier to keep your documentation up-to-date as your requirements change. 'roxygen2' is inspired by the 'Doxygen' system for C++.", + "Title": "Functional Programming Tools", + "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Lionel\", \"Henry\", , \"lionel@posit.co\", role = \"aut\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\"), comment = c(ROR = \"https://ror.org/03wc8by49\")) )", + "Description": "A complete and consistent functional programming toolkit for R.", "License": "MIT + file LICENSE", - "URL": "https://roxygen2.r-lib.org/, https://github.com/r-lib/roxygen2", - "BugReports": "https://github.com/r-lib/roxygen2/issues", + "URL": "https://purrr.tidyverse.org/, https://github.com/tidyverse/purrr", + "BugReports": "https://github.com/tidyverse/purrr/issues", "Depends": [ - "R (>= 3.6)" + "R (>= 4.1)" ], "Imports": [ - "brew", - "cli (>= 3.3.0)", - "commonmark", - "desc (>= 1.2.0)", - "knitr", - "methods", - "pkgload (>= 1.0.2)", - "purrr (>= 1.0.0)", - "R6 (>= 2.1.2)", - "rlang (>= 1.0.6)", - "stringi", - "stringr (>= 1.0.0)", - "utils", - "withr", - "xml2" + "cli (>= 3.6.1)", + "lifecycle (>= 1.0.3)", + "magrittr (>= 1.5.0)", + "rlang (>= 1.1.1)", + "vctrs (>= 0.6.3)" ], "Suggests": [ + "carrier (>= 0.2.0)", "covr", - "R.methodsS3", - "R.oo", - "rmarkdown (>= 2.16)", - "testthat (>= 3.1.2)", - "yaml" + "dplyr (>= 0.7.8)", + "httr", + "knitr", + "lubridate", + "mirai (>= 2.4.0)", + "rmarkdown", + "testthat (>= 3.0.0)", + "tibble", + "tidyselect" ], "LinkingTo": [ - "cpp11" + "cli" ], "VignetteBuilder": "knitr", - "Config/Needs/development": "testthat", - "Config/Needs/website": "tidyverse/tidytemplate", + "Biarch": "true", + "Config/build/compilation-database": "true", + "Config/Needs/website": "tidyverse/tidytemplate, tidyr", "Config/testthat/edition": "3", "Config/testthat/parallel": "TRUE", "Encoding": "UTF-8", - "Language": "en-GB", - "RoxygenNote": "7.3.1.9000", + "RoxygenNote": "7.3.2", "NeedsCompilation": "yes", - "Author": "Hadley Wickham [aut, cre, cph] (), Peter Danenberg [aut, cph], Gábor Csárdi [aut], Manuel Eugster [aut, cph], Posit Software, PBC [cph, 
fnd]", + "Author": "Hadley Wickham [aut, cre] (ORCID: ), Lionel Henry [aut], Posit Software, PBC [cph, fnd] (ROR: )", "Maintainer": "Hadley Wickham ", "Repository": "CRAN" }, - "rprojroot": { - "Package": "rprojroot", - "Version": "2.1.0", + "rappdirs": { + "Package": "rappdirs", + "Version": "0.3.3", "Source": "Repository", - "Title": "Finding Files in Project Subdirectories", - "Authors@R": "person(given = \"Kirill\", family = \"M\\u00fcller\", role = c(\"aut\", \"cre\"), email = \"kirill@cynkra.com\", comment = c(ORCID = \"0000-0002-1416-3412\"))", - "Description": "Robust, reliable and flexible paths to files below a project root. The 'root' of a project is defined as a directory that matches a certain criterion, e.g., it contains a certain regular file.", + "Type": "Package", + "Title": "Application Directories: Determine Where to Save Data, Caches, and Logs", + "Authors@R": "c(person(given = \"Hadley\", family = \"Wickham\", role = c(\"trl\", \"cre\", \"cph\"), email = \"hadley@rstudio.com\"), person(given = \"RStudio\", role = \"cph\"), person(given = \"Sridhar\", family = \"Ratnakumar\", role = \"aut\"), person(given = \"Trent\", family = \"Mick\", role = \"aut\"), person(given = \"ActiveState\", role = \"cph\", comment = \"R/appdir.r, R/cache.r, R/data.r, R/log.r translated from appdirs\"), person(given = \"Eddy\", family = \"Petrisor\", role = \"ctb\"), person(given = \"Trevor\", family = \"Davis\", role = c(\"trl\", \"aut\")), person(given = \"Gabor\", family = \"Csardi\", role = \"ctb\"), person(given = \"Gregory\", family = \"Jefferis\", role = \"ctb\"))", + "Description": "An easy way to determine which directories on the users computer you should use to save data, caches and logs. A port of Python's 'Appdirs' () to R.", "License": "MIT + file LICENSE", - "URL": "https://rprojroot.r-lib.org/, https://github.com/r-lib/rprojroot", - "BugReports": "https://github.com/r-lib/rprojroot/issues", + "URL": "https://rappdirs.r-lib.org, https://github.com/r-lib/rappdirs", + "BugReports": "https://github.com/r-lib/rappdirs/issues", "Depends": [ - "R (>= 3.0.0)" + "R (>= 3.2)" ], "Suggests": [ + "roxygen2", + "testthat (>= 3.0.0)", "covr", - "knitr", - "lifecycle", + "withr" + ], + "Copyright": "Original python appdirs module copyright (c) 2010 ActiveState Software Inc. R port copyright Hadley Wickham, RStudio. 
See file LICENSE for details.", + "Encoding": "UTF-8", + "RoxygenNote": "7.1.1", + "Config/testthat/edition": "3", + "NeedsCompilation": "yes", + "Author": "Hadley Wickham [trl, cre, cph], RStudio [cph], Sridhar Ratnakumar [aut], Trent Mick [aut], ActiveState [cph] (R/appdir.r, R/cache.r, R/data.r, R/log.r translated from appdirs), Eddy Petrisor [ctb], Trevor Davis [trl, aut], Gabor Csardi [ctb], Gregory Jefferis [ctb]", + "Maintainer": "Hadley Wickham ", + "Repository": "CRAN" + }, + "readr": { + "Package": "readr", + "Version": "2.1.6", + "Source": "Repository", + "Title": "Read Rectangular Text Data", + "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = \"aut\"), person(\"Jim\", \"Hester\", role = \"aut\"), person(\"Romain\", \"Francois\", role = \"ctb\"), person(\"Jennifer\", \"Bryan\", , \"jenny@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0002-6983-2759\")), person(\"Shelby\", \"Bearrows\", role = \"ctb\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")), person(\"https://github.com/mandreyel/\", role = \"cph\", comment = \"mio library\"), person(\"Jukka\", \"Jylänki\", role = c(\"ctb\", \"cph\"), comment = \"grisu3 implementation\"), person(\"Mikkel\", \"Jørgensen\", role = c(\"ctb\", \"cph\"), comment = \"grisu3 implementation\") )", + "Description": "The goal of 'readr' is to provide a fast and friendly way to read rectangular data (like 'csv', 'tsv', and 'fwf'). It is designed to flexibly parse many types of data found in the wild, while still cleanly failing when data unexpectedly changes.", + "License": "MIT + file LICENSE", + "URL": "https://readr.tidyverse.org, https://github.com/tidyverse/readr", + "BugReports": "https://github.com/tidyverse/readr/issues", + "Depends": [ + "R (>= 3.6)" + ], + "Imports": [ + "cli (>= 3.2.0)", + "clipr", + "crayon", + "hms (>= 0.4.1)", + "lifecycle (>= 0.2.0)", + "methods", + "R6", "rlang", + "tibble", + "utils", + "vroom (>= 1.6.0)" + ], + "Suggests": [ + "covr", + "curl", + "datasets", + "knitr", "rmarkdown", + "spelling", + "stringi", "testthat (>= 3.2.0)", - "withr" + "tzdb (>= 0.1.1)", + "waldo", + "withr", + "xml2" + ], + "LinkingTo": [ + "cpp11", + "tzdb (>= 0.1.1)" ], "VignetteBuilder": "knitr", + "Config/Needs/website": "tidyverse, tidyverse/tidytemplate", "Config/testthat/edition": "3", + "Config/testthat/parallel": "false", "Encoding": "UTF-8", - "RoxygenNote": "7.3.2.9000", - "Config/autostyle/scope": "line_breaks", - "Config/autostyle/strict": "true", - "Config/Needs/website": "tidyverse/tidytemplate", - "NeedsCompilation": "no", - "Author": "Kirill Müller [aut, cre] (ORCID: )", - "Maintainer": "Kirill Müller ", + "Language": "en-US", + "RoxygenNote": "7.3.3", + "NeedsCompilation": "yes", + "Author": "Hadley Wickham [aut], Jim Hester [aut], Romain Francois [ctb], Jennifer Bryan [aut, cre] (ORCID: ), Shelby Bearrows [ctb], Posit Software, PBC [cph, fnd], https://github.com/mandreyel/ [cph] (mio library), Jukka Jylänki [ctb, cph] (grisu3 implementation), Mikkel Jørgensen [ctb, cph] (grisu3 implementation)", + "Maintainer": "Jennifer Bryan ", "Repository": "CRAN" }, - "rstudioapi": { - "Package": "rstudioapi", - "Version": "0.17.1", + "renv": { + "Package": "renv", + "Version": "1.1.4", "Source": "Repository", - "Title": "Safely Access the RStudio API", - "Description": "Access the RStudio API (if available) and provide informative error messages when it's not.", - "Authors@R": "c( person(\"Kevin\", \"Ushey\", role = c(\"aut\", \"cre\"), email = \"kevin@rstudio.com\"), 
person(\"JJ\", \"Allaire\", role = c(\"aut\"), email = \"jj@posit.co\"), person(\"Hadley\", \"Wickham\", role = c(\"aut\"), email = \"hadley@posit.co\"), person(\"Gary\", \"Ritchie\", role = c(\"aut\"), email = \"gary@posit.co\"), person(family = \"RStudio\", role = \"cph\") )", - "Maintainer": "Kevin Ushey ", + "Type": "Package", + "Title": "Project Environments", + "Authors@R": "c( person(\"Kevin\", \"Ushey\", role = c(\"aut\", \"cre\"), email = \"kevin@rstudio.com\", comment = c(ORCID = \"0000-0003-2880-7407\")), person(\"Hadley\", \"Wickham\", role = c(\"aut\"), email = \"hadley@rstudio.com\", comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "A dependency management toolkit for R. Using 'renv', you can create and manage project-local R libraries, save the state of these libraries to a 'lockfile', and later restore your library as required. Together, these tools can help make your projects more isolated, portable, and reproducible.", "License": "MIT + file LICENSE", - "URL": "https://rstudio.github.io/rstudioapi/, https://github.com/rstudio/rstudioapi", - "BugReports": "https://github.com/rstudio/rstudioapi/issues", - "RoxygenNote": "7.3.2", + "URL": "https://rstudio.github.io/renv/, https://github.com/rstudio/renv", + "BugReports": "https://github.com/rstudio/renv/issues", + "Imports": [ + "utils" + ], "Suggests": [ - "testthat", + "BiocManager", + "cli", + "compiler", + "covr", + "cpp11", + "devtools", + "gitcreds", + "jsonlite", + "jsonvalidate", "knitr", + "miniUI", + "modules", + "packrat", + "pak", + "R6", + "remotes", + "reticulate", "rmarkdown", - "clipr", - "covr" + "rstudioapi", + "shiny", + "testthat", + "uuid", + "waldo", + "yaml", + "webfakes" ], - "VignetteBuilder": "knitr", "Encoding": "UTF-8", + "RoxygenNote": "7.3.2", + "VignetteBuilder": "knitr", + "Config/Needs/website": "tidyverse/tidytemplate", + "Config/testthat/edition": "3", + "Config/testthat/parallel": "true", + "Config/testthat/start-first": "bioconductor,python,install,restore,snapshot,retrieve,remotes", "NeedsCompilation": "no", - "Author": "Kevin Ushey [aut, cre], JJ Allaire [aut], Hadley Wickham [aut], Gary Ritchie [aut], RStudio [cph]", + "Author": "Kevin Ushey [aut, cre] (), Hadley Wickham [aut] (), Posit Software, PBC [cph, fnd]", + "Maintainer": "Kevin Ushey ", "Repository": "CRAN" }, - "rversions": { - "Package": "rversions", - "Version": "2.1.2", + "rlang": { + "Package": "rlang", + "Version": "1.1.6", "Source": "Repository", - "Title": "Query 'R' Versions, Including 'r-release' and 'r-oldrel'", - "Authors@R": "c(person(given = \"Gábor\", family = \"Csárdi\", role = c(\"aut\", \"cre\"), email = \"csardi.gabor@gmail.com\"), person(given = \"Jeroen\", family = \"Ooms\", role = \"ctb\", email = \"jeroen.ooms@stat.ucla.edu\"), person(given = \"R Consortium\", role = \"fnd\"))", - "Description": "Query the main 'R' 'SVN' repository to find the versions 'r-release' and 'r-oldrel' refer to, and also all previous 'R' versions and their release dates.", + "Title": "Functions for Base Types and Core R and 'Tidyverse' Features", + "Description": "A toolbox for working with base types, core R features like the condition system, and core 'Tidyverse' features like tidy evaluation.", + "Authors@R": "c( person(\"Lionel\", \"Henry\", ,\"lionel@posit.co\", c(\"aut\", \"cre\")), person(\"Hadley\", \"Wickham\", ,\"hadley@posit.co\", \"aut\"), person(given = \"mikefc\", email = \"mikefc@coolbutuseless.com\", role = \"cph\", comment = \"Hash 
implementation based on Mike's xxhashlite\"), person(given = \"Yann\", family = \"Collet\", role = \"cph\", comment = \"Author of the embedded xxHash library\"), person(given = \"Posit, PBC\", role = c(\"cph\", \"fnd\")) )", "License": "MIT + file LICENSE", - "URL": "https://github.com/r-hub/rversions, https://r-hub.github.io/rversions/", - "BugReports": "https://github.com/r-hub/rversions/issues", + "ByteCompile": "true", + "Biarch": "true", + "Depends": [ + "R (>= 3.5.0)" + ], "Imports": [ - "curl", - "utils", - "xml2 (>= 1.0.0)" + "utils" ], "Suggests": [ + "cli (>= 3.1.0)", "covr", - "mockery", - "testthat" + "crayon", + "desc", + "fs", + "glue", + "knitr", + "magrittr", + "methods", + "pillar", + "pkgload", + "rmarkdown", + "stats", + "testthat (>= 3.2.0)", + "tibble", + "usethis", + "vctrs (>= 0.2.3)", + "withr" + ], + "Enhances": [ + "winch" ], "Encoding": "UTF-8", - "RoxygenNote": "7.2.1.9000", - "NeedsCompilation": "no", - "Author": "Gábor Csárdi [aut, cre], Jeroen Ooms [ctb], R Consortium [fnd]", - "Maintainer": "Gábor Csárdi ", + "RoxygenNote": "7.3.2", + "URL": "https://rlang.r-lib.org, https://github.com/r-lib/rlang", + "BugReports": "https://github.com/r-lib/rlang/issues", + "Config/build/compilation-database": "true", + "Config/testthat/edition": "3", + "Config/Needs/website": "dplyr, tidyverse/tidytemplate", + "NeedsCompilation": "yes", + "Author": "Lionel Henry [aut, cre], Hadley Wickham [aut], mikefc [cph] (Hash implementation based on Mike's xxhashlite), Yann Collet [cph] (Author of the embedded xxHash library), Posit, PBC [cph, fnd]", + "Maintainer": "Lionel Henry ", "Repository": "CRAN" }, "rvest": { @@ -2937,42 +1259,6 @@ "Maintainer": "Hadley Wickham ", "Repository": "CRAN" }, - "sass": { - "Package": "sass", - "Version": "0.4.10", - "Source": "Repository", - "Type": "Package", - "Title": "Syntactically Awesome Style Sheets ('Sass')", - "Description": "An 'SCSS' compiler, powered by the 'LibSass' library. With this, R developers can use variables, inheritance, and functions to generate dynamic style sheets. 
The package uses the 'Sass CSS' extension language, which is stable, powerful, and CSS compatible.", - "Authors@R": "c( person(\"Joe\", \"Cheng\", , \"joe@rstudio.com\", \"aut\"), person(\"Timothy\", \"Mastny\", , \"tim.mastny@gmail.com\", \"aut\"), person(\"Richard\", \"Iannone\", , \"rich@rstudio.com\", \"aut\", comment = c(ORCID = \"0000-0003-3925-190X\")), person(\"Barret\", \"Schloerke\", , \"barret@rstudio.com\", \"aut\", comment = c(ORCID = \"0000-0001-9986-114X\")), person(\"Carson\", \"Sievert\", , \"carson@rstudio.com\", c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0002-4958-2844\")), person(\"Christophe\", \"Dervieux\", , \"cderv@rstudio.com\", c(\"ctb\"), comment = c(ORCID = \"0000-0003-4474-2498\")), person(family = \"RStudio\", role = c(\"cph\", \"fnd\")), person(family = \"Sass Open Source Foundation\", role = c(\"ctb\", \"cph\"), comment = \"LibSass library\"), person(\"Greter\", \"Marcel\", role = c(\"ctb\", \"cph\"), comment = \"LibSass library\"), person(\"Mifsud\", \"Michael\", role = c(\"ctb\", \"cph\"), comment = \"LibSass library\"), person(\"Hampton\", \"Catlin\", role = c(\"ctb\", \"cph\"), comment = \"LibSass library\"), person(\"Natalie\", \"Weizenbaum\", role = c(\"ctb\", \"cph\"), comment = \"LibSass library\"), person(\"Chris\", \"Eppstein\", role = c(\"ctb\", \"cph\"), comment = \"LibSass library\"), person(\"Adams\", \"Joseph\", role = c(\"ctb\", \"cph\"), comment = \"json.cpp\"), person(\"Trifunovic\", \"Nemanja\", role = c(\"ctb\", \"cph\"), comment = \"utf8.h\") )", - "License": "MIT + file LICENSE", - "URL": "https://rstudio.github.io/sass/, https://github.com/rstudio/sass", - "BugReports": "https://github.com/rstudio/sass/issues", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "SystemRequirements": "GNU make", - "Imports": [ - "fs (>= 1.2.4)", - "rlang (>= 0.4.10)", - "htmltools (>= 0.5.1)", - "R6", - "rappdirs" - ], - "Suggests": [ - "testthat", - "knitr", - "rmarkdown", - "withr", - "shiny", - "curl" - ], - "VignetteBuilder": "knitr", - "Config/testthat/edition": "3", - "NeedsCompilation": "yes", - "Author": "Joe Cheng [aut], Timothy Mastny [aut], Richard Iannone [aut] (), Barret Schloerke [aut] (), Carson Sievert [aut, cre] (), Christophe Dervieux [ctb] (), RStudio [cph, fnd], Sass Open Source Foundation [ctb, cph] (LibSass library), Greter Marcel [ctb, cph] (LibSass library), Mifsud Michael [ctb, cph] (LibSass library), Hampton Catlin [ctb, cph] (LibSass library), Natalie Weizenbaum [ctb, cph] (LibSass library), Chris Eppstein [ctb, cph] (LibSass library), Adams Joseph [ctb, cph] (json.cpp), Trifunovic Nemanja [ctb, cph] (utf8.h)", - "Maintainer": "Carson Sievert ", - "Repository": "CRAN" - }, "selectr": { "Package": "selectr", "Version": "0.4-2", @@ -3003,113 +1289,6 @@ "Maintainer": "Simon Potter ", "Repository": "CRAN" }, - "sessioninfo": { - "Package": "sessioninfo", - "Version": "1.2.3", - "Source": "Repository", - "Title": "R Session Information", - "Authors@R": "c( person(\"Gábor\", \"Csárdi\", , \"csardi.gabor@gmail.com\", role = \"cre\"), person(\"Hadley\", \"Wickham\", role = \"aut\"), person(\"Winston\", \"Chang\", role = \"aut\"), person(\"Robert\", \"Flight\", role = \"aut\"), person(\"Kirill\", \"Müller\", role = \"aut\"), person(\"Jim\", \"Hester\", role = \"aut\"), person(\"R Core team\", role = \"ctb\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Maintainer": "Gábor Csárdi ", - "Description": "Query and print information about the current R session. 
It is similar to 'utils::sessionInfo()', but includes more information about packages, and where they were installed from.", - "License": "GPL-2", - "URL": "https://github.com/r-lib/sessioninfo#readme, https://sessioninfo.r-lib.org", - "BugReports": "https://github.com/r-lib/sessioninfo/issues", - "Depends": [ - "R (>= 3.4)" - ], - "Imports": [ - "cli (>= 3.1.0)", - "tools", - "utils" - ], - "Suggests": [ - "callr", - "covr", - "gh", - "reticulate", - "rmarkdown", - "testthat (>= 3.2.0)", - "withr" - ], - "Config/Needs/website": "pkgdown, tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Config/testthat/parallel": "true", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "NeedsCompilation": "no", - "Author": "Gábor Csárdi [cre], Hadley Wickham [aut], Winston Chang [aut], Robert Flight [aut], Kirill Müller [aut], Jim Hester [aut], R Core team [ctb], Posit Software, PBC [cph, fnd]", - "Repository": "CRAN" - }, - "shiny": { - "Package": "shiny", - "Version": "1.11.1", - "Source": "Repository", - "Type": "Package", - "Title": "Web Application Framework for R", - "Authors@R": "c( person(\"Winston\", \"Chang\", role = c(\"aut\", \"cre\"), email = \"winston@posit.co\", comment = c(ORCID = \"0000-0002-1576-2126\")), person(\"Joe\", \"Cheng\", role = \"aut\", email = \"joe@posit.co\"), person(\"JJ\", \"Allaire\", role = \"aut\", email = \"jj@posit.co\"), person(\"Carson\", \"Sievert\", role = \"aut\", email = \"carson@posit.co\", comment = c(ORCID = \"0000-0002-4958-2844\")), person(\"Barret\", \"Schloerke\", role = \"aut\", email = \"barret@posit.co\", comment = c(ORCID = \"0000-0001-9986-114X\")), person(\"Yihui\", \"Xie\", role = \"aut\", email = \"yihui@posit.co\"), person(\"Jeff\", \"Allen\", role = \"aut\"), person(\"Jonathan\", \"McPherson\", role = \"aut\", email = \"jonathan@posit.co\"), person(\"Alan\", \"Dipert\", role = \"aut\"), person(\"Barbara\", \"Borges\", role = \"aut\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")), person(family = \"jQuery Foundation\", role = \"cph\", comment = \"jQuery library and jQuery UI library\"), person(family = \"jQuery contributors\", role = c(\"ctb\", \"cph\"), comment = \"jQuery library; authors listed in inst/www/shared/jquery-AUTHORS.txt\"), person(family = \"jQuery UI contributors\", role = c(\"ctb\", \"cph\"), comment = \"jQuery UI library; authors listed in inst/www/shared/jqueryui/AUTHORS.txt\"), person(\"Mark\", \"Otto\", role = \"ctb\", comment = \"Bootstrap library\"), person(\"Jacob\", \"Thornton\", role = \"ctb\", comment = \"Bootstrap library\"), person(family = \"Bootstrap contributors\", role = \"ctb\", comment = \"Bootstrap library\"), person(family = \"Twitter, Inc\", role = \"cph\", comment = \"Bootstrap library\"), person(\"Prem Nawaz\", \"Khan\", role = \"ctb\", comment = \"Bootstrap accessibility plugin\"), person(\"Victor\", \"Tsaran\", role = \"ctb\", comment = \"Bootstrap accessibility plugin\"), person(\"Dennis\", \"Lembree\", role = \"ctb\", comment = \"Bootstrap accessibility plugin\"), person(\"Srinivasu\", \"Chakravarthula\", role = \"ctb\", comment = \"Bootstrap accessibility plugin\"), person(\"Cathy\", \"O'Connor\", role = \"ctb\", comment = \"Bootstrap accessibility plugin\"), person(family = \"PayPal, Inc\", role = \"cph\", comment = \"Bootstrap accessibility plugin\"), person(\"Stefan\", \"Petre\", role = c(\"ctb\", \"cph\"), comment = \"Bootstrap-datepicker library\"), person(\"Andrew\", \"Rowls\", role = c(\"ctb\", \"cph\"), comment = \"Bootstrap-datepicker library\"), person(\"Brian\", 
\"Reavis\", role = c(\"ctb\", \"cph\"), comment = \"selectize.js library\"), person(\"Salmen\", \"Bejaoui\", role = c(\"ctb\", \"cph\"), comment = \"selectize-plugin-a11y library\"), person(\"Denis\", \"Ineshin\", role = c(\"ctb\", \"cph\"), comment = \"ion.rangeSlider library\"), person(\"Sami\", \"Samhuri\", role = c(\"ctb\", \"cph\"), comment = \"Javascript strftime library\"), person(family = \"SpryMedia Limited\", role = c(\"ctb\", \"cph\"), comment = \"DataTables library\"), person(\"John\", \"Fraser\", role = c(\"ctb\", \"cph\"), comment = \"showdown.js library\"), person(\"John\", \"Gruber\", role = c(\"ctb\", \"cph\"), comment = \"showdown.js library\"), person(\"Ivan\", \"Sagalaev\", role = c(\"ctb\", \"cph\"), comment = \"highlight.js library\"), person(given = \"R Core Team\", role = c(\"ctb\", \"cph\"), comment = \"tar implementation from R\") )", - "Description": "Makes it incredibly easy to build interactive web applications with R. Automatic \"reactive\" binding between inputs and outputs and extensive prebuilt widgets make it possible to build beautiful, responsive, and powerful applications with minimal effort.", - "License": "GPL-3 | file LICENSE", - "Depends": [ - "R (>= 3.0.2)", - "methods" - ], - "Imports": [ - "utils", - "grDevices", - "httpuv (>= 1.5.2)", - "mime (>= 0.3)", - "jsonlite (>= 0.9.16)", - "xtable", - "fontawesome (>= 0.4.0)", - "htmltools (>= 0.5.4)", - "R6 (>= 2.0)", - "sourcetools", - "later (>= 1.0.0)", - "promises (>= 1.3.2)", - "tools", - "cli", - "rlang (>= 0.4.10)", - "fastmap (>= 1.1.1)", - "withr", - "commonmark (>= 1.7)", - "glue (>= 1.3.2)", - "bslib (>= 0.6.0)", - "cachem (>= 1.1.0)", - "lifecycle (>= 0.2.0)" - ], - "Suggests": [ - "coro (>= 1.1.0)", - "datasets", - "DT", - "Cairo (>= 1.5-5)", - "testthat (>= 3.2.1)", - "knitr (>= 1.6)", - "markdown", - "rmarkdown", - "ggplot2", - "reactlog (>= 1.0.0)", - "magrittr", - "yaml", - "mirai", - "future", - "dygraphs", - "ragg", - "showtext", - "sass", - "watcher" - ], - "URL": "https://shiny.posit.co/, https://github.com/rstudio/shiny", - "BugReports": "https://github.com/rstudio/shiny/issues", - "Collate": "'globals.R' 'app-state.R' 'app_template.R' 'bind-cache.R' 'bind-event.R' 'bookmark-state-local.R' 'bookmark-state.R' 'bootstrap-deprecated.R' 'bootstrap-layout.R' 'conditions.R' 'map.R' 'utils.R' 'bootstrap.R' 'busy-indicators-spinners.R' 'busy-indicators.R' 'cache-utils.R' 'deprecated.R' 'devmode.R' 'diagnose.R' 'extended-task.R' 'fileupload.R' 'graph.R' 'reactives.R' 'reactive-domains.R' 'history.R' 'hooks.R' 'html-deps.R' 'image-interact-opts.R' 'image-interact.R' 'imageutils.R' 'input-action.R' 'input-checkbox.R' 'input-checkboxgroup.R' 'input-date.R' 'input-daterange.R' 'input-file.R' 'input-numeric.R' 'input-password.R' 'input-radiobuttons.R' 'input-select.R' 'input-slider.R' 'input-submit.R' 'input-text.R' 'input-textarea.R' 'input-utils.R' 'insert-tab.R' 'insert-ui.R' 'jqueryui.R' 'knitr.R' 'middleware-shiny.R' 'middleware.R' 'timer.R' 'shiny.R' 'mock-session.R' 'modal.R' 'modules.R' 'notifications.R' 'priorityqueue.R' 'progress.R' 'react.R' 'reexports.R' 'render-cached-plot.R' 'render-plot.R' 'render-table.R' 'run-url.R' 'runapp.R' 'serializers.R' 'server-input-handlers.R' 'server-resource-paths.R' 'server.R' 'shiny-options.R' 'shiny-package.R' 'shinyapp.R' 'shinyui.R' 'shinywrappers.R' 'showcase.R' 'snapshot.R' 'staticimports.R' 'tar.R' 'test-export.R' 'test-server.R' 'test.R' 'update-input.R' 'utils-lang.R' 'version_bs_date_picker.R' 'version_ion_range_slider.R' 'version_jquery.R' 
'version_jqueryui.R' 'version_selectize.R' 'version_strftime.R' 'viewer.R'", - "RoxygenNote": "7.3.2", - "Encoding": "UTF-8", - "Config/testthat/edition": "3", - "Config/Needs/check": "shinytest2", - "NeedsCompilation": "no", - "Author": "Winston Chang [aut, cre] (ORCID: ), Joe Cheng [aut], JJ Allaire [aut], Carson Sievert [aut] (ORCID: ), Barret Schloerke [aut] (ORCID: ), Yihui Xie [aut], Jeff Allen [aut], Jonathan McPherson [aut], Alan Dipert [aut], Barbara Borges [aut], Posit Software, PBC [cph, fnd], jQuery Foundation [cph] (jQuery library and jQuery UI library), jQuery contributors [ctb, cph] (jQuery library; authors listed in inst/www/shared/jquery-AUTHORS.txt), jQuery UI contributors [ctb, cph] (jQuery UI library; authors listed in inst/www/shared/jqueryui/AUTHORS.txt), Mark Otto [ctb] (Bootstrap library), Jacob Thornton [ctb] (Bootstrap library), Bootstrap contributors [ctb] (Bootstrap library), Twitter, Inc [cph] (Bootstrap library), Prem Nawaz Khan [ctb] (Bootstrap accessibility plugin), Victor Tsaran [ctb] (Bootstrap accessibility plugin), Dennis Lembree [ctb] (Bootstrap accessibility plugin), Srinivasu Chakravarthula [ctb] (Bootstrap accessibility plugin), Cathy O'Connor [ctb] (Bootstrap accessibility plugin), PayPal, Inc [cph] (Bootstrap accessibility plugin), Stefan Petre [ctb, cph] (Bootstrap-datepicker library), Andrew Rowls [ctb, cph] (Bootstrap-datepicker library), Brian Reavis [ctb, cph] (selectize.js library), Salmen Bejaoui [ctb, cph] (selectize-plugin-a11y library), Denis Ineshin [ctb, cph] (ion.rangeSlider library), Sami Samhuri [ctb, cph] (Javascript strftime library), SpryMedia Limited [ctb, cph] (DataTables library), John Fraser [ctb, cph] (showdown.js library), John Gruber [ctb, cph] (showdown.js library), Ivan Sagalaev [ctb, cph] (highlight.js library), R Core Team [ctb, cph] (tar implementation from R)", - "Maintainer": "Winston Chang ", - "Repository": "CRAN" - }, "snakecase": { "Package": "snakecase", "Version": "0.11.1", @@ -3145,28 +1324,6 @@ "Author": "Malte Grosser [aut, cre]", "Repository": "CRAN" }, - "sourcetools": { - "Package": "sourcetools", - "Version": "0.1.7-1", - "Source": "Repository", - "Type": "Package", - "Title": "Tools for Reading, Tokenizing and Parsing R Code", - "Author": "Kevin Ushey", - "Maintainer": "Kevin Ushey ", - "Description": "Tools for the reading and tokenization of R code. 
The 'sourcetools' package provides both an R and C++ interface for the tokenization of R code, and helpers for interacting with the tokenized representation of R code.", - "License": "MIT + file LICENSE", - "Depends": [ - "R (>= 3.0.2)" - ], - "Suggests": [ - "testthat" - ], - "RoxygenNote": "5.0.1", - "BugReports": "https://github.com/kevinushey/sourcetools/issues", - "Encoding": "UTF-8", - "NeedsCompilation": "yes", - "Repository": "CRAN" - }, "stringi": { "Package": "stringi", "Version": "1.8.7", @@ -3212,205 +1369,57 @@ ], "Imports": [ "cli", - "glue (>= 1.6.1)", - "lifecycle (>= 1.0.3)", - "magrittr", - "rlang (>= 1.0.0)", - "stringi (>= 1.5.3)", - "vctrs (>= 0.4.0)" - ], - "Suggests": [ - "covr", - "dplyr", - "gt", - "htmltools", - "htmlwidgets", - "knitr", - "rmarkdown", - "testthat (>= 3.0.0)", - "tibble" - ], - "VignetteBuilder": "knitr", - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Encoding": "UTF-8", - "LazyData": "true", - "RoxygenNote": "7.2.3", - "NeedsCompilation": "no", - "Author": "Hadley Wickham [aut, cre, cph], Posit Software, PBC [cph, fnd]", - "Maintainer": "Hadley Wickham ", - "Repository": "CRAN" - }, - "sys": { - "Package": "sys", - "Version": "3.4.3", - "Source": "Repository", - "Type": "Package", - "Title": "Powerful and Reliable Tools for Running System Commands in R", - "Authors@R": "c(person(\"Jeroen\", \"Ooms\", role = c(\"aut\", \"cre\"), email = \"jeroenooms@gmail.com\", comment = c(ORCID = \"0000-0002-4035-0289\")), person(\"Gábor\", \"Csárdi\", , \"csardi.gabor@gmail.com\", role = \"ctb\"))", - "Description": "Drop-in replacements for the base system2() function with fine control and consistent behavior across platforms. Supports clean interruption, timeout, background tasks, and streaming STDIN / STDOUT / STDERR over binary or text connections. Arguments on Windows automatically get encoded and quoted to work on different locales.", - "License": "MIT + file LICENSE", - "URL": "https://jeroen.r-universe.dev/sys", - "BugReports": "https://github.com/jeroen/sys/issues", - "Encoding": "UTF-8", - "RoxygenNote": "7.1.1", - "Suggests": [ - "unix (>= 1.4)", - "spelling", - "testthat" - ], - "Language": "en-US", - "NeedsCompilation": "yes", - "Author": "Jeroen Ooms [aut, cre] (), Gábor Csárdi [ctb]", - "Maintainer": "Jeroen Ooms ", - "Repository": "CRAN" - }, - "systemfonts": { - "Package": "systemfonts", - "Version": "1.2.3", - "Source": "Repository", - "Type": "Package", - "Title": "System Native Font Finding", - "Authors@R": "c( person(\"Thomas Lin\", \"Pedersen\", , \"thomas.pedersen@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0002-5147-4711\")), person(\"Jeroen\", \"Ooms\", , \"jeroen@berkeley.edu\", role = \"aut\", comment = c(ORCID = \"0000-0002-4035-0289\")), person(\"Devon\", \"Govett\", role = \"aut\", comment = \"Author of font-manager\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\"), comment = c(ROR = \"03wc8by49\")) )", - "Description": "Provides system native access to the font catalogue. As font handling varies between systems it is difficult to correctly locate installed fonts across different operating systems. The 'systemfonts' package provides bindings to the native libraries on Windows, macOS and Linux for finding font files that can then be used further by e.g. graphic devices. 
The main use is intended to be from compiled code but 'systemfonts' also provides access from R.", - "License": "MIT + file LICENSE", - "URL": "https://github.com/r-lib/systemfonts, https://systemfonts.r-lib.org", - "BugReports": "https://github.com/r-lib/systemfonts/issues", - "Depends": [ - "R (>= 3.2.0)" - ], - "Imports": [ - "base64enc", - "grid", - "jsonlite", - "lifecycle", - "tools", - "utils" - ], - "Suggests": [ - "covr", - "farver", - "graphics", - "knitr", - "rmarkdown", - "testthat (>= 2.1.0)" - ], - "LinkingTo": [ - "cpp11 (>= 0.2.1)" - ], - "VignetteBuilder": "knitr", - "Config/build/compilation-database": "true", - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/usethis/last-upkeep": "2025-04-23", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "SystemRequirements": "fontconfig, freetype2", - "NeedsCompilation": "yes", - "Author": "Thomas Lin Pedersen [aut, cre] (ORCID: ), Jeroen Ooms [aut] (ORCID: ), Devon Govett [aut] (Author of font-manager), Posit Software, PBC [cph, fnd] (ROR: )", - "Maintainer": "Thomas Lin Pedersen ", - "Repository": "CRAN" - }, - "testthat": { - "Package": "testthat", - "Version": "3.2.3", - "Source": "Repository", - "Title": "Unit Testing for R", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")), person(\"R Core team\", role = \"ctb\", comment = \"Implementation of utils::recover()\") )", - "Description": "Software testing is important, but, in part because it is frustrating and boring, many of us avoid it. 'testthat' is a testing framework for R that is easy to learn and use, and integrates with your existing 'workflow'.", - "License": "MIT + file LICENSE", - "URL": "https://testthat.r-lib.org, https://github.com/r-lib/testthat", - "BugReports": "https://github.com/r-lib/testthat/issues", - "Depends": [ - "R (>= 3.6.0)" - ], - "Imports": [ - "brio (>= 1.1.3)", - "callr (>= 3.7.3)", - "cli (>= 3.6.1)", - "desc (>= 1.4.2)", - "digest (>= 0.6.33)", - "evaluate (>= 1.0.1)", - "jsonlite (>= 1.8.7)", + "glue (>= 1.6.1)", "lifecycle (>= 1.0.3)", - "magrittr (>= 2.0.3)", - "methods", - "pkgload (>= 1.3.2.1)", - "praise (>= 1.0.0)", - "processx (>= 3.8.2)", - "ps (>= 1.7.5)", - "R6 (>= 2.5.1)", - "rlang (>= 1.1.1)", - "utils", - "waldo (>= 0.6.0)", - "withr (>= 3.0.2)" + "magrittr", + "rlang (>= 1.0.0)", + "stringi (>= 1.5.3)", + "vctrs (>= 0.4.0)" ], "Suggests": [ "covr", - "curl (>= 0.9.5)", - "diffviewer (>= 0.1.0)", + "dplyr", + "gt", + "htmltools", + "htmlwidgets", "knitr", "rmarkdown", - "rstudioapi", - "S7", - "shiny", - "usethis", - "vctrs (>= 0.1.0)", - "xml2" + "testthat (>= 3.0.0)", + "tibble" ], "VignetteBuilder": "knitr", "Config/Needs/website": "tidyverse/tidytemplate", "Config/testthat/edition": "3", - "Config/testthat/parallel": "true", - "Config/testthat/start-first": "watcher, parallel*", "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "NeedsCompilation": "yes", - "Author": "Hadley Wickham [aut, cre], Posit Software, PBC [cph, fnd], R Core team [ctb] (Implementation of utils::recover())", + "LazyData": "true", + "RoxygenNote": "7.2.3", + "NeedsCompilation": "no", + "Author": "Hadley Wickham [aut, cre, cph], Posit Software, PBC [cph, fnd]", "Maintainer": "Hadley Wickham ", "Repository": "CRAN" }, - "textshaping": { - "Package": "textshaping", - "Version": "1.0.1", + "sys": { + "Package": "sys", + "Version": "3.4.3", "Source": "Repository", - "Title": "Bindings to the 'HarfBuzz' and 'Fribidi' Libraries for Text 
Shaping", - "Authors@R": "c( person(\"Thomas Lin\", \"Pedersen\", , \"thomas.pedersen@posit.co\", role = c(\"cre\", \"aut\"), comment = c(ORCID = \"0000-0002-5147-4711\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\"), comment = c(ROR = \"03wc8by49\")) )", - "Description": "Provides access to the text shaping functionality in the 'HarfBuzz' library and the bidirectional algorithm in the 'Fribidi' library. 'textshaping' is a low-level utility package mainly for graphic devices that expands upon the font tool-set provided by the 'systemfonts' package.", + "Type": "Package", + "Title": "Powerful and Reliable Tools for Running System Commands in R", + "Authors@R": "c(person(\"Jeroen\", \"Ooms\", role = c(\"aut\", \"cre\"), email = \"jeroenooms@gmail.com\", comment = c(ORCID = \"0000-0002-4035-0289\")), person(\"Gábor\", \"Csárdi\", , \"csardi.gabor@gmail.com\", role = \"ctb\"))", + "Description": "Drop-in replacements for the base system2() function with fine control and consistent behavior across platforms. Supports clean interruption, timeout, background tasks, and streaming STDIN / STDOUT / STDERR over binary or text connections. Arguments on Windows automatically get encoded and quoted to work on different locales.", "License": "MIT + file LICENSE", - "URL": "https://github.com/r-lib/textshaping", - "BugReports": "https://github.com/r-lib/textshaping/issues", - "Depends": [ - "R (>= 3.2.0)" - ], - "Imports": [ - "lifecycle", - "stats", - "stringi", - "systemfonts (>= 1.1.0)", - "utils" - ], + "URL": "https://jeroen.r-universe.dev/sys", + "BugReports": "https://github.com/jeroen/sys/issues", + "Encoding": "UTF-8", + "RoxygenNote": "7.1.1", "Suggests": [ - "covr", - "grDevices", - "grid", - "knitr", - "rmarkdown", - "testthat (>= 3.0.0)" - ], - "LinkingTo": [ - "cpp11 (>= 0.2.1)", - "systemfonts (>= 1.0.0)" + "unix (>= 1.4)", + "spelling", + "testthat" ], - "VignetteBuilder": "knitr", - "Config/build/compilation-database": "true", - "Config/testthat/edition": "3", - "Config/usethis/last-upkeep": "2025-04-23", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "SystemRequirements": "freetype2, harfbuzz, fribidi", + "Language": "en-US", "NeedsCompilation": "yes", - "Author": "Thomas Lin Pedersen [cre, aut] (ORCID: ), Posit Software, PBC [cph, fnd] (ROR: )", - "Maintainer": "Thomas Lin Pedersen ", + "Author": "Jeroen Ooms [aut, cre] (), Gábor Csárdi [ctb]", + "Maintainer": "Jeroen Ooms ", "Repository": "CRAN" }, "tibble": { @@ -3598,118 +1607,34 @@ "Maintainer": "Vitalie Spinu ", "Repository": "CRAN" }, - "tinytex": { - "Package": "tinytex", - "Version": "0.57", - "Source": "Repository", - "Type": "Package", - "Title": "Helper Functions to Install and Maintain TeX Live, and Compile LaTeX Documents", - "Authors@R": "c( person(\"Yihui\", \"Xie\", role = c(\"aut\", \"cre\", \"cph\"), email = \"xie@yihui.name\", comment = c(ORCID = \"0000-0003-0645-5666\")), person(given = \"Posit Software, PBC\", role = c(\"cph\", \"fnd\")), person(\"Christophe\", \"Dervieux\", role = \"ctb\", comment = c(ORCID = \"0000-0003-4474-2498\")), person(\"Devon\", \"Ryan\", role = \"ctb\", email = \"dpryan79@gmail.com\", comment = c(ORCID = \"0000-0002-8549-0971\")), person(\"Ethan\", \"Heinzen\", role = \"ctb\"), person(\"Fernando\", \"Cagua\", role = \"ctb\"), person() )", - "Description": "Helper functions to install and maintain the 'LaTeX' distribution named 'TinyTeX' (), a lightweight, cross-platform, portable, and easy-to-maintain version of 'TeX Live'. 
This package also contains helper functions to compile 'LaTeX' documents, and install missing 'LaTeX' packages automatically.", - "Imports": [ - "xfun (>= 0.48)" - ], - "Suggests": [ - "testit", - "rstudioapi" - ], - "License": "MIT + file LICENSE", - "URL": "https://github.com/rstudio/tinytex", - "BugReports": "https://github.com/rstudio/tinytex/issues", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "NeedsCompilation": "no", - "Author": "Yihui Xie [aut, cre, cph] (), Posit Software, PBC [cph, fnd], Christophe Dervieux [ctb] (), Devon Ryan [ctb] (), Ethan Heinzen [ctb], Fernando Cagua [ctb]", - "Maintainer": "Yihui Xie ", - "Repository": "CRAN" - }, - "urlchecker": { - "Package": "urlchecker", - "Version": "1.0.1", - "Source": "Repository", - "Title": "Run CRAN URL Checks from Older R Versions", - "Authors@R": "c( person(\"R Core team\", role = \"aut\", comment = \"The code in urltools.R adapted from the tools package\"), person(\"Jim\", \"Hester\", role = \"aut\", comment = c(ORCID = \"0000-0002-2739-7082\")), person(\"Gábor\", \"Csárdi\", , \"csardi.gabor@gmail.com\", role = c(\"aut\", \"cre\")), person(\"RStudio\", role = c(\"cph\", \"fnd\")) )", - "Description": "Provide the URL checking tools available in R 4.1+ as a package for earlier versions of R. Also uses concurrent requests so can be much faster than the serial versions.", - "License": "GPL-3", - "URL": "https://github.com/r-lib/urlchecker", - "BugReports": "https://github.com/r-lib/urlchecker/issues", - "Depends": [ - "R (>= 3.3)" - ], - "Imports": [ - "cli", - "curl", - "tools", - "xml2" - ], - "Suggests": [ - "covr" - ], - "Encoding": "UTF-8", - "RoxygenNote": "7.1.2", - "NeedsCompilation": "no", - "Author": "R Core team [aut] (The code in urltools.R adapted from the tools package), Jim Hester [aut] (), Gábor Csárdi [aut, cre], RStudio [cph, fnd]", - "Maintainer": "Gábor Csárdi ", - "Repository": "CRAN" - }, - "usethis": { - "Package": "usethis", - "Version": "3.1.0", + "tzdb": { + "Package": "tzdb", + "Version": "0.5.0", "Source": "Repository", - "Title": "Automate Package and Project Setup", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = \"aut\", comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Jennifer\", \"Bryan\", , \"jenny@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0002-6983-2759\")), person(\"Malcolm\", \"Barrett\", , \"malcolmbarrett@gmail.com\", role = \"aut\", comment = c(ORCID = \"0000-0003-0299-5825\")), person(\"Andy\", \"Teucher\", , \"andy.teucher@posit.co\", role = \"aut\", comment = c(ORCID = \"0000-0002-7840-692X\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "Automate package and project setup tasks that are otherwise performed manually. This includes setting up unit testing, test coverage, continuous integration, Git, 'GitHub', licenses, 'Rcpp', 'RStudio' projects, and more.", + "Title": "Time Zone Database Information", + "Authors@R": "c( person(\"Davis\", \"Vaughan\", , \"davis@posit.co\", role = c(\"aut\", \"cre\")), person(\"Howard\", \"Hinnant\", role = \"cph\", comment = \"Author of the included date library\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "Provides an up-to-date copy of the Internet Assigned Numbers Authority (IANA) Time Zone Database. It is updated periodically to reflect changes made by political bodies to time zone boundaries, UTC offsets, and daylight saving time rules. 
Additionally, this package provides a C++ interface for working with the 'date' library. 'date' provides comprehensive support for working with dates and date-times, which this package exposes to make it easier for other R packages to utilize. Headers are provided for calendar specific calculations, along with a limited interface for time zone manipulations.", "License": "MIT + file LICENSE", - "URL": "https://usethis.r-lib.org, https://github.com/r-lib/usethis", - "BugReports": "https://github.com/r-lib/usethis/issues", + "URL": "https://tzdb.r-lib.org, https://github.com/r-lib/tzdb", + "BugReports": "https://github.com/r-lib/tzdb/issues", "Depends": [ - "R (>= 3.6)" - ], - "Imports": [ - "cli (>= 3.0.1)", - "clipr (>= 0.3.0)", - "crayon", - "curl (>= 2.7)", - "desc (>= 1.4.2)", - "fs (>= 1.3.0)", - "gert (>= 1.4.1)", - "gh (>= 1.2.1)", - "glue (>= 1.3.0)", - "jsonlite", - "lifecycle (>= 1.0.0)", - "purrr", - "rappdirs", - "rlang (>= 1.1.0)", - "rprojroot (>= 1.2)", - "rstudioapi", - "stats", - "tools", - "utils", - "whisker", - "withr (>= 2.3.0)", - "yaml" + "R (>= 4.0.0)" ], "Suggests": [ "covr", - "knitr", - "magick", - "pkgload (>= 1.3.2.1)", - "rmarkdown", - "roxygen2 (>= 7.1.2)", - "spelling (>= 1.2)", - "styler (>= 1.2.0)", - "testthat (>= 3.1.8)" + "testthat (>= 3.0.0)" ], - "Config/Needs/website": "r-lib/asciicast, tidyverse/tidytemplate, xml2", + "LinkingTo": [ + "cpp11 (>= 0.5.2)" + ], + "Biarch": "yes", + "Config/Needs/website": "tidyverse/tidytemplate", "Config/testthat/edition": "3", - "Config/testthat/parallel": "TRUE", - "Config/testthat/start-first": "github-actions, release", "Encoding": "UTF-8", - "Language": "en-US", "RoxygenNote": "7.3.2", - "NeedsCompilation": "no", - "Author": "Hadley Wickham [aut] (), Jennifer Bryan [aut, cre] (), Malcolm Barrett [aut] (), Andy Teucher [aut] (), Posit Software, PBC [cph, fnd]", - "Maintainer": "Jennifer Bryan ", + "NeedsCompilation": "yes", + "Author": "Davis Vaughan [aut, cre], Howard Hinnant [cph] (Author of the included date library), Posit Software, PBC [cph, fnd]", + "Maintainer": "Davis Vaughan ", "Repository": "CRAN" }, "utf8": { @@ -3790,63 +1715,77 @@ "Maintainer": "Davis Vaughan ", "Repository": "CRAN" }, - "waldo": { - "Package": "waldo", - "Version": "0.6.2", + "vroom": { + "Package": "vroom", + "Version": "1.6.7", "Source": "Repository", - "Title": "Find Differences Between R Objects", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "Compare complex R objects and reveal the key differences. 
Designed particularly for use in testing packages where being able to quickly isolate key differences makes understanding test failures much easier.", + "Title": "Read and Write Rectangular Text Data Quickly", + "Authors@R": "c( person(\"Jim\", \"Hester\", role = \"aut\", comment = c(ORCID = \"0000-0002-2739-7082\")), person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = \"aut\", comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Jennifer\", \"Bryan\", , \"jenny@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0002-6983-2759\")), person(\"Shelby\", \"Bearrows\", role = \"ctb\"), person(\"https://github.com/mandreyel/\", role = \"cph\", comment = \"mio library\"), person(\"Jukka\", \"Jylänki\", role = \"cph\", comment = \"grisu3 implementation\"), person(\"Mikkel\", \"Jørgensen\", role = \"cph\", comment = \"grisu3 implementation\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\"), comment = c(ROR = \"03wc8by49\")) )", + "Description": "The goal of 'vroom' is to read and write data (like 'csv', 'tsv' and 'fwf') quickly. When reading it uses a quick initial indexing step, then reads the values lazily , so only the data you actually use needs to be read. The writer formats the data in parallel and writes to disk asynchronously from formatting.", "License": "MIT + file LICENSE", - "URL": "https://waldo.r-lib.org, https://github.com/r-lib/waldo", - "BugReports": "https://github.com/r-lib/waldo/issues", + "URL": "https://vroom.r-lib.org, https://github.com/tidyverse/vroom", + "BugReports": "https://github.com/tidyverse/vroom/issues", "Depends": [ - "R (>= 4.0)" + "R (>= 4.1)" ], "Imports": [ - "cli", - "diffobj (>= 0.3.4)", + "bit64", + "cli (>= 3.2.0)", + "crayon", "glue", + "hms", + "lifecycle (>= 1.0.3)", "methods", - "rlang (>= 1.1.0)" + "rlang (>= 0.4.2)", + "stats", + "tibble (>= 2.0.0)", + "tidyselect", + "tzdb (>= 0.1.1)", + "vctrs (>= 0.2.0)", + "withr" ], "Suggests": [ - "bit64", - "R6", - "S7", - "testthat (>= 3.0.0)", - "withr", + "archive", + "bench (>= 1.1.0)", + "covr", + "curl", + "dplyr", + "forcats", + "fs", + "ggplot2", + "knitr", + "patchwork", + "prettyunits", + "purrr", + "rmarkdown", + "rstudioapi", + "scales", + "spelling", + "testthat (>= 2.1.0)", + "tidyr", + "utils", + "waldo", "xml2" ], - "Config/Needs/website": "tidyverse/tidytemplate", + "LinkingTo": [ + "cpp11 (>= 0.2.0)", + "progress (>= 1.2.3)", + "tzdb (>= 0.1.1)" + ], + "VignetteBuilder": "knitr", + "Config/Needs/website": "nycflights13, tidyverse/tidytemplate", "Config/testthat/edition": "3", + "Config/testthat/parallel": "false", + "Config/usethis/last-upkeep": "2025-11-25", + "Copyright": "file COPYRIGHTS", "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "NeedsCompilation": "no", - "Author": "Hadley Wickham [aut, cre], Posit Software, PBC [cph, fnd]", - "Maintainer": "Hadley Wickham ", + "Language": "en-US", + "RoxygenNote": "7.3.3", + "NeedsCompilation": "yes", + "Author": "Jim Hester [aut] (ORCID: ), Hadley Wickham [aut] (ORCID: ), Jennifer Bryan [aut, cre] (ORCID: ), Shelby Bearrows [ctb], https://github.com/mandreyel/ [cph] (mio library), Jukka Jylänki [cph] (grisu3 implementation), Mikkel Jørgensen [cph] (grisu3 implementation), Posit Software, PBC [cph, fnd] (ROR: )", + "Maintainer": "Jennifer Bryan ", "Repository": "CRAN" }, - "whisker": { - "Package": "whisker", - "Version": "0.4.1", - "Source": "Repository", - "Maintainer": "Edwin de Jonge ", - "License": "GPL-3", - "Title": "{{mustache}} for R, Logicless Templating", - "Type": "Package", - "LazyLoad": 
"yes", - "Author": "Edwin de Jonge", - "Description": "Implements 'Mustache' logicless templating.", - "URL": "https://github.com/edwindj/whisker", - "Suggests": [ - "markdown" - ], - "RoxygenNote": "6.1.1", - "NeedsCompilation": "no", - "Repository": "CRAN", - "Encoding": "UTF-8" - }, "withr": { "Package": "withr", "Version": "3.0.2", @@ -3885,53 +1824,6 @@ "Maintainer": "Lionel Henry ", "Repository": "CRAN" }, - "xfun": { - "Package": "xfun", - "Version": "0.52", - "Source": "Repository", - "Type": "Package", - "Title": "Supporting Functions for Packages Maintained by 'Yihui Xie'", - "Authors@R": "c( person(\"Yihui\", \"Xie\", role = c(\"aut\", \"cre\", \"cph\"), email = \"xie@yihui.name\", comment = c(ORCID = \"0000-0003-0645-5666\", URL = \"https://yihui.org\")), person(\"Wush\", \"Wu\", role = \"ctb\"), person(\"Daijiang\", \"Li\", role = \"ctb\"), person(\"Xianying\", \"Tan\", role = \"ctb\"), person(\"Salim\", \"Brüggemann\", role = \"ctb\", email = \"salim-b@pm.me\", comment = c(ORCID = \"0000-0002-5329-5987\")), person(\"Christophe\", \"Dervieux\", role = \"ctb\"), person() )", - "Description": "Miscellaneous functions commonly used in other packages maintained by 'Yihui Xie'.", - "Depends": [ - "R (>= 3.2.0)" - ], - "Imports": [ - "grDevices", - "stats", - "tools" - ], - "Suggests": [ - "testit", - "parallel", - "codetools", - "methods", - "rstudioapi", - "tinytex (>= 0.30)", - "mime", - "litedown (>= 0.4)", - "commonmark", - "knitr (>= 1.50)", - "remotes", - "pak", - "curl", - "xml2", - "jsonlite", - "magick", - "yaml", - "qs" - ], - "License": "MIT + file LICENSE", - "URL": "https://github.com/yihui/xfun", - "BugReports": "https://github.com/yihui/xfun/issues", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "VignetteBuilder": "litedown", - "NeedsCompilation": "yes", - "Author": "Yihui Xie [aut, cre, cph] (, https://yihui.org), Wush Wu [ctb], Daijiang Li [ctb], Xianying Tan [ctb], Salim Brüggemann [ctb] (), Christophe Dervieux [ctb]", - "Maintainer": "Yihui Xie ", - "Repository": "CRAN" - }, "xml2": { "Package": "xml2", "Version": "1.3.8", @@ -3972,111 +1864,6 @@ "Author": "Hadley Wickham [aut], Jim Hester [aut], Jeroen Ooms [aut, cre], Posit Software, PBC [cph, fnd], R Foundation [ctb] (Copy of R-project homepage cached as example)", "Maintainer": "Jeroen Ooms ", "Repository": "CRAN" - }, - "xopen": { - "Package": "xopen", - "Version": "1.0.1", - "Source": "Repository", - "Title": "Open System Files, 'URLs', Anything", - "Authors@R": "c( person(\"Gábor\", \"Csárdi\", , \"csardi.gabor@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Fathi\", \"Boudra\", role = \"aut\"), person(\"Rex\", \"Dieter\", role = \"aut\"), person(\"Kevin\", \"Krammer\", role = \"aut\"), person(\"Jeremy\", \"White\", role = \"aut\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "Cross platform solution to open files, directories or 'URLs' with their associated programs.", - "License": "MIT + file LICENSE", - "URL": "https://github.com/r-lib/xopen#readme, https://r-lib.github.io/xopen/", - "BugReports": "https://github.com/r-lib/xopen/issues", - "Depends": [ - "R (>= 3.1)" - ], - "Imports": [ - "processx" - ], - "Suggests": [ - "ps", - "testthat (>= 3.0.0)" - ], - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Encoding": "UTF-8", - "RoxygenNote": "7.2.3", - "NeedsCompilation": "no", - "Author": "Gábor Csárdi [aut, cre], Fathi Boudra [aut], Rex Dieter [aut], Kevin Krammer [aut], Jeremy White [aut], Posit Software, PBC [cph, 
fnd]", - "Maintainer": "Gábor Csárdi ", - "Repository": "CRAN" - }, - "xtable": { - "Package": "xtable", - "Version": "1.8-4", - "Source": "Repository", - "Date": "2019-04-08", - "Title": "Export Tables to LaTeX or HTML", - "Authors@R": "c(person(\"David B.\", \"Dahl\", role=\"aut\"), person(\"David\", \"Scott\", role=c(\"aut\",\"cre\"), email=\"d.scott@auckland.ac.nz\"), person(\"Charles\", \"Roosen\", role=\"aut\"), person(\"Arni\", \"Magnusson\", role=\"aut\"), person(\"Jonathan\", \"Swinton\", role=\"aut\"), person(\"Ajay\", \"Shah\", role=\"ctb\"), person(\"Arne\", \"Henningsen\", role=\"ctb\"), person(\"Benno\", \"Puetz\", role=\"ctb\"), person(\"Bernhard\", \"Pfaff\", role=\"ctb\"), person(\"Claudio\", \"Agostinelli\", role=\"ctb\"), person(\"Claudius\", \"Loehnert\", role=\"ctb\"), person(\"David\", \"Mitchell\", role=\"ctb\"), person(\"David\", \"Whiting\", role=\"ctb\"), person(\"Fernando da\", \"Rosa\", role=\"ctb\"), person(\"Guido\", \"Gay\", role=\"ctb\"), person(\"Guido\", \"Schulz\", role=\"ctb\"), person(\"Ian\", \"Fellows\", role=\"ctb\"), person(\"Jeff\", \"Laake\", role=\"ctb\"), person(\"John\", \"Walker\", role=\"ctb\"), person(\"Jun\", \"Yan\", role=\"ctb\"), person(\"Liviu\", \"Andronic\", role=\"ctb\"), person(\"Markus\", \"Loecher\", role=\"ctb\"), person(\"Martin\", \"Gubri\", role=\"ctb\"), person(\"Matthieu\", \"Stigler\", role=\"ctb\"), person(\"Robert\", \"Castelo\", role=\"ctb\"), person(\"Seth\", \"Falcon\", role=\"ctb\"), person(\"Stefan\", \"Edwards\", role=\"ctb\"), person(\"Sven\", \"Garbade\", role=\"ctb\"), person(\"Uwe\", \"Ligges\", role=\"ctb\"))", - "Maintainer": "David Scott ", - "Imports": [ - "stats", - "utils" - ], - "Suggests": [ - "knitr", - "plm", - "zoo", - "survival" - ], - "VignetteBuilder": "knitr", - "Description": "Coerce data to LaTeX and HTML tables.", - "URL": "http://xtable.r-forge.r-project.org/", - "Depends": [ - "R (>= 2.10.0)" - ], - "License": "GPL (>= 2)", - "Repository": "CRAN", - "NeedsCompilation": "no", - "Author": "David B. Dahl [aut], David Scott [aut, cre], Charles Roosen [aut], Arni Magnusson [aut], Jonathan Swinton [aut], Ajay Shah [ctb], Arne Henningsen [ctb], Benno Puetz [ctb], Bernhard Pfaff [ctb], Claudio Agostinelli [ctb], Claudius Loehnert [ctb], David Mitchell [ctb], David Whiting [ctb], Fernando da Rosa [ctb], Guido Gay [ctb], Guido Schulz [ctb], Ian Fellows [ctb], Jeff Laake [ctb], John Walker [ctb], Jun Yan [ctb], Liviu Andronic [ctb], Markus Loecher [ctb], Martin Gubri [ctb], Matthieu Stigler [ctb], Robert Castelo [ctb], Seth Falcon [ctb], Stefan Edwards [ctb], Sven Garbade [ctb], Uwe Ligges [ctb]" - }, - "yaml": { - "Package": "yaml", - "Version": "2.3.10", - "Source": "Repository", - "Type": "Package", - "Title": "Methods to Convert R Data to YAML and Back", - "Date": "2024-07-22", - "Suggests": [ - "RUnit" - ], - "Author": "Shawn P Garbett [aut], Jeremy Stephens [aut, cre], Kirill Simonov [aut], Yihui Xie [ctb], Zhuoer Dong [ctb], Hadley Wickham [ctb], Jeffrey Horner [ctb], reikoch [ctb], Will Beasley [ctb], Brendan O'Connor [ctb], Gregory R. Warnes [ctb], Michael Quinn [ctb], Zhian N. 
Kamvar [ctb], Charlie Gao [ctb]", - "Maintainer": "Shawn Garbett ", - "License": "BSD_3_clause + file LICENSE", - "Description": "Implements the 'libyaml' 'YAML' 1.1 parser and emitter () for R.", - "URL": "https://github.com/vubiostat/r-yaml/", - "BugReports": "https://github.com/vubiostat/r-yaml/issues", - "NeedsCompilation": "yes", - "Repository": "CRAN" - }, - "zip": { - "Package": "zip", - "Version": "2.3.3", - "Source": "Repository", - "Title": "Cross-Platform 'zip' Compression", - "Authors@R": "c( person(\"Gábor\", \"Csárdi\", , \"csardi.gabor@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Kuba\", \"Podgórski\", role = \"ctb\"), person(\"Rich\", \"Geldreich\", role = \"ctb\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\"), comment = c(ROR = \"03wc8by49\")) )", - "Description": "Cross-Platform 'zip' Compression Library. A replacement for the 'zip' function, that does not require any additional external tools on any platform.", - "License": "MIT + file LICENSE", - "URL": "https://github.com/r-lib/zip, https://r-lib.github.io/zip/", - "BugReports": "https://github.com/r-lib/zip/issues", - "Suggests": [ - "covr", - "pillar", - "processx", - "R6", - "testthat", - "withr" - ], - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Config/usethis/last-upkeep": "2025-05-07", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2.9000", - "NeedsCompilation": "yes", - "Author": "Gábor Csárdi [aut, cre], Kuba Podgórski [ctb], Rich Geldreich [ctb], Posit Software, PBC [cph, fnd] (ROR: )", - "Maintainer": "Gábor Csárdi ", - "Repository": "CRAN" } } } diff --git a/renv/.gitignore b/renv/.gitignore index 0ec0cbb..97817b1 100644 --- a/renv/.gitignore +++ b/renv/.gitignore @@ -1,7 +1,7 @@ -library/ -local/ -cellar/ -lock/ -python/ -sandbox/ -staging/ +library/ +local/ +cellar/ +lock/ +python/ +sandbox/ +staging/ diff --git a/renv/activate.R b/renv/activate.R index 90b251c..256edab 100644 --- a/renv/activate.R +++ b/renv/activate.R @@ -1,1334 +1,1334 @@ - -local({ - - # the requested version of renv - version <- "1.1.4" - attr(version, "sha") <- NULL - - # the project directory - project <- Sys.getenv("RENV_PROJECT") - if (!nzchar(project)) - project <- getwd() - - # use start-up diagnostics if enabled - diagnostics <- Sys.getenv("RENV_STARTUP_DIAGNOSTICS", unset = "FALSE") - if (diagnostics) { - start <- Sys.time() - profile <- tempfile("renv-startup-", fileext = ".Rprof") - utils::Rprof(profile) - on.exit({ - utils::Rprof(NULL) - elapsed <- signif(difftime(Sys.time(), start, units = "auto"), digits = 2L) - writeLines(sprintf("- renv took %s to run the autoloader.", format(elapsed))) - writeLines(sprintf("- Profile: %s", profile)) - print(utils::summaryRprof(profile)) - }, add = TRUE) - } - - # figure out whether the autoloader is enabled - enabled <- local({ - - # first, check config option - override <- getOption("renv.config.autoloader.enabled") - if (!is.null(override)) - return(override) - - # if we're being run in a context where R_LIBS is already set, - # don't load -- presumably we're being run as a sub-process and - # the parent process has already set up library paths for us - rcmd <- Sys.getenv("R_CMD", unset = NA) - rlibs <- Sys.getenv("R_LIBS", unset = NA) - if (!is.na(rlibs) && !is.na(rcmd)) - return(FALSE) - - # next, check environment variables - # prefer using the configuration one in the future - envvars <- c( - "RENV_CONFIG_AUTOLOADER_ENABLED", - "RENV_AUTOLOADER_ENABLED", - "RENV_ACTIVATE_PROJECT" - ) - - for (envvar in envvars) { - 
envval <- Sys.getenv(envvar, unset = NA) - if (!is.na(envval)) - return(tolower(envval) %in% c("true", "t", "1")) - } - - # enable by default - TRUE - - }) - - # bail if we're not enabled - if (!enabled) { - - # if we're not enabled, we might still need to manually load - # the user profile here - profile <- Sys.getenv("R_PROFILE_USER", unset = "~/.Rprofile") - if (file.exists(profile)) { - cfg <- Sys.getenv("RENV_CONFIG_USER_PROFILE", unset = "TRUE") - if (tolower(cfg) %in% c("true", "t", "1")) - sys.source(profile, envir = globalenv()) - } - - return(FALSE) - - } - - # avoid recursion - if (identical(getOption("renv.autoloader.running"), TRUE)) { - warning("ignoring recursive attempt to run renv autoloader") - return(invisible(TRUE)) - } - - # signal that we're loading renv during R startup - options(renv.autoloader.running = TRUE) - on.exit(options(renv.autoloader.running = NULL), add = TRUE) - - # signal that we've consented to use renv - options(renv.consent = TRUE) - - # load the 'utils' package eagerly -- this ensures that renv shims, which - # mask 'utils' packages, will come first on the search path - library(utils, lib.loc = .Library) - - # unload renv if it's already been loaded - if ("renv" %in% loadedNamespaces()) - unloadNamespace("renv") - - # load bootstrap tools - ansify <- function(text) { - if (renv_ansify_enabled()) - renv_ansify_enhanced(text) - else - renv_ansify_default(text) - } - - renv_ansify_enabled <- function() { - - override <- Sys.getenv("RENV_ANSIFY_ENABLED", unset = NA) - if (!is.na(override)) - return(as.logical(override)) - - pane <- Sys.getenv("RSTUDIO_CHILD_PROCESS_PANE", unset = NA) - if (identical(pane, "build")) - return(FALSE) - - testthat <- Sys.getenv("TESTTHAT", unset = "false") - if (tolower(testthat) %in% "true") - return(FALSE) - - iderun <- Sys.getenv("R_CLI_HAS_HYPERLINK_IDE_RUN", unset = "false") - if (tolower(iderun) %in% "false") - return(FALSE) - - TRUE - - } - - renv_ansify_default <- function(text) { - text - } - - renv_ansify_enhanced <- function(text) { - - # R help links - pattern <- "`\\?(renv::(?:[^`])+)`" - replacement <- "`\033]8;;x-r-help:\\1\a?\\1\033]8;;\a`" - text <- gsub(pattern, replacement, text, perl = TRUE) - - # runnable code - pattern <- "`(renv::(?:[^`])+)`" - replacement <- "`\033]8;;x-r-run:\\1\a\\1\033]8;;\a`" - text <- gsub(pattern, replacement, text, perl = TRUE) - - # return ansified text - text - - } - - renv_ansify_init <- function() { - - envir <- renv_envir_self() - if (renv_ansify_enabled()) - assign("ansify", renv_ansify_enhanced, envir = envir) - else - assign("ansify", renv_ansify_default, envir = envir) - - } - - `%||%` <- function(x, y) { - if (is.null(x)) y else x - } - - catf <- function(fmt, ..., appendLF = TRUE) { - - quiet <- getOption("renv.bootstrap.quiet", default = FALSE) - if (quiet) - return(invisible()) - - msg <- sprintf(fmt, ...) - cat(msg, file = stdout(), sep = if (appendLF) "\n" else "") - - invisible(msg) - - } - - header <- function(label, - ..., - prefix = "#", - suffix = "-", - n = min(getOption("width"), 78)) - { - label <- sprintf(label, ...) 
- n <- max(n - nchar(label) - nchar(prefix) - 2L, 8L) - if (n <= 0) - return(paste(prefix, label)) - - tail <- paste(rep.int(suffix, n), collapse = "") - paste0(prefix, " ", label, " ", tail) - - } - - heredoc <- function(text, leave = 0) { - - # remove leading, trailing whitespace - trimmed <- gsub("^\\s*\\n|\\n\\s*$", "", text) - - # split into lines - lines <- strsplit(trimmed, "\n", fixed = TRUE)[[1L]] - - # compute common indent - indent <- regexpr("[^[:space:]]", lines) - common <- min(setdiff(indent, -1L)) - leave - text <- paste(substring(lines, common), collapse = "\n") - - # substitute in ANSI links for executable renv code - ansify(text) - - } - - bootstrap <- function(version, library) { - - friendly <- renv_bootstrap_version_friendly(version) - section <- header(sprintf("Bootstrapping renv %s", friendly)) - catf(section) - - # attempt to download renv - catf("- Downloading renv ... ", appendLF = FALSE) - withCallingHandlers( - tarball <- renv_bootstrap_download(version), - error = function(err) { - catf("FAILED") - stop("failed to download:\n", conditionMessage(err)) - } - ) - catf("OK") - on.exit(unlink(tarball), add = TRUE) - - # now attempt to install - catf("- Installing renv ... ", appendLF = FALSE) - withCallingHandlers( - status <- renv_bootstrap_install(version, tarball, library), - error = function(err) { - catf("FAILED") - stop("failed to install:\n", conditionMessage(err)) - } - ) - catf("OK") - - # add empty line to break up bootstrapping from normal output - catf("") - - return(invisible()) - } - - renv_bootstrap_tests_running <- function() { - getOption("renv.tests.running", default = FALSE) - } - - renv_bootstrap_repos <- function() { - - # get CRAN repository - cran <- getOption("renv.repos.cran", "https://cloud.r-project.org") - - # check for repos override - repos <- Sys.getenv("RENV_CONFIG_REPOS_OVERRIDE", unset = NA) - if (!is.na(repos)) { - - # check for RSPM; if set, use a fallback repository for renv - rspm <- Sys.getenv("RSPM", unset = NA) - if (identical(rspm, repos)) - repos <- c(RSPM = rspm, CRAN = cran) - - return(repos) - - } - - # check for lockfile repositories - repos <- tryCatch(renv_bootstrap_repos_lockfile(), error = identity) - if (!inherits(repos, "error") && length(repos)) - return(repos) - - # retrieve current repos - repos <- getOption("repos") - - # ensure @CRAN@ entries are resolved - repos[repos == "@CRAN@"] <- cran - - # add in renv.bootstrap.repos if set - default <- c(FALLBACK = "https://cloud.r-project.org") - extra <- getOption("renv.bootstrap.repos", default = default) - repos <- c(repos, extra) - - # remove duplicates that might've snuck in - dupes <- duplicated(repos) | duplicated(names(repos)) - repos[!dupes] - - } - - renv_bootstrap_repos_lockfile <- function() { - - lockpath <- Sys.getenv("RENV_PATHS_LOCKFILE", unset = "renv.lock") - if (!file.exists(lockpath)) - return(NULL) - - lockfile <- tryCatch(renv_json_read(lockpath), error = identity) - if (inherits(lockfile, "error")) { - warning(lockfile) - return(NULL) - } - - repos <- lockfile$R$Repositories - if (length(repos) == 0) - return(NULL) - - keys <- vapply(repos, `[[`, "Name", FUN.VALUE = character(1)) - vals <- vapply(repos, `[[`, "URL", FUN.VALUE = character(1)) - names(vals) <- keys - - return(vals) - - } - - renv_bootstrap_download <- function(version) { - - sha <- attr(version, "sha", exact = TRUE) - - methods <- if (!is.null(sha)) { - - # attempting to bootstrap a development version of renv - c( - function() renv_bootstrap_download_tarball(sha), - function() 
renv_bootstrap_download_github(sha) - ) - - } else { - - # attempting to bootstrap a release version of renv - c( - function() renv_bootstrap_download_tarball(version), - function() renv_bootstrap_download_cran_latest(version), - function() renv_bootstrap_download_cran_archive(version) - ) - - } - - for (method in methods) { - path <- tryCatch(method(), error = identity) - if (is.character(path) && file.exists(path)) - return(path) - } - - stop("All download methods failed") - - } - - renv_bootstrap_download_impl <- function(url, destfile) { - - mode <- "wb" - - # https://bugs.r-project.org/bugzilla/show_bug.cgi?id=17715 - fixup <- - Sys.info()[["sysname"]] == "Windows" && - substring(url, 1L, 5L) == "file:" - - if (fixup) - mode <- "w+b" - - args <- list( - url = url, - destfile = destfile, - mode = mode, - quiet = TRUE - ) - - if ("headers" %in% names(formals(utils::download.file))) { - headers <- renv_bootstrap_download_custom_headers(url) - if (length(headers) && is.character(headers)) - args$headers <- headers - } - - do.call(utils::download.file, args) - - } - - renv_bootstrap_download_custom_headers <- function(url) { - - headers <- getOption("renv.download.headers") - if (is.null(headers)) - return(character()) - - if (!is.function(headers)) - stopf("'renv.download.headers' is not a function") - - headers <- headers(url) - if (length(headers) == 0L) - return(character()) - - if (is.list(headers)) - headers <- unlist(headers, recursive = FALSE, use.names = TRUE) - - ok <- - is.character(headers) && - is.character(names(headers)) && - all(nzchar(names(headers))) - - if (!ok) - stop("invocation of 'renv.download.headers' did not return a named character vector") - - headers - - } - - renv_bootstrap_download_cran_latest <- function(version) { - - spec <- renv_bootstrap_download_cran_latest_find(version) - type <- spec$type - repos <- spec$repos - - baseurl <- utils::contrib.url(repos = repos, type = type) - ext <- if (identical(type, "source")) - ".tar.gz" - else if (Sys.info()[["sysname"]] == "Windows") - ".zip" - else - ".tgz" - name <- sprintf("renv_%s%s", version, ext) - url <- paste(baseurl, name, sep = "/") - - destfile <- file.path(tempdir(), name) - status <- tryCatch( - renv_bootstrap_download_impl(url, destfile), - condition = identity - ) - - if (inherits(status, "condition")) - return(FALSE) - - # report success and return - destfile - - } - - renv_bootstrap_download_cran_latest_find <- function(version) { - - # check whether binaries are supported on this system - binary <- - getOption("renv.bootstrap.binary", default = TRUE) && - !identical(.Platform$pkgType, "source") && - !identical(getOption("pkgType"), "source") && - Sys.info()[["sysname"]] %in% c("Darwin", "Windows") - - types <- c(if (binary) "binary", "source") - - # iterate over types + repositories - for (type in types) { - for (repos in renv_bootstrap_repos()) { - - # build arguments for utils::available.packages() call - args <- list(type = type, repos = repos) - - # add custom headers if available -- note that - # utils::available.packages() will pass this to download.file() - if ("headers" %in% names(formals(utils::download.file))) { - headers <- renv_bootstrap_download_custom_headers(repos) - if (length(headers) && is.character(headers)) - args$headers <- headers - } - - # retrieve package database - db <- tryCatch( - as.data.frame( - do.call(utils::available.packages, args), - stringsAsFactors = FALSE - ), - error = identity - ) - - if (inherits(db, "error")) - next - - # check for compatible entry - entry 
<- db[db$Package %in% "renv" & db$Version %in% version, ] - if (nrow(entry) == 0) - next - - # found it; return spec to caller - spec <- list(entry = entry, type = type, repos = repos) - return(spec) - - } - } - - # if we got here, we failed to find renv - fmt <- "renv %s is not available from your declared package repositories" - stop(sprintf(fmt, version)) - - } - - renv_bootstrap_download_cran_archive <- function(version) { - - name <- sprintf("renv_%s.tar.gz", version) - repos <- renv_bootstrap_repos() - urls <- file.path(repos, "src/contrib/Archive/renv", name) - destfile <- file.path(tempdir(), name) - - for (url in urls) { - - status <- tryCatch( - renv_bootstrap_download_impl(url, destfile), - condition = identity - ) - - if (identical(status, 0L)) - return(destfile) - - } - - return(FALSE) - - } - - renv_bootstrap_download_tarball <- function(version) { - - # if the user has provided the path to a tarball via - # an environment variable, then use it - tarball <- Sys.getenv("RENV_BOOTSTRAP_TARBALL", unset = NA) - if (is.na(tarball)) - return() - - # allow directories - if (dir.exists(tarball)) { - name <- sprintf("renv_%s.tar.gz", version) - tarball <- file.path(tarball, name) - } - - # bail if it doesn't exist - if (!file.exists(tarball)) { - - # let the user know we weren't able to honour their request - fmt <- "- RENV_BOOTSTRAP_TARBALL is set (%s) but does not exist." - msg <- sprintf(fmt, tarball) - warning(msg) - - # bail - return() - - } - - catf("- Using local tarball '%s'.", tarball) - tarball - - } - - renv_bootstrap_github_token <- function() { - for (envvar in c("GITHUB_TOKEN", "GITHUB_PAT", "GH_TOKEN")) { - envval <- Sys.getenv(envvar, unset = NA) - if (!is.na(envval)) - return(envval) - } - } - - renv_bootstrap_download_github <- function(version) { - - enabled <- Sys.getenv("RENV_BOOTSTRAP_FROM_GITHUB", unset = "TRUE") - if (!identical(enabled, "TRUE")) - return(FALSE) - - # prepare download options - token <- renv_bootstrap_github_token() - if (is.null(token)) - token <- "" - - if (nzchar(Sys.which("curl")) && nzchar(token)) { - fmt <- "--location --fail --header \"Authorization: token %s\"" - extra <- sprintf(fmt, token) - saved <- options("download.file.method", "download.file.extra") - options(download.file.method = "curl", download.file.extra = extra) - on.exit(do.call(base::options, saved), add = TRUE) - } else if (nzchar(Sys.which("wget")) && nzchar(token)) { - fmt <- "--header=\"Authorization: token %s\"" - extra <- sprintf(fmt, token) - saved <- options("download.file.method", "download.file.extra") - options(download.file.method = "wget", download.file.extra = extra) - on.exit(do.call(base::options, saved), add = TRUE) - } - - url <- file.path("https://api.github.com/repos/rstudio/renv/tarball", version) - name <- sprintf("renv_%s.tar.gz", version) - destfile <- file.path(tempdir(), name) - - status <- tryCatch( - renv_bootstrap_download_impl(url, destfile), - condition = identity - ) - - if (!identical(status, 0L)) - return(FALSE) - - renv_bootstrap_download_augment(destfile) - - return(destfile) - - } - - # Add Sha to DESCRIPTION. This is stop gap until #890, after which we - # can use renv::install() to fully capture metadata. 
- renv_bootstrap_download_augment <- function(destfile) { - sha <- renv_bootstrap_git_extract_sha1_tar(destfile) - if (is.null(sha)) { - return() - } - - # Untar - tempdir <- tempfile("renv-github-") - on.exit(unlink(tempdir, recursive = TRUE), add = TRUE) - untar(destfile, exdir = tempdir) - pkgdir <- dir(tempdir, full.names = TRUE)[[1]] - - # Modify description - desc_path <- file.path(pkgdir, "DESCRIPTION") - desc_lines <- readLines(desc_path) - remotes_fields <- c( - "RemoteType: github", - "RemoteHost: api.github.com", - "RemoteRepo: renv", - "RemoteUsername: rstudio", - "RemotePkgRef: rstudio/renv", - paste("RemoteRef: ", sha), - paste("RemoteSha: ", sha) - ) - writeLines(c(desc_lines[desc_lines != ""], remotes_fields), con = desc_path) - - # Re-tar - local({ - old <- setwd(tempdir) - on.exit(setwd(old), add = TRUE) - - tar(destfile, compression = "gzip") - }) - invisible() - } - - # Extract the commit hash from a git archive. Git archives include the SHA1 - # hash as the comment field of the tarball pax extended header - # (see https://www.kernel.org/pub/software/scm/git/docs/git-archive.html) - # For GitHub archives this should be the first header after the default one - # (512 byte) header. - renv_bootstrap_git_extract_sha1_tar <- function(bundle) { - - # open the bundle for reading - # We use gzcon for everything because (from ?gzcon) - # > Reading from a connection which does not supply a 'gzip' magic - # > header is equivalent to reading from the original connection - conn <- gzcon(file(bundle, open = "rb", raw = TRUE)) - on.exit(close(conn)) - - # The default pax header is 512 bytes long and the first pax extended header - # with the comment should be 51 bytes long - # `52 comment=` (11 chars) + 40 byte SHA1 hash - len <- 0x200 + 0x33 - res <- rawToChar(readBin(conn, "raw", n = len)[0x201:len]) - - if (grepl("^52 comment=", res)) { - sub("52 comment=", "", res) - } else { - NULL - } - } - - renv_bootstrap_install <- function(version, tarball, library) { - - # attempt to install it into project library - dir.create(library, showWarnings = FALSE, recursive = TRUE) - output <- renv_bootstrap_install_impl(library, tarball) - - # check for successful install - status <- attr(output, "status") - if (is.null(status) || identical(status, 0L)) - return(status) - - # an error occurred; report it - header <- "installation of renv failed" - lines <- paste(rep.int("=", nchar(header)), collapse = "") - text <- paste(c(header, lines, output), collapse = "\n") - stop(text) - - } - - renv_bootstrap_install_impl <- function(library, tarball) { - - # invoke using system2 so we can capture and report output - bin <- R.home("bin") - exe <- if (Sys.info()[["sysname"]] == "Windows") "R.exe" else "R" - R <- file.path(bin, exe) - - args <- c( - "--vanilla", "CMD", "INSTALL", "--no-multiarch", - "-l", shQuote(path.expand(library)), - shQuote(path.expand(tarball)) - ) - - system2(R, args, stdout = TRUE, stderr = TRUE) - - } - - renv_bootstrap_platform_prefix_default <- function() { - - # read version component - version <- Sys.getenv("RENV_PATHS_VERSION", unset = "R-%v") - - # expand placeholders - placeholders <- list( - list("%v", format(getRversion()[1, 1:2])), - list("%V", format(getRversion()[1, 1:3])) - ) - - for (placeholder in placeholders) - version <- gsub(placeholder[[1L]], placeholder[[2L]], version, fixed = TRUE) - - # include SVN revision for development versions of R - # (to avoid sharing platform-specific artefacts with released versions of R) - devel <- - 
identical(R.version[["status"]], "Under development (unstable)") || - identical(R.version[["nickname"]], "Unsuffered Consequences") - - if (devel) - version <- paste(version, R.version[["svn rev"]], sep = "-r") - - version - - } - - renv_bootstrap_platform_prefix <- function() { - - # construct version prefix - version <- renv_bootstrap_platform_prefix_default() - - # build list of path components - components <- c(version, R.version$platform) - - # include prefix if provided by user - prefix <- renv_bootstrap_platform_prefix_impl() - if (!is.na(prefix) && nzchar(prefix)) - components <- c(prefix, components) - - # build prefix - paste(components, collapse = "/") - - } - - renv_bootstrap_platform_prefix_impl <- function() { - - # if an explicit prefix has been supplied, use it - prefix <- Sys.getenv("RENV_PATHS_PREFIX", unset = NA) - if (!is.na(prefix)) - return(prefix) - - # if the user has requested an automatic prefix, generate it - auto <- Sys.getenv("RENV_PATHS_PREFIX_AUTO", unset = NA) - if (is.na(auto) && getRversion() >= "4.4.0") - auto <- "TRUE" - - if (auto %in% c("TRUE", "True", "true", "1")) - return(renv_bootstrap_platform_prefix_auto()) - - # empty string on failure - "" - - } - - renv_bootstrap_platform_prefix_auto <- function() { - - prefix <- tryCatch(renv_bootstrap_platform_os(), error = identity) - if (inherits(prefix, "error") || prefix %in% "unknown") { - - msg <- paste( - "failed to infer current operating system", - "please file a bug report at https://github.com/rstudio/renv/issues", - sep = "; " - ) - - warning(msg) - - } - - prefix - - } - - renv_bootstrap_platform_os <- function() { - - sysinfo <- Sys.info() - sysname <- sysinfo[["sysname"]] - - # handle Windows + macOS up front - if (sysname == "Windows") - return("windows") - else if (sysname == "Darwin") - return("macos") - - # check for os-release files - for (file in c("/etc/os-release", "/usr/lib/os-release")) - if (file.exists(file)) - return(renv_bootstrap_platform_os_via_os_release(file, sysinfo)) - - # check for redhat-release files - if (file.exists("/etc/redhat-release")) - return(renv_bootstrap_platform_os_via_redhat_release()) - - "unknown" - - } - - renv_bootstrap_platform_os_via_os_release <- function(file, sysinfo) { - - # read /etc/os-release - release <- utils::read.table( - file = file, - sep = "=", - quote = c("\"", "'"), - col.names = c("Key", "Value"), - comment.char = "#", - stringsAsFactors = FALSE - ) - - vars <- as.list(release$Value) - names(vars) <- release$Key - - # get os name - os <- tolower(sysinfo[["sysname"]]) - - # read id - id <- "unknown" - for (field in c("ID", "ID_LIKE")) { - if (field %in% names(vars) && nzchar(vars[[field]])) { - id <- vars[[field]] - break - } - } - - # read version - version <- "unknown" - for (field in c("UBUNTU_CODENAME", "VERSION_CODENAME", "VERSION_ID", "BUILD_ID")) { - if (field %in% names(vars) && nzchar(vars[[field]])) { - version <- vars[[field]] - break - } - } - - # join together - paste(c(os, id, version), collapse = "-") - - } - - renv_bootstrap_platform_os_via_redhat_release <- function() { - - # read /etc/redhat-release - contents <- readLines("/etc/redhat-release", warn = FALSE) - - # infer id - id <- if (grepl("centos", contents, ignore.case = TRUE)) - "centos" - else if (grepl("redhat", contents, ignore.case = TRUE)) - "redhat" - else - "unknown" - - # try to find a version component (very hacky) - version <- "unknown" - - parts <- strsplit(contents, "[[:space:]]")[[1L]] - for (part in parts) { - - nv <- tryCatch(numeric_version(part), 
error = identity) - if (inherits(nv, "error")) - next - - version <- nv[1, 1] - break - - } - - paste(c("linux", id, version), collapse = "-") - - } - - renv_bootstrap_library_root_name <- function(project) { - - # use project name as-is if requested - asis <- Sys.getenv("RENV_PATHS_LIBRARY_ROOT_ASIS", unset = "FALSE") - if (asis) - return(basename(project)) - - # otherwise, disambiguate based on project's path - id <- substring(renv_bootstrap_hash_text(project), 1L, 8L) - paste(basename(project), id, sep = "-") - - } - - renv_bootstrap_library_root <- function(project) { - - prefix <- renv_bootstrap_profile_prefix() - - path <- Sys.getenv("RENV_PATHS_LIBRARY", unset = NA) - if (!is.na(path)) - return(paste(c(path, prefix), collapse = "/")) - - path <- renv_bootstrap_library_root_impl(project) - if (!is.null(path)) { - name <- renv_bootstrap_library_root_name(project) - return(paste(c(path, prefix, name), collapse = "/")) - } - - renv_bootstrap_paths_renv("library", project = project) - - } - - renv_bootstrap_library_root_impl <- function(project) { - - root <- Sys.getenv("RENV_PATHS_LIBRARY_ROOT", unset = NA) - if (!is.na(root)) - return(root) - - type <- renv_bootstrap_project_type(project) - if (identical(type, "package")) { - userdir <- renv_bootstrap_user_dir() - return(file.path(userdir, "library")) - } - - } - - renv_bootstrap_validate_version <- function(version, description = NULL) { - - # resolve description file - # - # avoid passing lib.loc to `packageDescription()` below, since R will - # use the loaded version of the package by default anyhow. note that - # this function should only be called after 'renv' is loaded - # https://github.com/rstudio/renv/issues/1625 - description <- description %||% packageDescription("renv") - - # check whether requested version 'version' matches loaded version of renv - sha <- attr(version, "sha", exact = TRUE) - valid <- if (!is.null(sha)) - renv_bootstrap_validate_version_dev(sha, description) - else - renv_bootstrap_validate_version_release(version, description) - - if (valid) - return(TRUE) - - # the loaded version of renv doesn't match the requested version; - # give the user instructions on how to proceed - dev <- identical(description[["RemoteType"]], "github") - remote <- if (dev) - paste("rstudio/renv", description[["RemoteSha"]], sep = "@") - else - paste("renv", description[["Version"]], sep = "@") - - # display both loaded version + sha if available - friendly <- renv_bootstrap_version_friendly( - version = description[["Version"]], - sha = if (dev) description[["RemoteSha"]] - ) - - fmt <- heredoc(" - renv %1$s was loaded from project library, but this project is configured to use renv %2$s. - - Use `renv::record(\"%3$s\")` to record renv %1$s in the lockfile. - - Use `renv::restore(packages = \"renv\")` to install renv %2$s into the project library. 
- ") - catf(fmt, friendly, renv_bootstrap_version_friendly(version), remote) - - FALSE - - } - - renv_bootstrap_validate_version_dev <- function(version, description) { - - expected <- description[["RemoteSha"]] - if (!is.character(expected)) - return(FALSE) - - pattern <- sprintf("^\\Q%s\\E", version) - grepl(pattern, expected, perl = TRUE) - - } - - renv_bootstrap_validate_version_release <- function(version, description) { - expected <- description[["Version"]] - is.character(expected) && identical(expected, version) - } - - renv_bootstrap_hash_text <- function(text) { - - hashfile <- tempfile("renv-hash-") - on.exit(unlink(hashfile), add = TRUE) - - writeLines(text, con = hashfile) - tools::md5sum(hashfile) - - } - - renv_bootstrap_load <- function(project, libpath, version) { - - # try to load renv from the project library - if (!requireNamespace("renv", lib.loc = libpath, quietly = TRUE)) - return(FALSE) - - # warn if the version of renv loaded does not match - renv_bootstrap_validate_version(version) - - # execute renv load hooks, if any - hooks <- getHook("renv::autoload") - for (hook in hooks) - if (is.function(hook)) - tryCatch(hook(), error = warnify) - - # load the project - renv::load(project) - - TRUE - - } - - renv_bootstrap_profile_load <- function(project) { - - # if RENV_PROFILE is already set, just use that - profile <- Sys.getenv("RENV_PROFILE", unset = NA) - if (!is.na(profile) && nzchar(profile)) - return(profile) - - # check for a profile file (nothing to do if it doesn't exist) - path <- renv_bootstrap_paths_renv("profile", profile = FALSE, project = project) - if (!file.exists(path)) - return(NULL) - - # read the profile, and set it if it exists - contents <- readLines(path, warn = FALSE) - if (length(contents) == 0L) - return(NULL) - - # set RENV_PROFILE - profile <- contents[[1L]] - if (!profile %in% c("", "default")) - Sys.setenv(RENV_PROFILE = profile) - - profile - - } - - renv_bootstrap_profile_prefix <- function() { - profile <- renv_bootstrap_profile_get() - if (!is.null(profile)) - return(file.path("profiles", profile, "renv")) - } - - renv_bootstrap_profile_get <- function() { - profile <- Sys.getenv("RENV_PROFILE", unset = "") - renv_bootstrap_profile_normalize(profile) - } - - renv_bootstrap_profile_set <- function(profile) { - profile <- renv_bootstrap_profile_normalize(profile) - if (is.null(profile)) - Sys.unsetenv("RENV_PROFILE") - else - Sys.setenv(RENV_PROFILE = profile) - } - - renv_bootstrap_profile_normalize <- function(profile) { - - if (is.null(profile) || profile %in% c("", "default")) - return(NULL) - - profile - - } - - renv_bootstrap_path_absolute <- function(path) { - - substr(path, 1L, 1L) %in% c("~", "/", "\\") || ( - substr(path, 1L, 1L) %in% c(letters, LETTERS) && - substr(path, 2L, 3L) %in% c(":/", ":\\") - ) - - } - - renv_bootstrap_paths_renv <- function(..., profile = TRUE, project = NULL) { - renv <- Sys.getenv("RENV_PATHS_RENV", unset = "renv") - root <- if (renv_bootstrap_path_absolute(renv)) NULL else project - prefix <- if (profile) renv_bootstrap_profile_prefix() - components <- c(root, renv, prefix, ...) 
- paste(components, collapse = "/") - } - - renv_bootstrap_project_type <- function(path) { - - descpath <- file.path(path, "DESCRIPTION") - if (!file.exists(descpath)) - return("unknown") - - desc <- tryCatch( - read.dcf(descpath, all = TRUE), - error = identity - ) - - if (inherits(desc, "error")) - return("unknown") - - type <- desc$Type - if (!is.null(type)) - return(tolower(type)) - - package <- desc$Package - if (!is.null(package)) - return("package") - - "unknown" - - } - - renv_bootstrap_user_dir <- function() { - dir <- renv_bootstrap_user_dir_impl() - path.expand(chartr("\\", "/", dir)) - } - - renv_bootstrap_user_dir_impl <- function() { - - # use local override if set - override <- getOption("renv.userdir.override") - if (!is.null(override)) - return(override) - - # use R_user_dir if available - tools <- asNamespace("tools") - if (is.function(tools$R_user_dir)) - return(tools$R_user_dir("renv", "cache")) - - # try using our own backfill for older versions of R - envvars <- c("R_USER_CACHE_DIR", "XDG_CACHE_HOME") - for (envvar in envvars) { - root <- Sys.getenv(envvar, unset = NA) - if (!is.na(root)) - return(file.path(root, "R/renv")) - } - - # use platform-specific default fallbacks - if (Sys.info()[["sysname"]] == "Windows") - file.path(Sys.getenv("LOCALAPPDATA"), "R/cache/R/renv") - else if (Sys.info()[["sysname"]] == "Darwin") - "~/Library/Caches/org.R-project.R/R/renv" - else - "~/.cache/R/renv" - - } - - renv_bootstrap_version_friendly <- function(version, shafmt = NULL, sha = NULL) { - sha <- sha %||% attr(version, "sha", exact = TRUE) - parts <- c(version, sprintf(shafmt %||% " [sha: %s]", substring(sha, 1L, 7L))) - paste(parts, collapse = "") - } - - renv_bootstrap_exec <- function(project, libpath, version) { - if (!renv_bootstrap_load(project, libpath, version)) - renv_bootstrap_run(project, libpath, version) - } - - renv_bootstrap_run <- function(project, libpath, version) { - - # perform bootstrap - bootstrap(version, libpath) - - # exit early if we're just testing bootstrap - if (!is.na(Sys.getenv("RENV_BOOTSTRAP_INSTALL_ONLY", unset = NA))) - return(TRUE) - - # try again to load - if (requireNamespace("renv", lib.loc = libpath, quietly = TRUE)) { - return(renv::load(project = project)) - } - - # failed to download or load renv; warn the user - msg <- c( - "Failed to find an renv installation: the project will not be loaded.", - "Use `renv::activate()` to re-initialize the project." - ) - - warning(paste(msg, collapse = "\n"), call. 
= FALSE) - - } - - renv_json_read <- function(file = NULL, text = NULL) { - - jlerr <- NULL - - # if jsonlite is loaded, use that instead - if ("jsonlite" %in% loadedNamespaces()) { - - json <- tryCatch(renv_json_read_jsonlite(file, text), error = identity) - if (!inherits(json, "error")) - return(json) - - jlerr <- json - - } - - # otherwise, fall back to the default JSON reader - json <- tryCatch(renv_json_read_default(file, text), error = identity) - if (!inherits(json, "error")) - return(json) - - # report an error - if (!is.null(jlerr)) - stop(jlerr) - else - stop(json) - - } - - renv_json_read_jsonlite <- function(file = NULL, text = NULL) { - text <- paste(text %||% readLines(file, warn = FALSE), collapse = "\n") - jsonlite::fromJSON(txt = text, simplifyVector = FALSE) - } - - renv_json_read_patterns <- function() { - - list( - - # objects - list("{", "\t\n\tobject(\t\n\t", TRUE), - list("}", "\t\n\t)\t\n\t", TRUE), - - # arrays - list("[", "\t\n\tarray(\t\n\t", TRUE), - list("]", "\n\t\n)\n\t\n", TRUE), - - # maps - list(":", "\t\n\t=\t\n\t", TRUE), - - # newlines - list("\\u000a", "\n", FALSE) - - ) - - } - - renv_json_read_envir <- function() { - - envir <- new.env(parent = emptyenv()) - - envir[["+"]] <- `+` - envir[["-"]] <- `-` - - envir[["object"]] <- function(...) { - result <- list(...) - names(result) <- as.character(names(result)) - result - } - - envir[["array"]] <- list - - envir[["true"]] <- TRUE - envir[["false"]] <- FALSE - envir[["null"]] <- NULL - - envir - - } - - renv_json_read_remap <- function(object, patterns) { - - # repair names if necessary - if (!is.null(names(object))) { - - nms <- names(object) - for (pattern in patterns) - nms <- gsub(pattern[[2L]], pattern[[1L]], nms, fixed = TRUE) - names(object) <- nms - - } - - # repair strings if necessary - if (is.character(object)) { - for (pattern in patterns) - object <- gsub(pattern[[2L]], pattern[[1L]], object, fixed = TRUE) - } - - # recurse for other objects - if (is.recursive(object)) - for (i in seq_along(object)) - object[i] <- list(renv_json_read_remap(object[[i]], patterns)) - - # return remapped object - object - - } - - renv_json_read_default <- function(file = NULL, text = NULL) { - - # read json text - text <- paste(text %||% readLines(file, warn = FALSE), collapse = "\n") - - # convert into something the R parser will understand - patterns <- renv_json_read_patterns() - transformed <- text - for (pattern in patterns) - transformed <- gsub(pattern[[1L]], pattern[[2L]], transformed, fixed = TRUE) - - # parse it - rfile <- tempfile("renv-json-", fileext = ".R") - on.exit(unlink(rfile), add = TRUE) - writeLines(transformed, con = rfile) - json <- parse(rfile, keep.source = FALSE, srcfile = NULL)[[1L]] - - # evaluate in safe environment - result <- eval(json, envir = renv_json_read_envir()) - - # fix up strings if necessary -- do so only with reversible patterns - patterns <- Filter(function(pattern) pattern[[3L]], patterns) - renv_json_read_remap(result, patterns) - - } - - - # load the renv profile, if any - renv_bootstrap_profile_load(project) - - # construct path to library root - root <- renv_bootstrap_library_root(project) - - # construct library prefix for platform - prefix <- renv_bootstrap_platform_prefix() - - # construct full libpath - libpath <- file.path(root, prefix) - - # run bootstrap code - renv_bootstrap_exec(project, libpath, version) - - invisible() - -}) + +local({ + + # the requested version of renv + version <- "1.1.4" + attr(version, "sha") <- NULL + + # the project directory + 
project <- Sys.getenv("RENV_PROJECT") + if (!nzchar(project)) + project <- getwd() + + # use start-up diagnostics if enabled + diagnostics <- Sys.getenv("RENV_STARTUP_DIAGNOSTICS", unset = "FALSE") + if (diagnostics) { + start <- Sys.time() + profile <- tempfile("renv-startup-", fileext = ".Rprof") + utils::Rprof(profile) + on.exit({ + utils::Rprof(NULL) + elapsed <- signif(difftime(Sys.time(), start, units = "auto"), digits = 2L) + writeLines(sprintf("- renv took %s to run the autoloader.", format(elapsed))) + writeLines(sprintf("- Profile: %s", profile)) + print(utils::summaryRprof(profile)) + }, add = TRUE) + } + + # figure out whether the autoloader is enabled + enabled <- local({ + + # first, check config option + override <- getOption("renv.config.autoloader.enabled") + if (!is.null(override)) + return(override) + + # if we're being run in a context where R_LIBS is already set, + # don't load -- presumably we're being run as a sub-process and + # the parent process has already set up library paths for us + rcmd <- Sys.getenv("R_CMD", unset = NA) + rlibs <- Sys.getenv("R_LIBS", unset = NA) + if (!is.na(rlibs) && !is.na(rcmd)) + return(FALSE) + + # next, check environment variables + # prefer using the configuration one in the future + envvars <- c( + "RENV_CONFIG_AUTOLOADER_ENABLED", + "RENV_AUTOLOADER_ENABLED", + "RENV_ACTIVATE_PROJECT" + ) + + for (envvar in envvars) { + envval <- Sys.getenv(envvar, unset = NA) + if (!is.na(envval)) + return(tolower(envval) %in% c("true", "t", "1")) + } + + # enable by default + TRUE + + }) + + # bail if we're not enabled + if (!enabled) { + + # if we're not enabled, we might still need to manually load + # the user profile here + profile <- Sys.getenv("R_PROFILE_USER", unset = "~/.Rprofile") + if (file.exists(profile)) { + cfg <- Sys.getenv("RENV_CONFIG_USER_PROFILE", unset = "TRUE") + if (tolower(cfg) %in% c("true", "t", "1")) + sys.source(profile, envir = globalenv()) + } + + return(FALSE) + + } + + # avoid recursion + if (identical(getOption("renv.autoloader.running"), TRUE)) { + warning("ignoring recursive attempt to run renv autoloader") + return(invisible(TRUE)) + } + + # signal that we're loading renv during R startup + options(renv.autoloader.running = TRUE) + on.exit(options(renv.autoloader.running = NULL), add = TRUE) + + # signal that we've consented to use renv + options(renv.consent = TRUE) + + # load the 'utils' package eagerly -- this ensures that renv shims, which + # mask 'utils' packages, will come first on the search path + library(utils, lib.loc = .Library) + + # unload renv if it's already been loaded + if ("renv" %in% loadedNamespaces()) + unloadNamespace("renv") + + # load bootstrap tools + ansify <- function(text) { + if (renv_ansify_enabled()) + renv_ansify_enhanced(text) + else + renv_ansify_default(text) + } + + renv_ansify_enabled <- function() { + + override <- Sys.getenv("RENV_ANSIFY_ENABLED", unset = NA) + if (!is.na(override)) + return(as.logical(override)) + + pane <- Sys.getenv("RSTUDIO_CHILD_PROCESS_PANE", unset = NA) + if (identical(pane, "build")) + return(FALSE) + + testthat <- Sys.getenv("TESTTHAT", unset = "false") + if (tolower(testthat) %in% "true") + return(FALSE) + + iderun <- Sys.getenv("R_CLI_HAS_HYPERLINK_IDE_RUN", unset = "false") + if (tolower(iderun) %in% "false") + return(FALSE) + + TRUE + + } + + renv_ansify_default <- function(text) { + text + } + + renv_ansify_enhanced <- function(text) { + + # R help links + pattern <- "`\\?(renv::(?:[^`])+)`" + replacement <- 
"`\033]8;;x-r-help:\\1\a?\\1\033]8;;\a`" + text <- gsub(pattern, replacement, text, perl = TRUE) + + # runnable code + pattern <- "`(renv::(?:[^`])+)`" + replacement <- "`\033]8;;x-r-run:\\1\a\\1\033]8;;\a`" + text <- gsub(pattern, replacement, text, perl = TRUE) + + # return ansified text + text + + } + + renv_ansify_init <- function() { + + envir <- renv_envir_self() + if (renv_ansify_enabled()) + assign("ansify", renv_ansify_enhanced, envir = envir) + else + assign("ansify", renv_ansify_default, envir = envir) + + } + + `%||%` <- function(x, y) { + if (is.null(x)) y else x + } + + catf <- function(fmt, ..., appendLF = TRUE) { + + quiet <- getOption("renv.bootstrap.quiet", default = FALSE) + if (quiet) + return(invisible()) + + msg <- sprintf(fmt, ...) + cat(msg, file = stdout(), sep = if (appendLF) "\n" else "") + + invisible(msg) + + } + + header <- function(label, + ..., + prefix = "#", + suffix = "-", + n = min(getOption("width"), 78)) + { + label <- sprintf(label, ...) + n <- max(n - nchar(label) - nchar(prefix) - 2L, 8L) + if (n <= 0) + return(paste(prefix, label)) + + tail <- paste(rep.int(suffix, n), collapse = "") + paste0(prefix, " ", label, " ", tail) + + } + + heredoc <- function(text, leave = 0) { + + # remove leading, trailing whitespace + trimmed <- gsub("^\\s*\\n|\\n\\s*$", "", text) + + # split into lines + lines <- strsplit(trimmed, "\n", fixed = TRUE)[[1L]] + + # compute common indent + indent <- regexpr("[^[:space:]]", lines) + common <- min(setdiff(indent, -1L)) - leave + text <- paste(substring(lines, common), collapse = "\n") + + # substitute in ANSI links for executable renv code + ansify(text) + + } + + bootstrap <- function(version, library) { + + friendly <- renv_bootstrap_version_friendly(version) + section <- header(sprintf("Bootstrapping renv %s", friendly)) + catf(section) + + # attempt to download renv + catf("- Downloading renv ... ", appendLF = FALSE) + withCallingHandlers( + tarball <- renv_bootstrap_download(version), + error = function(err) { + catf("FAILED") + stop("failed to download:\n", conditionMessage(err)) + } + ) + catf("OK") + on.exit(unlink(tarball), add = TRUE) + + # now attempt to install + catf("- Installing renv ... 
", appendLF = FALSE) + withCallingHandlers( + status <- renv_bootstrap_install(version, tarball, library), + error = function(err) { + catf("FAILED") + stop("failed to install:\n", conditionMessage(err)) + } + ) + catf("OK") + + # add empty line to break up bootstrapping from normal output + catf("") + + return(invisible()) + } + + renv_bootstrap_tests_running <- function() { + getOption("renv.tests.running", default = FALSE) + } + + renv_bootstrap_repos <- function() { + + # get CRAN repository + cran <- getOption("renv.repos.cran", "https://cloud.r-project.org") + + # check for repos override + repos <- Sys.getenv("RENV_CONFIG_REPOS_OVERRIDE", unset = NA) + if (!is.na(repos)) { + + # check for RSPM; if set, use a fallback repository for renv + rspm <- Sys.getenv("RSPM", unset = NA) + if (identical(rspm, repos)) + repos <- c(RSPM = rspm, CRAN = cran) + + return(repos) + + } + + # check for lockfile repositories + repos <- tryCatch(renv_bootstrap_repos_lockfile(), error = identity) + if (!inherits(repos, "error") && length(repos)) + return(repos) + + # retrieve current repos + repos <- getOption("repos") + + # ensure @CRAN@ entries are resolved + repos[repos == "@CRAN@"] <- cran + + # add in renv.bootstrap.repos if set + default <- c(FALLBACK = "https://cloud.r-project.org") + extra <- getOption("renv.bootstrap.repos", default = default) + repos <- c(repos, extra) + + # remove duplicates that might've snuck in + dupes <- duplicated(repos) | duplicated(names(repos)) + repos[!dupes] + + } + + renv_bootstrap_repos_lockfile <- function() { + + lockpath <- Sys.getenv("RENV_PATHS_LOCKFILE", unset = "renv.lock") + if (!file.exists(lockpath)) + return(NULL) + + lockfile <- tryCatch(renv_json_read(lockpath), error = identity) + if (inherits(lockfile, "error")) { + warning(lockfile) + return(NULL) + } + + repos <- lockfile$R$Repositories + if (length(repos) == 0) + return(NULL) + + keys <- vapply(repos, `[[`, "Name", FUN.VALUE = character(1)) + vals <- vapply(repos, `[[`, "URL", FUN.VALUE = character(1)) + names(vals) <- keys + + return(vals) + + } + + renv_bootstrap_download <- function(version) { + + sha <- attr(version, "sha", exact = TRUE) + + methods <- if (!is.null(sha)) { + + # attempting to bootstrap a development version of renv + c( + function() renv_bootstrap_download_tarball(sha), + function() renv_bootstrap_download_github(sha) + ) + + } else { + + # attempting to bootstrap a release version of renv + c( + function() renv_bootstrap_download_tarball(version), + function() renv_bootstrap_download_cran_latest(version), + function() renv_bootstrap_download_cran_archive(version) + ) + + } + + for (method in methods) { + path <- tryCatch(method(), error = identity) + if (is.character(path) && file.exists(path)) + return(path) + } + + stop("All download methods failed") + + } + + renv_bootstrap_download_impl <- function(url, destfile) { + + mode <- "wb" + + # https://bugs.r-project.org/bugzilla/show_bug.cgi?id=17715 + fixup <- + Sys.info()[["sysname"]] == "Windows" && + substring(url, 1L, 5L) == "file:" + + if (fixup) + mode <- "w+b" + + args <- list( + url = url, + destfile = destfile, + mode = mode, + quiet = TRUE + ) + + if ("headers" %in% names(formals(utils::download.file))) { + headers <- renv_bootstrap_download_custom_headers(url) + if (length(headers) && is.character(headers)) + args$headers <- headers + } + + do.call(utils::download.file, args) + + } + + renv_bootstrap_download_custom_headers <- function(url) { + + headers <- getOption("renv.download.headers") + if (is.null(headers)) 
+ return(character()) + + if (!is.function(headers)) + stopf("'renv.download.headers' is not a function") + + headers <- headers(url) + if (length(headers) == 0L) + return(character()) + + if (is.list(headers)) + headers <- unlist(headers, recursive = FALSE, use.names = TRUE) + + ok <- + is.character(headers) && + is.character(names(headers)) && + all(nzchar(names(headers))) + + if (!ok) + stop("invocation of 'renv.download.headers' did not return a named character vector") + + headers + + } + + renv_bootstrap_download_cran_latest <- function(version) { + + spec <- renv_bootstrap_download_cran_latest_find(version) + type <- spec$type + repos <- spec$repos + + baseurl <- utils::contrib.url(repos = repos, type = type) + ext <- if (identical(type, "source")) + ".tar.gz" + else if (Sys.info()[["sysname"]] == "Windows") + ".zip" + else + ".tgz" + name <- sprintf("renv_%s%s", version, ext) + url <- paste(baseurl, name, sep = "/") + + destfile <- file.path(tempdir(), name) + status <- tryCatch( + renv_bootstrap_download_impl(url, destfile), + condition = identity + ) + + if (inherits(status, "condition")) + return(FALSE) + + # report success and return + destfile + + } + + renv_bootstrap_download_cran_latest_find <- function(version) { + + # check whether binaries are supported on this system + binary <- + getOption("renv.bootstrap.binary", default = TRUE) && + !identical(.Platform$pkgType, "source") && + !identical(getOption("pkgType"), "source") && + Sys.info()[["sysname"]] %in% c("Darwin", "Windows") + + types <- c(if (binary) "binary", "source") + + # iterate over types + repositories + for (type in types) { + for (repos in renv_bootstrap_repos()) { + + # build arguments for utils::available.packages() call + args <- list(type = type, repos = repos) + + # add custom headers if available -- note that + # utils::available.packages() will pass this to download.file() + if ("headers" %in% names(formals(utils::download.file))) { + headers <- renv_bootstrap_download_custom_headers(repos) + if (length(headers) && is.character(headers)) + args$headers <- headers + } + + # retrieve package database + db <- tryCatch( + as.data.frame( + do.call(utils::available.packages, args), + stringsAsFactors = FALSE + ), + error = identity + ) + + if (inherits(db, "error")) + next + + # check for compatible entry + entry <- db[db$Package %in% "renv" & db$Version %in% version, ] + if (nrow(entry) == 0) + next + + # found it; return spec to caller + spec <- list(entry = entry, type = type, repos = repos) + return(spec) + + } + } + + # if we got here, we failed to find renv + fmt <- "renv %s is not available from your declared package repositories" + stop(sprintf(fmt, version)) + + } + + renv_bootstrap_download_cran_archive <- function(version) { + + name <- sprintf("renv_%s.tar.gz", version) + repos <- renv_bootstrap_repos() + urls <- file.path(repos, "src/contrib/Archive/renv", name) + destfile <- file.path(tempdir(), name) + + for (url in urls) { + + status <- tryCatch( + renv_bootstrap_download_impl(url, destfile), + condition = identity + ) + + if (identical(status, 0L)) + return(destfile) + + } + + return(FALSE) + + } + + renv_bootstrap_download_tarball <- function(version) { + + # if the user has provided the path to a tarball via + # an environment variable, then use it + tarball <- Sys.getenv("RENV_BOOTSTRAP_TARBALL", unset = NA) + if (is.na(tarball)) + return() + + # allow directories + if (dir.exists(tarball)) { + name <- sprintf("renv_%s.tar.gz", version) + tarball <- file.path(tarball, name) + } + + # 
bail if it doesn't exist + if (!file.exists(tarball)) { + + # let the user know we weren't able to honour their request + fmt <- "- RENV_BOOTSTRAP_TARBALL is set (%s) but does not exist." + msg <- sprintf(fmt, tarball) + warning(msg) + + # bail + return() + + } + + catf("- Using local tarball '%s'.", tarball) + tarball + + } + + renv_bootstrap_github_token <- function() { + for (envvar in c("GITHUB_TOKEN", "GITHUB_PAT", "GH_TOKEN")) { + envval <- Sys.getenv(envvar, unset = NA) + if (!is.na(envval)) + return(envval) + } + } + + renv_bootstrap_download_github <- function(version) { + + enabled <- Sys.getenv("RENV_BOOTSTRAP_FROM_GITHUB", unset = "TRUE") + if (!identical(enabled, "TRUE")) + return(FALSE) + + # prepare download options + token <- renv_bootstrap_github_token() + if (is.null(token)) + token <- "" + + if (nzchar(Sys.which("curl")) && nzchar(token)) { + fmt <- "--location --fail --header \"Authorization: token %s\"" + extra <- sprintf(fmt, token) + saved <- options("download.file.method", "download.file.extra") + options(download.file.method = "curl", download.file.extra = extra) + on.exit(do.call(base::options, saved), add = TRUE) + } else if (nzchar(Sys.which("wget")) && nzchar(token)) { + fmt <- "--header=\"Authorization: token %s\"" + extra <- sprintf(fmt, token) + saved <- options("download.file.method", "download.file.extra") + options(download.file.method = "wget", download.file.extra = extra) + on.exit(do.call(base::options, saved), add = TRUE) + } + + url <- file.path("https://api.github.com/repos/rstudio/renv/tarball", version) + name <- sprintf("renv_%s.tar.gz", version) + destfile <- file.path(tempdir(), name) + + status <- tryCatch( + renv_bootstrap_download_impl(url, destfile), + condition = identity + ) + + if (!identical(status, 0L)) + return(FALSE) + + renv_bootstrap_download_augment(destfile) + + return(destfile) + + } + + # Add Sha to DESCRIPTION. This is stop gap until #890, after which we + # can use renv::install() to fully capture metadata. + renv_bootstrap_download_augment <- function(destfile) { + sha <- renv_bootstrap_git_extract_sha1_tar(destfile) + if (is.null(sha)) { + return() + } + + # Untar + tempdir <- tempfile("renv-github-") + on.exit(unlink(tempdir, recursive = TRUE), add = TRUE) + untar(destfile, exdir = tempdir) + pkgdir <- dir(tempdir, full.names = TRUE)[[1]] + + # Modify description + desc_path <- file.path(pkgdir, "DESCRIPTION") + desc_lines <- readLines(desc_path) + remotes_fields <- c( + "RemoteType: github", + "RemoteHost: api.github.com", + "RemoteRepo: renv", + "RemoteUsername: rstudio", + "RemotePkgRef: rstudio/renv", + paste("RemoteRef: ", sha), + paste("RemoteSha: ", sha) + ) + writeLines(c(desc_lines[desc_lines != ""], remotes_fields), con = desc_path) + + # Re-tar + local({ + old <- setwd(tempdir) + on.exit(setwd(old), add = TRUE) + + tar(destfile, compression = "gzip") + }) + invisible() + } + + # Extract the commit hash from a git archive. Git archives include the SHA1 + # hash as the comment field of the tarball pax extended header + # (see https://www.kernel.org/pub/software/scm/git/docs/git-archive.html) + # For GitHub archives this should be the first header after the default one + # (512 byte) header. 
+ renv_bootstrap_git_extract_sha1_tar <- function(bundle) { + + # open the bundle for reading + # We use gzcon for everything because (from ?gzcon) + # > Reading from a connection which does not supply a 'gzip' magic + # > header is equivalent to reading from the original connection + conn <- gzcon(file(bundle, open = "rb", raw = TRUE)) + on.exit(close(conn)) + + # The default pax header is 512 bytes long and the first pax extended header + # with the comment should be 51 bytes long + # `52 comment=` (11 chars) + 40 byte SHA1 hash + len <- 0x200 + 0x33 + res <- rawToChar(readBin(conn, "raw", n = len)[0x201:len]) + + if (grepl("^52 comment=", res)) { + sub("52 comment=", "", res) + } else { + NULL + } + } + + renv_bootstrap_install <- function(version, tarball, library) { + + # attempt to install it into project library + dir.create(library, showWarnings = FALSE, recursive = TRUE) + output <- renv_bootstrap_install_impl(library, tarball) + + # check for successful install + status <- attr(output, "status") + if (is.null(status) || identical(status, 0L)) + return(status) + + # an error occurred; report it + header <- "installation of renv failed" + lines <- paste(rep.int("=", nchar(header)), collapse = "") + text <- paste(c(header, lines, output), collapse = "\n") + stop(text) + + } + + renv_bootstrap_install_impl <- function(library, tarball) { + + # invoke using system2 so we can capture and report output + bin <- R.home("bin") + exe <- if (Sys.info()[["sysname"]] == "Windows") "R.exe" else "R" + R <- file.path(bin, exe) + + args <- c( + "--vanilla", "CMD", "INSTALL", "--no-multiarch", + "-l", shQuote(path.expand(library)), + shQuote(path.expand(tarball)) + ) + + system2(R, args, stdout = TRUE, stderr = TRUE) + + } + + renv_bootstrap_platform_prefix_default <- function() { + + # read version component + version <- Sys.getenv("RENV_PATHS_VERSION", unset = "R-%v") + + # expand placeholders + placeholders <- list( + list("%v", format(getRversion()[1, 1:2])), + list("%V", format(getRversion()[1, 1:3])) + ) + + for (placeholder in placeholders) + version <- gsub(placeholder[[1L]], placeholder[[2L]], version, fixed = TRUE) + + # include SVN revision for development versions of R + # (to avoid sharing platform-specific artefacts with released versions of R) + devel <- + identical(R.version[["status"]], "Under development (unstable)") || + identical(R.version[["nickname"]], "Unsuffered Consequences") + + if (devel) + version <- paste(version, R.version[["svn rev"]], sep = "-r") + + version + + } + + renv_bootstrap_platform_prefix <- function() { + + # construct version prefix + version <- renv_bootstrap_platform_prefix_default() + + # build list of path components + components <- c(version, R.version$platform) + + # include prefix if provided by user + prefix <- renv_bootstrap_platform_prefix_impl() + if (!is.na(prefix) && nzchar(prefix)) + components <- c(prefix, components) + + # build prefix + paste(components, collapse = "/") + + } + + renv_bootstrap_platform_prefix_impl <- function() { + + # if an explicit prefix has been supplied, use it + prefix <- Sys.getenv("RENV_PATHS_PREFIX", unset = NA) + if (!is.na(prefix)) + return(prefix) + + # if the user has requested an automatic prefix, generate it + auto <- Sys.getenv("RENV_PATHS_PREFIX_AUTO", unset = NA) + if (is.na(auto) && getRversion() >= "4.4.0") + auto <- "TRUE" + + if (auto %in% c("TRUE", "True", "true", "1")) + return(renv_bootstrap_platform_prefix_auto()) + + # empty string on failure + "" + + } + + renv_bootstrap_platform_prefix_auto 
<- function() { + + prefix <- tryCatch(renv_bootstrap_platform_os(), error = identity) + if (inherits(prefix, "error") || prefix %in% "unknown") { + + msg <- paste( + "failed to infer current operating system", + "please file a bug report at https://github.com/rstudio/renv/issues", + sep = "; " + ) + + warning(msg) + + } + + prefix + + } + + renv_bootstrap_platform_os <- function() { + + sysinfo <- Sys.info() + sysname <- sysinfo[["sysname"]] + + # handle Windows + macOS up front + if (sysname == "Windows") + return("windows") + else if (sysname == "Darwin") + return("macos") + + # check for os-release files + for (file in c("/etc/os-release", "/usr/lib/os-release")) + if (file.exists(file)) + return(renv_bootstrap_platform_os_via_os_release(file, sysinfo)) + + # check for redhat-release files + if (file.exists("/etc/redhat-release")) + return(renv_bootstrap_platform_os_via_redhat_release()) + + "unknown" + + } + + renv_bootstrap_platform_os_via_os_release <- function(file, sysinfo) { + + # read /etc/os-release + release <- utils::read.table( + file = file, + sep = "=", + quote = c("\"", "'"), + col.names = c("Key", "Value"), + comment.char = "#", + stringsAsFactors = FALSE + ) + + vars <- as.list(release$Value) + names(vars) <- release$Key + + # get os name + os <- tolower(sysinfo[["sysname"]]) + + # read id + id <- "unknown" + for (field in c("ID", "ID_LIKE")) { + if (field %in% names(vars) && nzchar(vars[[field]])) { + id <- vars[[field]] + break + } + } + + # read version + version <- "unknown" + for (field in c("UBUNTU_CODENAME", "VERSION_CODENAME", "VERSION_ID", "BUILD_ID")) { + if (field %in% names(vars) && nzchar(vars[[field]])) { + version <- vars[[field]] + break + } + } + + # join together + paste(c(os, id, version), collapse = "-") + + } + + renv_bootstrap_platform_os_via_redhat_release <- function() { + + # read /etc/redhat-release + contents <- readLines("/etc/redhat-release", warn = FALSE) + + # infer id + id <- if (grepl("centos", contents, ignore.case = TRUE)) + "centos" + else if (grepl("redhat", contents, ignore.case = TRUE)) + "redhat" + else + "unknown" + + # try to find a version component (very hacky) + version <- "unknown" + + parts <- strsplit(contents, "[[:space:]]")[[1L]] + for (part in parts) { + + nv <- tryCatch(numeric_version(part), error = identity) + if (inherits(nv, "error")) + next + + version <- nv[1, 1] + break + + } + + paste(c("linux", id, version), collapse = "-") + + } + + renv_bootstrap_library_root_name <- function(project) { + + # use project name as-is if requested + asis <- Sys.getenv("RENV_PATHS_LIBRARY_ROOT_ASIS", unset = "FALSE") + if (asis) + return(basename(project)) + + # otherwise, disambiguate based on project's path + id <- substring(renv_bootstrap_hash_text(project), 1L, 8L) + paste(basename(project), id, sep = "-") + + } + + renv_bootstrap_library_root <- function(project) { + + prefix <- renv_bootstrap_profile_prefix() + + path <- Sys.getenv("RENV_PATHS_LIBRARY", unset = NA) + if (!is.na(path)) + return(paste(c(path, prefix), collapse = "/")) + + path <- renv_bootstrap_library_root_impl(project) + if (!is.null(path)) { + name <- renv_bootstrap_library_root_name(project) + return(paste(c(path, prefix, name), collapse = "/")) + } + + renv_bootstrap_paths_renv("library", project = project) + + } + + renv_bootstrap_library_root_impl <- function(project) { + + root <- Sys.getenv("RENV_PATHS_LIBRARY_ROOT", unset = NA) + if (!is.na(root)) + return(root) + + type <- renv_bootstrap_project_type(project) + if (identical(type, "package")) { + 
userdir <- renv_bootstrap_user_dir() + return(file.path(userdir, "library")) + } + + } + + renv_bootstrap_validate_version <- function(version, description = NULL) { + + # resolve description file + # + # avoid passing lib.loc to `packageDescription()` below, since R will + # use the loaded version of the package by default anyhow. note that + # this function should only be called after 'renv' is loaded + # https://github.com/rstudio/renv/issues/1625 + description <- description %||% packageDescription("renv") + + # check whether requested version 'version' matches loaded version of renv + sha <- attr(version, "sha", exact = TRUE) + valid <- if (!is.null(sha)) + renv_bootstrap_validate_version_dev(sha, description) + else + renv_bootstrap_validate_version_release(version, description) + + if (valid) + return(TRUE) + + # the loaded version of renv doesn't match the requested version; + # give the user instructions on how to proceed + dev <- identical(description[["RemoteType"]], "github") + remote <- if (dev) + paste("rstudio/renv", description[["RemoteSha"]], sep = "@") + else + paste("renv", description[["Version"]], sep = "@") + + # display both loaded version + sha if available + friendly <- renv_bootstrap_version_friendly( + version = description[["Version"]], + sha = if (dev) description[["RemoteSha"]] + ) + + fmt <- heredoc(" + renv %1$s was loaded from project library, but this project is configured to use renv %2$s. + - Use `renv::record(\"%3$s\")` to record renv %1$s in the lockfile. + - Use `renv::restore(packages = \"renv\")` to install renv %2$s into the project library. + ") + catf(fmt, friendly, renv_bootstrap_version_friendly(version), remote) + + FALSE + + } + + renv_bootstrap_validate_version_dev <- function(version, description) { + + expected <- description[["RemoteSha"]] + if (!is.character(expected)) + return(FALSE) + + pattern <- sprintf("^\\Q%s\\E", version) + grepl(pattern, expected, perl = TRUE) + + } + + renv_bootstrap_validate_version_release <- function(version, description) { + expected <- description[["Version"]] + is.character(expected) && identical(expected, version) + } + + renv_bootstrap_hash_text <- function(text) { + + hashfile <- tempfile("renv-hash-") + on.exit(unlink(hashfile), add = TRUE) + + writeLines(text, con = hashfile) + tools::md5sum(hashfile) + + } + + renv_bootstrap_load <- function(project, libpath, version) { + + # try to load renv from the project library + if (!requireNamespace("renv", lib.loc = libpath, quietly = TRUE)) + return(FALSE) + + # warn if the version of renv loaded does not match + renv_bootstrap_validate_version(version) + + # execute renv load hooks, if any + hooks <- getHook("renv::autoload") + for (hook in hooks) + if (is.function(hook)) + tryCatch(hook(), error = warnify) + + # load the project + renv::load(project) + + TRUE + + } + + renv_bootstrap_profile_load <- function(project) { + + # if RENV_PROFILE is already set, just use that + profile <- Sys.getenv("RENV_PROFILE", unset = NA) + if (!is.na(profile) && nzchar(profile)) + return(profile) + + # check for a profile file (nothing to do if it doesn't exist) + path <- renv_bootstrap_paths_renv("profile", profile = FALSE, project = project) + if (!file.exists(path)) + return(NULL) + + # read the profile, and set it if it exists + contents <- readLines(path, warn = FALSE) + if (length(contents) == 0L) + return(NULL) + + # set RENV_PROFILE + profile <- contents[[1L]] + if (!profile %in% c("", "default")) + Sys.setenv(RENV_PROFILE = profile) + + profile + + } + + 
renv_bootstrap_profile_prefix <- function() { + profile <- renv_bootstrap_profile_get() + if (!is.null(profile)) + return(file.path("profiles", profile, "renv")) + } + + renv_bootstrap_profile_get <- function() { + profile <- Sys.getenv("RENV_PROFILE", unset = "") + renv_bootstrap_profile_normalize(profile) + } + + renv_bootstrap_profile_set <- function(profile) { + profile <- renv_bootstrap_profile_normalize(profile) + if (is.null(profile)) + Sys.unsetenv("RENV_PROFILE") + else + Sys.setenv(RENV_PROFILE = profile) + } + + renv_bootstrap_profile_normalize <- function(profile) { + + if (is.null(profile) || profile %in% c("", "default")) + return(NULL) + + profile + + } + + renv_bootstrap_path_absolute <- function(path) { + + substr(path, 1L, 1L) %in% c("~", "/", "\\") || ( + substr(path, 1L, 1L) %in% c(letters, LETTERS) && + substr(path, 2L, 3L) %in% c(":/", ":\\") + ) + + } + + renv_bootstrap_paths_renv <- function(..., profile = TRUE, project = NULL) { + renv <- Sys.getenv("RENV_PATHS_RENV", unset = "renv") + root <- if (renv_bootstrap_path_absolute(renv)) NULL else project + prefix <- if (profile) renv_bootstrap_profile_prefix() + components <- c(root, renv, prefix, ...) + paste(components, collapse = "/") + } + + renv_bootstrap_project_type <- function(path) { + + descpath <- file.path(path, "DESCRIPTION") + if (!file.exists(descpath)) + return("unknown") + + desc <- tryCatch( + read.dcf(descpath, all = TRUE), + error = identity + ) + + if (inherits(desc, "error")) + return("unknown") + + type <- desc$Type + if (!is.null(type)) + return(tolower(type)) + + package <- desc$Package + if (!is.null(package)) + return("package") + + "unknown" + + } + + renv_bootstrap_user_dir <- function() { + dir <- renv_bootstrap_user_dir_impl() + path.expand(chartr("\\", "/", dir)) + } + + renv_bootstrap_user_dir_impl <- function() { + + # use local override if set + override <- getOption("renv.userdir.override") + if (!is.null(override)) + return(override) + + # use R_user_dir if available + tools <- asNamespace("tools") + if (is.function(tools$R_user_dir)) + return(tools$R_user_dir("renv", "cache")) + + # try using our own backfill for older versions of R + envvars <- c("R_USER_CACHE_DIR", "XDG_CACHE_HOME") + for (envvar in envvars) { + root <- Sys.getenv(envvar, unset = NA) + if (!is.na(root)) + return(file.path(root, "R/renv")) + } + + # use platform-specific default fallbacks + if (Sys.info()[["sysname"]] == "Windows") + file.path(Sys.getenv("LOCALAPPDATA"), "R/cache/R/renv") + else if (Sys.info()[["sysname"]] == "Darwin") + "~/Library/Caches/org.R-project.R/R/renv" + else + "~/.cache/R/renv" + + } + + renv_bootstrap_version_friendly <- function(version, shafmt = NULL, sha = NULL) { + sha <- sha %||% attr(version, "sha", exact = TRUE) + parts <- c(version, sprintf(shafmt %||% " [sha: %s]", substring(sha, 1L, 7L))) + paste(parts, collapse = "") + } + + renv_bootstrap_exec <- function(project, libpath, version) { + if (!renv_bootstrap_load(project, libpath, version)) + renv_bootstrap_run(project, libpath, version) + } + + renv_bootstrap_run <- function(project, libpath, version) { + + # perform bootstrap + bootstrap(version, libpath) + + # exit early if we're just testing bootstrap + if (!is.na(Sys.getenv("RENV_BOOTSTRAP_INSTALL_ONLY", unset = NA))) + return(TRUE) + + # try again to load + if (requireNamespace("renv", lib.loc = libpath, quietly = TRUE)) { + return(renv::load(project = project)) + } + + # failed to download or load renv; warn the user + msg <- c( + "Failed to find an renv installation: 
the project will not be loaded.", + "Use `renv::activate()` to re-initialize the project." + ) + + warning(paste(msg, collapse = "\n"), call. = FALSE) + + } + + renv_json_read <- function(file = NULL, text = NULL) { + + jlerr <- NULL + + # if jsonlite is loaded, use that instead + if ("jsonlite" %in% loadedNamespaces()) { + + json <- tryCatch(renv_json_read_jsonlite(file, text), error = identity) + if (!inherits(json, "error")) + return(json) + + jlerr <- json + + } + + # otherwise, fall back to the default JSON reader + json <- tryCatch(renv_json_read_default(file, text), error = identity) + if (!inherits(json, "error")) + return(json) + + # report an error + if (!is.null(jlerr)) + stop(jlerr) + else + stop(json) + + } + + renv_json_read_jsonlite <- function(file = NULL, text = NULL) { + text <- paste(text %||% readLines(file, warn = FALSE), collapse = "\n") + jsonlite::fromJSON(txt = text, simplifyVector = FALSE) + } + + renv_json_read_patterns <- function() { + + list( + + # objects + list("{", "\t\n\tobject(\t\n\t", TRUE), + list("}", "\t\n\t)\t\n\t", TRUE), + + # arrays + list("[", "\t\n\tarray(\t\n\t", TRUE), + list("]", "\n\t\n)\n\t\n", TRUE), + + # maps + list(":", "\t\n\t=\t\n\t", TRUE), + + # newlines + list("\\u000a", "\n", FALSE) + + ) + + } + + renv_json_read_envir <- function() { + + envir <- new.env(parent = emptyenv()) + + envir[["+"]] <- `+` + envir[["-"]] <- `-` + + envir[["object"]] <- function(...) { + result <- list(...) + names(result) <- as.character(names(result)) + result + } + + envir[["array"]] <- list + + envir[["true"]] <- TRUE + envir[["false"]] <- FALSE + envir[["null"]] <- NULL + + envir + + } + + renv_json_read_remap <- function(object, patterns) { + + # repair names if necessary + if (!is.null(names(object))) { + + nms <- names(object) + for (pattern in patterns) + nms <- gsub(pattern[[2L]], pattern[[1L]], nms, fixed = TRUE) + names(object) <- nms + + } + + # repair strings if necessary + if (is.character(object)) { + for (pattern in patterns) + object <- gsub(pattern[[2L]], pattern[[1L]], object, fixed = TRUE) + } + + # recurse for other objects + if (is.recursive(object)) + for (i in seq_along(object)) + object[i] <- list(renv_json_read_remap(object[[i]], patterns)) + + # return remapped object + object + + } + + renv_json_read_default <- function(file = NULL, text = NULL) { + + # read json text + text <- paste(text %||% readLines(file, warn = FALSE), collapse = "\n") + + # convert into something the R parser will understand + patterns <- renv_json_read_patterns() + transformed <- text + for (pattern in patterns) + transformed <- gsub(pattern[[1L]], pattern[[2L]], transformed, fixed = TRUE) + + # parse it + rfile <- tempfile("renv-json-", fileext = ".R") + on.exit(unlink(rfile), add = TRUE) + writeLines(transformed, con = rfile) + json <- parse(rfile, keep.source = FALSE, srcfile = NULL)[[1L]] + + # evaluate in safe environment + result <- eval(json, envir = renv_json_read_envir()) + + # fix up strings if necessary -- do so only with reversible patterns + patterns <- Filter(function(pattern) pattern[[3L]], patterns) + renv_json_read_remap(result, patterns) + + } + + + # load the renv profile, if any + renv_bootstrap_profile_load(project) + + # construct path to library root + root <- renv_bootstrap_library_root(project) + + # construct library prefix for platform + prefix <- renv_bootstrap_platform_prefix() + + # construct full libpath + libpath <- file.path(root, prefix) + + # run bootstrap code + renv_bootstrap_exec(project, libpath, version) + + 
invisible() + +}) diff --git a/renv/settings.json b/renv/settings.json index 74c1d4b..8156db8 100644 --- a/renv/settings.json +++ b/renv/settings.json @@ -1,19 +1,19 @@ -{ - "bioconductor.version": null, - "external.libraries": [], - "ignored.packages": [], - "package.dependency.fields": [ - "Imports", - "Depends", - "LinkingTo" - ], - "ppm.enabled": null, - "ppm.ignored.urls": [], - "r.version": null, - "snapshot.type": "explicit", - "use.cache": true, - "vcs.ignore.cellar": true, - "vcs.ignore.library": true, - "vcs.ignore.local": true, - "vcs.manage.ignores": true -} +{ + "bioconductor.version": null, + "external.libraries": [], + "ignored.packages": [], + "package.dependency.fields": [ + "Imports", + "Depends", + "LinkingTo" + ], + "ppm.enabled": null, + "ppm.ignored.urls": [], + "r.version": null, + "snapshot.type": "explicit", + "use.cache": true, + "vcs.ignore.cellar": true, + "vcs.ignore.library": true, + "vcs.ignore.local": true, + "vcs.manage.ignores": true +} diff --git a/tests/testthat.R b/tests/testthat.R new file mode 100644 index 0000000..8d98970 --- /dev/null +++ b/tests/testthat.R @@ -0,0 +1,4 @@ +library(testthat) +library(crosswalk) + +test_check("crosswalk") diff --git a/tests/testthat/test-noncensus-crosswalks.R b/tests/testthat/test-noncensus-crosswalks.R new file mode 100644 index 0000000..e392533 --- /dev/null +++ b/tests/testthat/test-noncensus-crosswalks.R @@ -0,0 +1,348 @@ +# Tests for non-census year crosswalk functionality + +# ============================================================================== +# list_nhgis_crosswalks() tests +# ============================================================================== + +test_that("list_nhgis_crosswalks includes non-census target years", { + crosswalks <- list_nhgis_crosswalks() + + expect_s3_class(crosswalks, "tbl_df") + expect_true("target_year" %in% colnames(crosswalks)) + + target_years <- unique(crosswalks$target_year) + + expect_true("2011" %in% target_years) + expect_true("2012" %in% target_years) + expect_true("2014" %in% target_years) + expect_true("2015" %in% target_years) + expect_true("2022" %in% target_years) +}) + +test_that("list_nhgis_crosswalks non-census years only have bg/tr/co targets", { + crosswalks <- list_nhgis_crosswalks() + + noncensus_years <- c("2011", "2012", "2014", "2015", "2022") + + noncensus_crosswalks <- crosswalks |> + dplyr::filter(target_year %in% noncensus_years) + + target_geogs <- unique(noncensus_crosswalks$target_geography) + + expect_true(all(target_geogs %in% c("block_group", "tract", "county"))) + expect_false("place" %in% target_geogs) + expect_false("zcta" %in% target_geogs) + expect_false("puma" %in% target_geogs) +}) + +test_that("list_nhgis_crosswalks includes 2010 to 2022 crosswalks", { + crosswalks <- list_nhgis_crosswalks() + + crosswalks_2010_to_2022 <- crosswalks |> + dplyr::filter(source_year == "2010", target_year == "2022") + + expect_gt(nrow(crosswalks_2010_to_2022), 0) + + expect_true("block_group" %in% crosswalks_2010_to_2022$target_geography) + expect_true("tract" %in% crosswalks_2010_to_2022$target_geography) + expect_true("county" %in% crosswalks_2010_to_2022$target_geography) +}) + +# ============================================================================== +# get_ctdata_crosswalk() tests +# ============================================================================== + +test_that("get_ctdata_crosswalk returns correct structure for tracts", { + skip_if_offline() + + result <- get_ctdata_crosswalk(geography = "tract") + + 
expect_s3_class(result, "tbl_df") + + expected_cols <- c( + "source_geoid", "target_geoid", + "source_geography_name", "target_geography_name", + "source_year", "target_year", + "allocation_factor_source_to_target", + "weighting_factor", "state_fips") + + expect_true(all(expected_cols %in% colnames(result))) +}) +test_that("get_ctdata_crosswalk tract data has correct values", { + skip_if_offline() + + result <- get_ctdata_crosswalk(geography = "tract") + + expect_equal(unique(result$source_year), "2020") + expect_equal(unique(result$target_year), "2022") + expect_equal(unique(result$state_fips), "09") + expect_equal(unique(result$weighting_factor), "identity") + expect_true(all(result$allocation_factor_source_to_target == 1)) + + expect_equal(unique(result$source_geography_name), "tract") + expect_equal(unique(result$target_geography_name), "tract") +}) + +test_that("get_ctdata_crosswalk returns 879 CT tracts", +{ + skip_if_offline() + + result <- get_ctdata_crosswalk(geography = "tract") + + expect_equal(nrow(result), 879) +}) + +test_that("get_ctdata_crosswalk handles block_group geography", { + skip_if_offline() + + result <- get_ctdata_crosswalk(geography = "block_group") + + expect_s3_class(result, "tbl_df") + expect_equal(unique(result$source_geography_name), "block_group") + expect_equal(unique(result$target_geography_name), "block_group") + + expect_true(all(stringr::str_length(result$source_geoid) == 12)) + expect_true(all(stringr::str_length(result$target_geoid) == 12)) +}) + +test_that("get_ctdata_crosswalk handles county geography", { + skip_if_offline() + skip_if_not_installed("tidycensus") + skip_if(Sys.getenv("CENSUS_API_KEY") == "", "CENSUS_API_KEY not set") + + result <- get_ctdata_crosswalk(geography = "county") + + expect_s3_class(result, "tbl_df") + expect_equal(unique(result$source_geography_name), "county") + + n_source_counties <- length(unique(result$source_geoid)) + n_target_regions <- length(unique(result$target_geoid)) + expect_equal(n_source_counties, 8) + expect_equal(n_target_regions, 9) + + expect_equal(unique(result$weighting_factor), "population") + + allocation_sums <- result |> + dplyr::summarize( + total = sum(allocation_factor_source_to_target), + .by = "source_geoid") + expect_true(all(abs(allocation_sums$total - 1) < 0.001)) +}) + +test_that("get_ctdata_crosswalk errors on unsupported geography", { + expect_error( + get_ctdata_crosswalk(geography = "zcta"), + regexp = "not supported") + + expect_error( + get_ctdata_crosswalk(geography = "place"), + regexp = "not supported") +}) + + +test_that("get_ctdata_crosswalk accepts various geography spellings", { + skip_if_offline() + + result1 <- get_ctdata_crosswalk(geography = "tract") + result2 <- get_ctdata_crosswalk(geography = "tracts") + result3 <- get_ctdata_crosswalk(geography = "tr") + + expect_equal(nrow(result1), nrow(result2)) + expect_equal(nrow(result1), nrow(result3)) +}) + +test_that("get_ctdata_crosswalk caching works", { + skip_if_offline() + + cache_dir <- tempfile("crosswalk_cache_") + dir.create(cache_dir) + on.exit(unlink(cache_dir, recursive = TRUE)) + + result1 <- get_ctdata_crosswalk(geography = "tract", cache = cache_dir) + + cached_file <- file.path(cache_dir, "crosswalk_ctdata_2020_to_2022_tract.csv") + expect_true(file.exists(cached_file)) + + result2 <- get_ctdata_crosswalk(geography = "tract", cache = cache_dir) + expect_equal(result1, result2) +}) + +# ============================================================================== +# get_crosswalk() routing tests +# 
============================================================================== + +test_that("get_crosswalk routes 2020-2022 to CTData", { + skip_if_offline() + + result <- get_crosswalk( + source_geography = "tract", + target_geography = "tract", + source_year = 2020, + target_year = 2022) + + expect_s3_class(result, "tbl_df") + + metadata <- attr(result, "crosswalk_metadata") + expect_true("ctdata" %in% metadata$source) +}) + +test_that("get_crosswalk attaches metadata attribute", { + skip_if_offline() + + result <- get_crosswalk( + source_geography = "tract", + target_geography = "tract", + source_year = 2020, + target_year = 2022) + + metadata <- attr(result, "crosswalk_metadata") + + expect_type(metadata, "list") + expect_true("source" %in% names(metadata)) + expect_true("source_year" %in% names(metadata)) + expect_true("target_year" %in% names(metadata)) + expect_true("source_geography" %in% names(metadata)) + expect_true("target_geography" %in% names(metadata)) + expect_true("notes" %in% names(metadata)) +}) + +test_that("get_crosswalk 2020-2022 metadata contains correct info", { + skip_if_offline() + + result <- get_crosswalk( + source_geography = "tract", + target_geography = "tract", + source_year = 2020, + target_year = 2022) + + metadata <- attr(result, "crosswalk_metadata") + + expect_equal(metadata$source_year, "2020") + expect_equal(metadata$target_year, "2022") + expect_true(length(metadata$notes) > 0) +}) + +test_that("get_crosswalk 2020-2022 only returns Connecticut data", { + skip_if_offline() + + result <- get_crosswalk( + source_geography = "tract", + target_geography = "tract", + source_year = 2020, + target_year = 2022) + + state_fips <- unique(result$state_fips) + expect_equal(state_fips, "09") +}) + +test_that("get_crosswalk 2020-2022 errors on unsupported geography", { + expect_error( + get_crosswalk( + source_geography = "zcta", + target_geography = "zcta", + source_year = 2020, + target_year = 2022), + regexp = "not supported") +}) + +# ============================================================================== +# get_crosswalk_2020_2022() tests +# ============================================================================== + +test_that("get_crosswalk_2020_2022 returns CT crosswalk with attributes", { + skip_if_offline() + + result <- crosswalk:::get_crosswalk_2020_2022(geography = "tract") + + expect_s3_class(result, "tbl_df") + + sources_attr <- attr(result, "crosswalk_sources") + expect_type(sources_attr, "list") + expect_equal(sources_attr$connecticut, "ctdata") + expect_equal(sources_attr$other_states, "identity_mapping") + + note_attr <- attr(result, "identity_states_note") + expect_type(note_attr, "character") +}) + +test_that("get_crosswalk_2020_2022 errors on invalid geography", { + expect_error( + crosswalk:::get_crosswalk_2020_2022(geography = "puma"), + regexp = "not supported") +}) + +# ============================================================================== +# get_nhgis_crosswalk() validation tests +# ============================================================================== + +test_that("get_nhgis_crosswalk validates non-census year geography restrictions", { + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + expect_error( + get_nhgis_crosswalk( + source_year = 2010, + source_geography = "block", + target_year = 2022, + target_geography = "zcta"), + regexp = "Non-census year crosswalks.*only available") + + expect_error( + get_nhgis_crosswalk( + source_year = 2010, + source_geography = "block", + target_year = 
2011, + target_geography = "place"), + regexp = "Non-census year crosswalks.*only available") +}) + +test_that("get_nhgis_crosswalk accepts valid non-census year requests", { + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + skip_if_offline() + + expect_no_error({ + result <- get_nhgis_crosswalk( + source_year = 2010, + source_geography = "block", + target_year = 2022, + target_geography = "tract") + }) +}) + +# ============================================================================== +# Integration tests +# ============================================================================== + +test_that("CT tract GEOIDs have correct format changes", { + skip_if_offline() + + result <- get_ctdata_crosswalk(geography = "tract") + + expect_true(all(stringr::str_starts(result$source_geoid, "09"))) + expect_true(all(stringr::str_starts(result$target_geoid, "09"))) + + expect_true(all(stringr::str_length(result$source_geoid) == 11)) + expect_true(all(stringr::str_length(result$target_geoid) == 11)) + + source_counties <- stringr::str_sub(result$source_geoid, 3, 5) + target_counties <- stringr::str_sub(result$target_geoid, 3, 5) + expect_false(all(source_counties == target_counties)) +}) + +test_that("CT county crosswalk maps 8 old counties to 9 planning regions", { + skip_if_offline() + skip_if_not_installed("tidycensus") + skip_if(Sys.getenv("CENSUS_API_KEY") == "", "CENSUS_API_KEY not set") + + result <- get_ctdata_crosswalk(geography = "county") + + n_source_counties <- length(unique(result$source_geoid)) + n_target_regions <- length(unique(result$target_geoid)) + + expect_equal(n_source_counties, 8) + expect_equal(n_target_regions, 9) + + expect_gt(nrow(result), 8) + + expect_true(all(result$allocation_factor_source_to_target > 0)) + expect_true(all(result$allocation_factor_source_to_target <= 1)) +}) From f7ffca2133bb376143e3a92dae31e77da137dcc3 Mon Sep 17 00:00:00 2001 From: Will Curran-Groome Date: Sat, 17 Jan 2026 18:53:22 -0500 Subject: [PATCH 2/3] testing updates, adding support for ~all crosswalks from NHGIS --- R/get_crosswalk.R | 133 ++++- R/get_ctdata_crosswalk.R | 86 ++- R/get_geocorr_crosswalk.R | 30 ++ R/get_nhgis_crosswalk.R | 591 ++++++++++++++++----- tests/testthat/test-nhgis-crosswalk.R | 217 ++++++++ tests/testthat/test-noncensus-crosswalks.R | 154 +++++- 6 files changed, 1022 insertions(+), 189 deletions(-) create mode 100644 tests/testthat/test-nhgis-crosswalk.R diff --git a/R/get_crosswalk.R b/R/get_crosswalk.R index 5825b64..587c753 100644 --- a/R/get_crosswalk.R +++ b/R/get_crosswalk.R @@ -44,14 +44,33 @@ #' Data are tidy-formatted, with each observation reflecting a unique #' source-target-weighting factor combination. 
 #'
-#' The returned tibble includes an attribute `crosswalk_metadata` containing:
+#' The returned tibble includes an attribute `crosswalk_metadata` (access via
+#' `attr(result, "crosswalk_metadata")`) containing comprehensive information
+#' about how the crosswalk was produced:
 #' \describe{
-#' \item{source}{Character vector of data sources used (e.g., "nhgis", "ctdata")}
-#' \item{source_year}{The source year}
-#' \item{target_year}{The target year}
-#' \item{source_geography}{The source geography}
-#' \item{target_geography}{The target geography}
-#' \item{notes}{Any relevant notes about the crosswalk construction}
+#' \item{call_parameters}{List of the parameters passed to get_crosswalk()}
+#' \item{data_source}{Short identifier for the data source (e.g., "nhgis", "geocorr", "ctdata")}
+#' \item{data_source_full_name}{Full name of the data source}
+#' \item{download_url}{URL from which the crosswalk was downloaded (NHGIS, CTData)}
+#' \item{api_endpoint}{API endpoint used (Geocorr)}
+#' \item{documentation_url}{URL to documentation for the crosswalk source}
+#' \item{citation_url}{URL to citation requirements (NHGIS)}
+#' \item{github_repository}{GitHub repository URL (CTData)}
+#' \item{source_geography}{Source geography as specified by user}
+#' \item{source_geography_standardized}{Standardized source geography code}
+#' \item{target_geography}{Target geography as specified by user}
+#' \item{target_geography_standardized}{Standardized target geography code}
+#' \item{source_year}{Source year (if applicable)}
+#' \item{target_year}{Target year (if applicable)}
+#' \item{reference_year}{Reference year for same-year crosswalks (Geocorr)}
+#' \item{weighting_variable}{Variable used to calculate allocation factors}
+#' \item{state_coverage}{Geographic coverage notes (e.g., "Connecticut only")}
+#' \item{notes}{Additional notes about the crosswalk}
+#' \item{retrieved_at}{Timestamp when crosswalk was retrieved}
+#' \item{cached}{Logical indicating if result was cached}
+#' \item{cache_path}{Path to cached file (if applicable)}
+#' \item{read_from_cache}{Logical indicating if result was read from cache}
+#' \item{crosswalk_package_version}{Version of the crosswalk package used}
 #' }
 #'
 #' Columns in the returned dataframe (some may not be present depending on source):
@@ -109,10 +128,11 @@ get_crosswalk <- function(
   weight = NULL) {

   if (
-    source_geography == "block" & target_geography %in% c("block group", "tract", "county", "core_based_statistical_area") |
+    (source_geography == "block" & target_geography %in% c("block group", "tract", "county", "core_based_statistical_area") |
      source_geography == "block group" & target_geography %in% c("tract", "county", "core_based_statistical_area") |
      source_geography == "tract" & target_geography %in% c("county", "core_based_statistical_area") |
-      source_geography == "county" & target_geography == "core_based_statistical_area"
+      source_geography == "county" & target_geography == "core_based_statistical_area") &
+      ((is.null(source_year) & is.null(target_year)) | isTRUE(source_year == target_year))
   ) {
     warning(
       "The source geography is nested within the target geography and an empty result
@@ -133,22 +153,10 @@ simply aggregate your data to the desired geography.")
     crosswalk_source <- "nhgis"
   }

-  metadata <- list(
-    source = character(),
-    source_year = source_year_chr,
-    target_year = target_year_chr,
-    source_geography = source_geography,
-    target_geography = target_geography,
-    notes = character())
-
   if (crosswalk_source == "ctdata_2020_2022") {
     result
<- get_crosswalk_2020_2022( geography = source_geography, cache = cache) - metadata$source <- c("ctdata", "identity") - metadata$notes <- c( - "Connecticut: CTData Collaborative 2020-2022 crosswalk (identity mapping, FIPS code change only)", - "Other states: Identity mapping (no geographic changes between 2020 and 2022)") } else if (crosswalk_source == "nhgis") { result <- get_nhgis_crosswalk( @@ -157,7 +165,6 @@ simply aggregate your data to the desired geography.") target_year = target_year, target_geography = target_geography, cache = cache) - metadata$source <- "nhgis" } else { result <- get_geocorr_crosswalk( @@ -165,11 +172,91 @@ simply aggregate your data to the desired geography.") target_geography = target_geography, weight = weight, cache = cache) - metadata$source <- "geocorr" } + # Retrieve metadata from internal function (if present) + internal_metadata <- attr(result, "crosswalk_metadata") + + # Build comprehensive metadata object + metadata <- list( + # Call parameters + call_parameters = list( + source_geography = source_geography, + target_geography = target_geography, + source_year = source_year_chr, + target_year = target_year_chr, + weight = weight, + cache = cache), + + # Data source information + data_source = if (!is.null(internal_metadata$data_source)) { + internal_metadata$data_source + } else { + crosswalk_source + }, + data_source_full_name = if (!is.null(internal_metadata$data_source_full_name)) { + internal_metadata$data_source_full_name + } else { + switch(crosswalk_source, + "nhgis" = "IPUMS NHGIS (National Historical Geographic Information System)", + "geocorr" = "Geocorr 2022 (Missouri Census Data Center)", + "ctdata_2020_2022" = "CT Data Collaborative", + crosswalk_source) + }, + + # URLs and documentation + download_url = internal_metadata$download_url, + api_endpoint = internal_metadata$api_endpoint, + documentation_url = internal_metadata$documentation_url, + citation_url = internal_metadata$citation_url, + github_repository = internal_metadata$github_repository, + + # Geography and year details + source_geography = source_geography, + source_geography_standardized = internal_metadata$source_geography_standardized, + target_geography = target_geography, + target_geography_standardized = internal_metadata$target_geography_standardized, + source_year = source_year_chr, + target_year = target_year_chr, + reference_year = internal_metadata$reference_year, + + # Weighting + weighting_variable = if (!is.null(internal_metadata$weighting_variable)) { + internal_metadata$weighting_variable + } else { + weight + }, + + # Coverage and notes + state_coverage = internal_metadata$state_coverage, + notes = if (crosswalk_source == "ctdata_2020_2022") { + c("Connecticut: CTData Collaborative 2020-2022 crosswalk", + "Other states: No geographic changes between 2020 and 2022; use identity mapping", + internal_metadata$notes) + } else { + internal_metadata$notes + }, + + # Retrieval information + retrieved_at = internal_metadata$retrieved_at, + cached = internal_metadata$cached, + cache_path = internal_metadata$cache_path, + read_from_cache = internal_metadata$read_from_cache, + + # Package information + crosswalk_package_version = as.character(utils::packageVersion("crosswalk"))) + attr(result, "crosswalk_metadata") <- metadata + result = result |> + dplyr::mutate( + dplyr::across( + .cols = -allocation_factor_source_to_target, + .fns = as.character), + dplyr::across( + .cols = allocation_factor_source_to_target, + as.numeric)) + return(result) } diff --git 
a/R/get_ctdata_crosswalk.R b/R/get_ctdata_crosswalk.R index fa11e23..93d89f5 100644 --- a/R/get_ctdata_crosswalk.R +++ b/R/get_ctdata_crosswalk.R @@ -56,18 +56,47 @@ The provided geography '", geography, "' is not supported.")} cache_path, stringr::str_c("crosswalk_ctdata_2020_to_2022_", geography_standardized, ".csv")) + ctdata_urls <- list( + block = "https://raw.githubusercontent.com/CT-Data-Collaborative/2022-block-crosswalk/main/2022blockcrosswalk.csv", + tract = "https://raw.githubusercontent.com/CT-Data-Collaborative/2022-tract-crosswalk/main/2022tractcrosswalk.csv") + + # Determine which URL will be used based on geography + download_url <- if (geography_standardized %in% c("block", "block_group")) { + ctdata_urls$block + } else { + ctdata_urls$tract + } + if (file.exists(csv_path) & !is.null(cache)) { message("Reading CTData crosswalk from cache.") - return(readr::read_csv( + result <- readr::read_csv( csv_path, col_types = readr::cols(.default = readr::col_character(), - allocation_factor_source_to_target = readr::col_double()))) + allocation_factor_source_to_target = readr::col_double())) + + # Attach metadata to cached result + attr(result, "crosswalk_metadata") <- list( + data_source = "ctdata", + data_source_full_name = "CT Data Collaborative", + download_url = download_url, + github_repository = "https://github.com/CT-Data-Collaborative", + documentation_url = "https://github.com/CT-Data-Collaborative/2022-tract-crosswalk", + source_year = "2020", + target_year = "2022", + source_geography = geography, + source_geography_standardized = geography_standardized, + target_geography = geography, + target_geography_standardized = geography_standardized, + state_coverage = "Connecticut only (FIPS 09)", + notes = "Connecticut replaced 8 historical counties with 9 planning regions in 2022. Physical boundaries unchanged; only FIPS codes changed.", + retrieved_at = NA, + cached = TRUE, + cache_path = csv_path, + read_from_cache = TRUE) + + return(result) } - ctdata_urls <- list( - block = "https://raw.githubusercontent.com/CT-Data-Collaborative/2022-block-crosswalk/main/2022blockcrosswalk.csv", - tract = "https://raw.githubusercontent.com/CT-Data-Collaborative/2022-tract-crosswalk/main/2022tractcrosswalk.csv") - if (geography_standardized == "block") { raw_df <- readr::read_csv(ctdata_urls$block, show_col_types = FALSE) @@ -116,14 +145,6 @@ The provided geography '", geography, "' is not supported.")} state_fips = "09") } else if (geography_standardized == "county") { - if (!requireNamespace("tidycensus", quietly = TRUE)) { - stop( -"The tidycensus package is required for Connecticut county crosswalks because -allocation factors must be calculated based on population. 
Install it with: -install.packages('tidycensus') -You will also need a Census API key: tidycensus::census_api_key('YOUR_KEY')") - } - raw_df <- readr::read_csv(ctdata_urls$tract, show_col_types = FALSE) |> janitor::clean_names() |> dplyr::select( @@ -134,11 +155,11 @@ You will also need a Census API key: tidycensus::census_api_key('YOUR_KEY')") ct_tract_populations <- suppressMessages({ tidycensus::get_acs( - year = 2021, - geography = "tract", - state = "CT", - variables = "B01003_001", - output = "wide") |> + year = 2021, + geography = "tract", + state = "CT", + variables = "B01003_001", + output = "wide") |> dplyr::select( tract_fips_2020 = GEOID, population_2020 = B01003_001E) @@ -181,6 +202,33 @@ You will also need a Census API key: tidycensus::census_api_key('YOUR_KEY')") "Connecticut 2020-2022 crosswalk sourced from CT Data Collaborative. See https://github.com/CT-Data-Collaborative for more information.") + # Attach metadata to result + weighting_note <- if (geography_standardized == "county") { + "County crosswalk uses population-weighted allocation factors from ACS 2021 tract populations." + } else { + "Identity mapping (allocation_factor = 1) - physical boundaries unchanged, only FIPS codes changed." + } + + attr(result, "crosswalk_metadata") <- list( + data_source = "ctdata", + data_source_full_name = "CT Data Collaborative", + download_url = download_url, + github_repository = "https://github.com/CT-Data-Collaborative", + documentation_url = "https://github.com/CT-Data-Collaborative/2022-tract-crosswalk", + source_year = "2020", + target_year = "2022", + source_geography = geography, + source_geography_standardized = geography_standardized, + target_geography = geography, + target_geography_standardized = geography_standardized, + state_coverage = "Connecticut only (FIPS 09)", + notes = c( + "Connecticut replaced 8 historical counties with 9 planning regions in 2022.", + weighting_note), + retrieved_at = Sys.time(), + cached = !is.null(cache), + cache_path = if (!is.null(cache)) csv_path else NULL) + return(result) } diff --git a/R/get_geocorr_crosswalk.R b/R/get_geocorr_crosswalk.R index a0a72e9..dbe0932 100644 --- a/R/get_geocorr_crosswalk.R +++ b/R/get_geocorr_crosswalk.R @@ -57,6 +57,21 @@ get_geocorr_crosswalk <- function( message("Reading file from cache.") + # Attach metadata to cached result + attr(result, "crosswalk_metadata") <- list( + data_source = "geocorr", + data_source_full_name = "Geocorr 2022 (Missouri Census Data Center)", + api_endpoint = "https://mcdc.missouri.edu/cgi-bin/broker", + documentation_url = "https://mcdc.missouri.edu/applications/geocorr2022.html", + source_geography = source_geography, + target_geography = target_geography, + weighting_variable = weight, + reference_year = "2022", + retrieved_at = NA, + cached = TRUE, + cache_path = outpath, + read_from_cache = TRUE) + return(result) } # Base API URL for geocorr2022 @@ -295,6 +310,21 @@ get_geocorr_crosswalk <- function( readr::write_csv(df2, outpath) } } + + # Attach metadata to result + attr(df2, "crosswalk_metadata") <- list( + data_source = "geocorr", + data_source_full_name = "Geocorr 2022 (Missouri Census Data Center)", + api_endpoint = base_url, + documentation_url = "https://mcdc.missouri.edu/applications/geocorr2022.html", + source_geography = source_geography, + target_geography = target_geography, + weighting_variable = weight, + reference_year = "2022", + retrieved_at = Sys.time(), + cached = !is.null(cache), + cache_path = if (!is.null(cache)) outpath else NULL) + return(df2) } 
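
Aside (not part of the diff): with `get_crosswalk()` and the CTData/Geocorr helpers above now attaching a `crosswalk_metadata` attribute, here is a minimal sketch of how a user of the package might inspect that provenance. It assumes the crosswalk package built from this patch series is installed and that the CTData GitHub CSVs are reachable; the field names follow the roxygen documentation earlier in this patch, and which fields are populated depends on the backend that served the request.

```r
# Illustrative only -- assumes the crosswalk package from this patch is
# installed and that network access to the CT-Data-Collaborative GitHub
# repository is available.
library(crosswalk)

# Same call exercised in tests/testthat/test-noncensus-crosswalks.R
xwalk <- get_crosswalk(
  source_geography = "tract",
  target_geography = "tract",
  source_year = 2020,
  target_year = 2022)

# Provenance travels as an attribute, not as extra columns
meta <- attr(xwalk, "crosswalk_metadata")

meta$data_source               # "ctdata" for the 2020-2022 tract case
meta$call_parameters           # list of the arguments the crosswalk was built from
meta$download_url              # raw CSV on the CT-Data-Collaborative GitHub
meta$crosswalk_package_version # recorded for reproducibility

# Compact overview of every documented field
str(meta, max.level = 1)
```

Because the provenance rides along as an attribute rather than as columns, the tidy source/target/allocation-factor shape of the tibble is untouched; note, though, that some data-manipulation operations (joins, row binds) can drop custom attributes, so callers who plan heavy wrangling may want to capture the metadata first.
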
diff --git a/R/get_nhgis_crosswalk.R b/R/get_nhgis_crosswalk.R index 49277e8..5f2d13c 100644 --- a/R/get_nhgis_crosswalk.R +++ b/R/get_nhgis_crosswalk.R @@ -96,7 +96,7 @@ standardize_geography <- function(geography, context = "source") { return(standardized) } } else if (context == "target") { - valid_geogs = c("blk", "bg", "tr", "co", "ua", "zcta", "puma", "cbsa") + valid_geogs = c("blk", "bg", "tr", "co", "pl", "ua", "zcta", "puma", "cbsa") if (standardized %in% valid_geogs) { return(standardized) } @@ -124,144 +124,258 @@ available crosswalks.") #' @export list_nhgis_crosswalks <- function() { nhgis_crosswalks_vector = c( - ## from 1990 to 2010 - ## blk source - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_ua2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_zcta2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_puma2010.zip", + ## ========================================================================= + ## BLOCK-TO-BLOCK CROSSWALKS (decennial years only) + ## ========================================================================= "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_blk2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_pl2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_bg2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_tr2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_co2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_cbsa2010.zip", - ## bgp source - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bgp1990_bg2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bgp1990_tr2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bgp1990_co2010.zip", - ## tr source - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr1990_tr2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr1990_co2010.zip", - - ## from 2000 to 2010 - ## blk source - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_ua2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_zcta2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_puma2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_pl2010.zip", "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_blk2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_bg2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_tr2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_co2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_cbsa2010.zip", - ## bgp source - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bgp2000_bg2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bgp2000_tr2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bgp2000_co2010.zip", - ## tr source - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2000_tr2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2000_co2010.zip", - - ## from 2010 to 2020 - ## blk source - 
"https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_ua2020.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_zcta2020.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_puma2020.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_pl2020.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_cbsa2020.zip", "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_blk2020.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_bg2020.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_tr2020.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_co2020.zip", - ## bg source - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2010_bg2020.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2010_tr2020.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2010_co2020.zip", - ## tr source - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2010_tr2020.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2010_co2020.zip", - - ## from 2020 to 2010 - ## blk source - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_ua2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_zcta2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_puma2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_pl2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_cbsa2010.zip", "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_blk2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_bg2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_tr2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_co2010.zip", - ## bg source - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2020_bg2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2020_tr2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2020_co2010.zip", - ## tr source - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2020_tr2010.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2020_co2010.zip", ## ========================================================================= - ## NON-CENSUS YEAR CROSSWALKS - ## Available for block groups, tracts, and counties only - ## Years with boundary changes: 2011, 2012, 2014, 2015, 2022 + ## BLOCK → BLOCK GROUP ## ========================================================================= - - ## from 1990 to non-census years (2011, 2012, 2014, 2015) - ## blk source to bg + ## from 1990 + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_bg2010.zip", "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_bg2011.zip", "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_bg2012.zip", "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_bg2014.zip", "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_bg2015.zip", - ## blk source to tr - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_tr2011.zip", - 
"https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_tr2012.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_tr2014.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_tr2015.zip", - ## blk source to co - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_co2011.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_co2012.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_co2014.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_co2015.zip", - - ## from 2000 to non-census years (2011, 2012, 2014, 2015) - ## blk source to bg + ## from 2000 + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_bg2010.zip", "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_bg2011.zip", "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_bg2012.zip", "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_bg2014.zip", "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_bg2015.zip", - ## blk source to tr - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_tr2011.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_tr2012.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_tr2014.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_tr2015.zip", - ## blk source to co - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_co2011.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_co2012.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_co2014.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_co2015.zip", - - ## from 2010 to 2022 - ## blk source + ## from 2010 + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_bg2020.zip", "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_bg2022.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_tr2022.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_co2022.zip", - - ## from 2020 to non-census years (2011, 2012, 2014, 2015) - ## blk source to bg + ## from 2020 + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_bg2010.zip", "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_bg2011.zip", "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_bg2012.zip", "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_bg2014.zip", "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_bg2015.zip", - ## blk source to tr + + ## ========================================================================= + ## BLOCK GROUP ↔ BLOCK GROUP (bidirectional) + ## ========================================================================= + ## from 2010s + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2010_bg2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2010_bg2022.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2011_bg2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2011_bg2022.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2012_bg2020.zip", + 
"https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2012_bg2022.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2014_bg2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2014_bg2022.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2015_bg2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2015_bg2022.zip", + ## from 2020s + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2020_bg2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2020_bg2011.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2020_bg2012.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2020_bg2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2020_bg2015.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2022_bg2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2022_bg2011.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2022_bg2012.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2022_bg2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2022_bg2015.zip", + + ## ========================================================================= + ## BLOCK → TRACT + ## ========================================================================= + ## from 1990 + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_tr2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_tr2011.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_tr2012.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_tr2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_tr2015.zip", + ## from 2000 + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_tr2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_tr2011.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_tr2012.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_tr2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_tr2015.zip", + ## from 2010 + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_tr2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_tr2022.zip", + ## from 2020 + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_tr2010.zip", "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_tr2011.zip", "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_tr2012.zip", "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_tr2014.zip", "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_tr2015.zip", - ## blk source to co + + ## ========================================================================= + ## BLOCK GROUP → TRACT + ## ========================================================================= + ## from 2010s + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2010_tr2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2010_tr2022.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2011_tr2020.zip", + 
"https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2011_tr2022.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2012_tr2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2012_tr2022.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2014_tr2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2014_tr2022.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2015_tr2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2015_tr2022.zip", + ## from 2020s + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2020_tr2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2020_tr2011.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2020_tr2012.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2020_tr2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2020_tr2015.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2022_tr2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2022_tr2011.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2022_tr2012.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2022_tr2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2022_tr2015.zip", + + ## ========================================================================= + ## TRACT ↔ TRACT (bidirectional) + ## ========================================================================= + ## from 1990 + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr1990_tr2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr1990_tr2011.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr1990_tr2012.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr1990_tr2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr1990_tr2015.zip", + ## from 2000 + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2000_tr2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2000_tr2011.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2000_tr2012.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2000_tr2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2000_tr2015.zip", + ## from 2010s + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2010_tr2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2010_tr2022.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2011_tr2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2011_tr2022.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2012_tr2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2012_tr2022.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2014_tr2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2014_tr2022.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2015_tr2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2015_tr2022.zip", + ## from 2020s + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2020_tr2010.zip", + 
"https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2020_tr2011.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2020_tr2012.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2020_tr2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2020_tr2015.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2022_tr2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2022_tr2011.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2022_tr2012.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2022_tr2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2022_tr2015.zip", + + ## ========================================================================= + ## BLOCK → COUNTY + ## Note: 2011/2012 targets only available from 2020 source (not 1990/2000) + ## ========================================================================= + ## from 1990 (to 2010, 2014, 2015 only) + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_co2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_co2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_co2015.zip", + ## from 2000 (to 2010, 2014, 2015 only) + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_co2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_co2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_co2015.zip", + ## from 2010 + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_co2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_co2022.zip", + ## from 2020 (all 2010s targets available) + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_co2010.zip", "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_co2011.zip", "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_co2012.zip", "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_co2014.zip", - "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_co2015.zip") + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_co2015.zip", + + ## ========================================================================= + ## BLOCK GROUP → COUNTY + ## Note: bg source to co only available for 2010, 2014, 2015 sources + ## (NOT 2011 or 2012 sources) + ## ========================================================================= + ## from 2010 + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2010_co2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2010_co2022.zip", + ## from 2014 + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2014_co2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2014_co2022.zip", + ## from 2015 + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2015_co2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2015_co2022.zip", + ## from 2020 (to 2010, 2014, 2015 only) + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2020_co2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2020_co2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2020_co2015.zip", + ## from 2022 (to 
2010, 2014, 2015 only) + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2022_co2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2022_co2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2022_co2015.zip", + + ## ========================================================================= + ## TRACT → COUNTY + ## Note: tr source to co only available for 1990, 2000, 2010, 2014, 2015, + ## 2020, 2022 sources (NOT 2011 or 2012 sources) + ## ========================================================================= + ## from 1990 (to 2010, 2014, 2015 only) + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr1990_co2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr1990_co2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr1990_co2015.zip", + ## from 2000 (to 2010, 2014, 2015 only) + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2000_co2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2000_co2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2000_co2015.zip", + ## from 2010 + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2010_co2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2010_co2022.zip", + ## from 2014 + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2014_co2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2014_co2022.zip", + ## from 2015 + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2015_co2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2015_co2022.zip", + ## from 2020 (to 2010, 2014, 2015 only) + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2020_co2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2020_co2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2020_co2015.zip", + ## from 2022 (to 2010, 2014, 2015 only) + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2022_co2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2022_co2014.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2022_co2015.zip", + + ## ========================================================================= + ## BLOCK → OTHER GEOGRAPHIES (decennial years only) + ## ========================================================================= + ## CBSA + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_cbsa2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_cbsa2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_cbsa2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_cbsa2010.zip", + ## Place + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_pl2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_pl2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_pl2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_pl2010.zip", + ## PUMA + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_puma2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_puma2010.zip", + 
"https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_puma2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_puma2010.zip", + ## Urban Area + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_ua2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_ua2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_ua2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_ua2010.zip", + ## ZCTA + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_zcta2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2000_zcta2010.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2010_zcta2020.zip", + "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_zcta2010.zip") ## for the time being, not supporting block group parts nhgis_crosswalks_vector = nhgis_crosswalks_vector[!stringr::str_detect(nhgis_crosswalks_vector, "bgp")] @@ -280,6 +394,7 @@ list_nhgis_crosswalks <- function() { dplyr::across( .cols = dplyr::matches("geography"), .fns = ~ .x |> stringr::str_replace_all(c( + "pl" = "place", "blk" = "block", "bgp" = "block_group_part", "bg" = "block_group", @@ -369,37 +484,113 @@ get_nhgis_crosswalk <- function( See https://www.nhgis.org/citation-and-use-nhgis-data.") message("Reading file from cache.") + # Attach metadata to cached result + attr(result, "crosswalk_metadata") <- list( + data_source = "nhgis", + data_source_full_name = "IPUMS NHGIS (National Historical Geographic Information System)", + download_url = crosswalk_path, + citation_url = "https://www.nhgis.org/citation-and-use-nhgis-data", + documentation_url = "https://www.nhgis.org/geographic-crosswalks", + source_year = source_year, + target_year = target_year, + source_geography = source_geography, + source_geography_standardized = source_geography_standardized, + target_geography = target_geography, + target_geography_standardized = target_geography_standardized, + retrieved_at = NA, + cached = TRUE, + cache_path = csv_path, + read_from_cache = TRUE) + return(result) } # Validate inputs + + # Define valid years + valid_decennial_years <- c("1990", "2000", "2010", "2020") valid_noncensus_years <- c("2011", "2012", "2014", "2015", "2022") - valid_source_years <- valid_decennial_years - valid_target_years <- c(valid_decennial_years, valid_noncensus_years) + valid_years <- c(valid_decennial_years, valid_noncensus_years) valid_source_geogs <- c("blk", "bg", "tr") valid_target_geogs <- c("blk", "bg", "tr", "co", "ua", "zcta", "puma", "cbsa") - noncensus_target_geogs <- c("bg", "tr", "co") + noncensus_geogs <- c("bg", "tr", "co") + + # Helper to determine decade for a year + get_decade <- function(year) { + dplyr::case_when( + year == "1990" ~ "1990s", + year == "2000" ~ "2000s", + year %in% c("2010", "2011", "2012", "2014", "2015") ~ "2010s", + year %in% c("2020", "2022") ~ "2020s", + TRUE ~ NA_character_) + } + + # Validate source and target years are recognized + if (!source_year %in% valid_years) { + stop("source_year must be one of: ", paste(valid_years, collapse = ", "))} - if (source_year == "1990" & target_year == "2000") { + if (!target_year %in% valid_years) { + stop("target_year must be one of: ", paste(valid_years, collapse = ", "))} + + # NHGIS only provides cross-decade crosswalks + source_decade <- get_decade(source_year) + target_decade <- get_decade(target_year) + + if (source_decade == 
target_decade) { stop( -"There are no crosswalks from 1990 to 2000; 1990 source geography crosswalks are -available only to 2010 geographies.")} +"NHGIS only provides cross-decade crosswalks. The requested combination (", +source_year, " to ", target_year, ") is within the same decade (", +source_decade, "). For within-decade crosswalks like 2020 to 2022, use +get_crosswalk() which handles special cases like Connecticut.")} - if (!source_year %in% valid_source_years) { - stop("source_year must be one of: ", paste(valid_source_years, collapse = ", "))} + # 1990 can only go to 2010s (not 2000s or 2020s) + if (source_year == "1990" & target_decade != "2010s") { + stop( +"Crosswalks from 1990 are only available to 2010s geographies (2010, 2011, +2012, 2014, 2015). Target year ", target_year, " is not supported.")} - if (!target_year %in% valid_target_years) { - stop("target_year must be one of: ", paste(valid_target_years, collapse = ", "))} + # 2000 can only go to 2010s + if (source_year == "2000" & target_decade != "2010s") { + stop( +"Crosswalks from 2000 are only available to 2010s geographies (2010, 2011, +2012, 2014, 2015). Target year ", target_year, " is not supported.")} + + # Non-census years have geography restrictions (applies to both source and target) + if (source_year %in% valid_noncensus_years) { + if (!source_geography_standardized %in% noncensus_geogs) { + stop( +"Non-census year crosswalks (2011, 2012, 2014, 2015, 2022) are only available +for block groups, tracts, and counties. The requested source geography '", +source_geography, "' is not supported for source year ", source_year, ".") + } + } if (target_year %in% valid_noncensus_years) { - if (!target_geography_standardized %in% noncensus_target_geogs) { + if (!target_geography_standardized %in% noncensus_geogs) { stop( "Non-census year crosswalks (2011, 2012, 2014, 2015, 2022) are only available -for block groups, tracts, and counties. The requested geography '", +for block groups, tracts, and counties. The requested target geography '", target_geography, "' is not supported for target year ", target_year, ".") } } + # County target restrictions: 2011 and 2012 source years don't have county targets + if (source_year %in% c("2011", "2012") & target_geography_standardized == "co") { + stop( +"County crosswalks are not available from source years 2011 or 2012. +County targets are only available from source years: 1990, 2000, 2010, 2014, +2015, 2020, 2022.") + } + + # County target restrictions: 1990/2000 to county only has 2010, 2014, 2015 targets + if (source_year %in% c("1990", "2000") & + target_geography_standardized == "co" & + target_year %in% c("2011", "2012")) { + stop( +"Crosswalks from ", source_year, " to county are only available for target +years 2010, 2014, and 2015 (not ", target_year, ").") + } + if (is.null(source_geography_standardized)) { stop( "source_geography '", source_geography, "' is not valid. Must be one of: blocks, @@ -420,13 +611,33 @@ block groups, tracts, or counties (various spellings accepted)")} "API key required. Save your API key to the IPUMS_API_KEY environment variable. 
Get your key at https://account.ipums.org/api_keys") } + # Helper function to safely check zip contents + + safe_unzip_list = function(zip_file) { + tryCatch( + utils::unzip(zip_file, list = TRUE), + error = function(e) NULL + ) + } + + # Helper function to safely extract zip + safe_unzip_extract = function(zip_file, exdir) { + tryCatch({ + utils::unzip(zipfile = zip_file, exdir = exdir) + TRUE + }, + error = function(e) FALSE + ) + } + crosswalk_df1 = tryCatch({ - zip_path = file.path(cache_path, stringr::str_c(crosswalk_sub_path, ".zip")) - csv_path_temporary = file.path(cache_path, stringr::str_c("nhgis_", crosswalk_sub_path, ".csv")) + # Use a unique temporary directory for downloading and extracting + temp_dir = file.path(tempdir(), stringr::str_c("nhgis_", crosswalk_sub_path, "_", format(Sys.time(), "%Y%m%d%H%M%S"))) + dir.create(temp_dir, recursive = TRUE) + on.exit(unlink(temp_dir, recursive = TRUE), add = TRUE) - ## if the specified directory doesn't yet exist, create it - if (!dir.exists(cache_path)) { dir.create(cache_path) } + zip_path = file.path(temp_dir, stringr::str_c(crosswalk_sub_path, ".zip")) # Download the crosswalk file response = httr::GET( @@ -434,22 +645,94 @@ variable. Get your key at https://account.ipums.org/api_keys") } httr::add_headers(Authorization = api_key), httr::write_disk(zip_path, overwrite = TRUE), overwrite = TRUE) - # Unzip the .zip - utils::unzip( - zipfile = zip_path, - exdir = file.path(cache_path)) + # Check what's in the zip before extracting + zip_contents = safe_unzip_list(zip_path) - crosswalk_df = readr::read_csv(csv_path_temporary) |> - janitor::clean_names() + if (is.null(zip_contents) || nrow(zip_contents) == 0) { + warning( + "The downloaded zip file for crosswalk ", crosswalk_sub_path, + " is empty or cannot be opened. This crosswalk may not be available from NHGIS. ", + "Returning an empty tibble.") + return(tibble::tibble()) + } + + # Extract the outer zip to temp directory + extract_success = safe_unzip_extract(zip_path, temp_dir) + if (!extract_success) { + warning( + "Failed to extract the downloaded zip file for crosswalk ", crosswalk_sub_path, + ". The file may be corrupted. Returning an empty tibble.") + return(tibble::tibble()) + } + + # List extracted files (excluding the original zip) + all_files = list.files(temp_dir, full.names = TRUE) + all_files = all_files[all_files != zip_path] - # Remove the zipped folder and the raw CSV file - file.remove(zip_path) - file.remove(csv_path_temporary) + # Look for CSV files first (some crosswalks may not be nested) + csv_files = all_files[stringr::str_detect(all_files, "\\.csv$")] + + # If no CSV found directly, look for nested zip and extract it + if (length(csv_files) == 0) { + nested_zips = all_files[stringr::str_detect(all_files, "\\.zip$")] + + if (length(nested_zips) == 0) { + warning( + "No CSV or nested zip file found in the downloaded archive for ", + crosswalk_sub_path, ". Returning an empty tibble.") + return(tibble::tibble()) + } + + # Check if nested zip can be opened + nested_zip = nested_zips[1] + nested_contents = safe_unzip_list(nested_zip) + + if (is.null(nested_contents) || nrow(nested_contents) == 0) { + warning( + "The nested zip file for crosswalk ", crosswalk_sub_path, + " is empty or cannot be opened. This crosswalk may not be available from NHGIS. 
", + "Returning an empty tibble.") + return(tibble::tibble()) + } + + # Extract the nested zip + nested_extract_success = safe_unzip_extract(nested_zip, temp_dir) + if (!nested_extract_success) { + warning( + "Failed to extract the nested zip file for crosswalk ", crosswalk_sub_path, + ". The file may be corrupted. Returning an empty tibble.") + return(tibble::tibble()) + } + + # Now look for CSV files again + all_files = list.files(temp_dir, full.names = TRUE) + csv_files = all_files[stringr::str_detect(all_files, "\\.csv$")] + } + + if (length(csv_files) == 0) { + warning( + "No CSV file found after extracting zip archive(s) for ", crosswalk_sub_path, + ". Returning an empty tibble.") + return(tibble::tibble()) + } + + crosswalk_df = readr::read_csv( + csv_files[1], + col_types = readr::cols(.default = readr::col_character())) |> + janitor::clean_names() crosswalk_df - }, + }, error = function(e) { - stop("Failed to retrieve crosswalk: ", e$message) }) + warning("Failed to retrieve crosswalk ", crosswalk_sub_path, ": ", e$message, + ". Returning an empty tibble.") + return(tibble::tibble()) + }) + + # Handle case where empty tibble was returned due to empty zip + if (nrow(crosswalk_df1) == 0) { + return(tibble::tibble()) + } crosswalk_df = crosswalk_df1 |> dplyr::select(-dplyr::matches("gj")) |> @@ -466,6 +749,11 @@ variable. Get your key at https://account.ipums.org/api_keys") } "_hu" = "_housing_all", "ownhu" = "housing_owned", "renthu" = "housing_rented"))) |> + # Convert weight columns to numeric before pivoting + dplyr::mutate( + dplyr::across( + .cols = dplyr::matches("^weight_"), + .fns = as.numeric)) |> dplyr::rename( source_geoid = !!(stringr::str_c(source_geography_standardized, source_year, "ge")), target_geoid = !!(stringr::str_c(target_geography_standardized, target_year, "ge"))) |> @@ -488,13 +776,34 @@ variable. Get your key at https://account.ipums.org/api_keys") } values_to = "allocation_factor_source_to_target") ## if the file does not already exist and cache is not NULL - if (!file.exists(csv_path) & !is.null(cache) ) { - readr::write_csv(crosswalk_df, csv_path) } + if (!file.exists(csv_path) & !is.null(cache)) { + if (!dir.exists(cache)) { + dir.create(cache, recursive = TRUE) + } + readr::write_csv(crosswalk_df, csv_path) + } message( "Use of NHGIS crosswalks is subject to the same conditions as for all NHGIS data. 
See https://www.nhgis.org/citation-and-use-nhgis-data.") + # Attach metadata to result + attr(crosswalk_df, "crosswalk_metadata") <- list( + data_source = "nhgis", + data_source_full_name = "IPUMS NHGIS (National Historical Geographic Information System)", + download_url = crosswalk_path, + citation_url = "https://www.nhgis.org/citation-and-use-nhgis-data", + documentation_url = "https://www.nhgis.org/geographic-crosswalks", + source_year = source_year, + target_year = target_year, + source_geography = source_geography, + source_geography_standardized = source_geography_standardized, + target_geography = target_geography, + target_geography_standardized = target_geography_standardized, + retrieved_at = Sys.time(), + cached = !is.null(cache), + cache_path = if (!is.null(cache)) csv_path else NULL) + return(crosswalk_df) } diff --git a/tests/testthat/test-nhgis-crosswalk.R b/tests/testthat/test-nhgis-crosswalk.R new file mode 100644 index 0000000..d597ae5 --- /dev/null +++ b/tests/testthat/test-nhgis-crosswalk.R @@ -0,0 +1,217 @@ +# Test comprehensive NHGIS crosswalk coverage +# These tests verify that the package can successfully retrieve a random sample +# of crosswalks from the NHGIS API. + +test_that("list_nhgis_crosswalks returns expected structure", { + crosswalks <- list_nhgis_crosswalks() + + expect_s3_class(crosswalks, "tbl_df") + expect_true(all(c("source_geography", "source_year", "target_geography", + "target_year", "crosswalk_path") %in% names(crosswalks))) + expect_true(nrow(crosswalks) > 100) # Should have 140+ crosswalks +}) + +test_that("list_nhgis_crosswalks includes all expected crosswalk categories", { + crosswalks <- list_nhgis_crosswalks() + + # Block-to-block crosswalks exist + blk_to_blk <- crosswalks |> + dplyr::filter(source_geography == "block", target_geography == "block") + expect_equal(nrow(blk_to_blk), 4) + + # BG-to-BG bidirectional crosswalks exist + bg_to_bg <- crosswalks |> + dplyr::filter(source_geography == "block_group", target_geography == "block_group") + expect_equal(nrow(bg_to_bg), 20) + + # Tract-to-tract bidirectional crosswalks exist + tr_to_tr <- crosswalks |> + dplyr::filter(source_geography == "tract", target_geography == "tract") + expect_equal(nrow(tr_to_tr), 30) + + # Block to other geographies exist (cbsa, pl, puma, ua, zcta) + blk_to_other <- crosswalks |> + dplyr::filter(source_geography == "block", + target_geography %in% c("core_based_statistical_area", "place", + "puma", "urban_area", "zcta")) + expect_equal(nrow(blk_to_other), 20) +}) + +test_that("list_nhgis_crosswalks includes non-decadal source years", { + crosswalks <- list_nhgis_crosswalks() + source_years <- unique(crosswalks$source_year) + + expect_true("2011" %in% source_years) + expect_true("2012" %in% source_years) + expect_true("2014" %in% source_years) + expect_true("2015" %in% source_years) + expect_true("2022" %in% source_years) +}) + +test_that("list_nhgis_crosswalks includes non-census target years", { + crosswalks <- list_nhgis_crosswalks() + target_years <- unique(crosswalks$target_year) + + expect_true("2011" %in% target_years) + expect_true("2012" %in% target_years) + expect_true("2014" %in% target_years) + expect_true("2015" %in% target_years) + expect_true("2022" %in% target_years) +}) + +test_that("county crosswalks correctly exclude 2011/2012 source years", { + crosswalks <- list_nhgis_crosswalks() + + # 2011 and 2012 should NOT have county targets + co_from_2011 <- crosswalks |> + dplyr::filter(source_year == "2011", target_geography == "county") + 
expect_equal(nrow(co_from_2011), 0) + + co_from_2012 <- crosswalks |> + dplyr::filter(source_year == "2012", target_geography == "county") + expect_equal(nrow(co_from_2012), 0) + + # But 2014, 2015, 2022 SHOULD have county targets + co_from_2014 <- crosswalks |> + dplyr::filter(source_year == "2014", target_geography == "county") + expect_true(nrow(co_from_2014) > 0) + + co_from_2022 <- crosswalks |> + dplyr::filter(source_year == "2022", target_geography == "county") + expect_true(nrow(co_from_2022) > 0) +}) + +test_that("1990/2000 to county crosswalks only include 2010, 2014, 2015 targets", { + crosswalks <- list_nhgis_crosswalks() + + # From 1990 to county + co_from_1990 <- crosswalks |> + dplyr::filter(source_year == "1990", target_geography == "county") + expect_true(all(co_from_1990$target_year %in% c("2010", "2014", "2015"))) + expect_false(any(co_from_1990$target_year %in% c("2011", "2012"))) + + # From 2000 to county + co_from_2000 <- crosswalks |> + dplyr::filter(source_year == "2000", target_geography == "county") + expect_true(all(co_from_2000$target_year %in% c("2010", "2014", "2015"))) + expect_false(any(co_from_2000$target_year %in% c("2011", "2012"))) +}) + +test_that("get_nhgis_crosswalk rejects 2011/2012 source years to county", { + skip_if_not(Sys.getenv("IPUMS_API_KEY") != "", "IPUMS_API_KEY not set") + + expect_error( + get_nhgis_crosswalk( + source_year = 2011, + source_geography = "block group", + target_year = 2020, + target_geography = "county"), + regexp = "County crosswalks are not available from source years 2011 or 2012") + + expect_error( + get_nhgis_crosswalk( + source_year = 2012, + source_geography = "tract", + target_year = 2020, + target_geography = "county"), + regexp = "County crosswalks are not available from source years 2011 or 2012") +}) + +test_that("get_nhgis_crosswalk rejects 1990/2000 to county 2011/2012", { + skip_if_not(Sys.getenv("IPUMS_API_KEY") != "", "IPUMS_API_KEY not set") + + expect_error( + get_nhgis_crosswalk( + source_year = 1990, + source_geography = "block", + target_year = 2011, + target_geography = "county"), + regexp = "years 2010, 2014, and 2015") + + expect_error( + get_nhgis_crosswalk( + source_year = 2000, + source_geography = "block", + target_year = 2012, + target_geography = "county"), + regexp = "years 2010, 2014, and 2015") +}) + + +# Integration tests that actually query the NHGIS API +# These are slow tests that should only run when IPUMS_API_KEY is available +# and when explicitly requested via an environment variable + +test_that("random sample of 20 NHGIS crosswalks can be retrieved", { + skip_if_not(Sys.getenv("IPUMS_API_KEY") != "", "IPUMS_API_KEY not set") + skip_if_not( + Sys.getenv("CROSSWALK_RUN_SLOW_TESTS") == "true", + "Set CROSSWALK_RUN_SLOW_TESTS=true to run slow integration tests") + + # Set seed for reproducibility + set.seed(12345) + + # Get all available crosswalks and select a sample of 20 + sample_crosswalks <- list_nhgis_crosswalks() |> + dplyr::slice_sample(n = 20) + + test_results = purrr::pmap( + sample_crosswalks |> dplyr::select(-crosswalk_path), + get_nhgis_crosswalk) + + valid_queries = purrr::map( + test_results, + function(crosswalk) { + metadata = attr(crosswalk, "crosswalk_metadata") + + ## if this is null, the query failed + if (is.null(metadata$retrieved_at)) 0 else 1 + }) |> + purrr::reduce(sum) + + expect_equal(valid_queries, 20) + +}) + +test_that("specific crosswalk types work correctly", { + skip_if_not(Sys.getenv("IPUMS_API_KEY") != "", "IPUMS_API_KEY not set") + skip_if_not( + 
Sys.getenv("CROSSWALK_RUN_SLOW_TESTS") == "true", + "Set CROSSWALK_RUN_SLOW_TESTS=true to run slow integration tests") + + # Test block-to-block (decennial) + blk_blk <- get_nhgis_crosswalk( + source_year = 2010, + source_geography = "block", + target_year = 2020, + target_geography = "block") + expect_s3_class(blk_blk, "tbl_df") + expect_true(nrow(blk_blk) > 0) + + # Test bg-to-bg with non-census source year + bg_bg_noncensus <- get_nhgis_crosswalk( + source_year = 2014, + source_geography = "block group", + target_year = 2020, + target_geography = "block group") + expect_s3_class(bg_bg_noncensus, "tbl_df") + expect_true(nrow(bg_bg_noncensus) > 0) + + # Test tract-to-tract backwards (2020s -> 2010s) + tr_tr_backwards <- get_nhgis_crosswalk( + source_year = 2022, + source_geography = "tract", + target_year = 2014, + target_geography = "tract") + expect_s3_class(tr_tr_backwards, "tbl_df") + expect_true(nrow(tr_tr_backwards) > 0) + + # Test block to ZCTA (decennial only) + blk_zcta <- get_nhgis_crosswalk( + source_year = 2010, + source_geography = "block", + target_year = 2020, + target_geography = "zcta") + expect_s3_class(blk_zcta, "tbl_df") + expect_true(nrow(blk_zcta) > 0) +}) diff --git a/tests/testthat/test-noncensus-crosswalks.R b/tests/testthat/test-noncensus-crosswalks.R index e392533..7c9e341 100644 --- a/tests/testthat/test-noncensus-crosswalks.R +++ b/tests/testthat/test-noncensus-crosswalks.R @@ -48,6 +48,50 @@ test_that("list_nhgis_crosswalks includes 2010 to 2022 crosswalks", { expect_true("county" %in% crosswalks_2010_to_2022$target_geography) }) +test_that("list_nhgis_crosswalks includes non-census SOURCE years", { + crosswalks <- list_nhgis_crosswalks() + + source_years <- unique(crosswalks$source_year) + + # Non-census years should be valid as source years + expect_true("2011" %in% source_years) + expect_true("2012" %in% source_years) + expect_true("2014" %in% source_years) + expect_true("2015" %in% source_years) + expect_true("2022" %in% source_years) +}) + +test_that("list_nhgis_crosswalks non-census source years only have bg/tr sources", { + crosswalks <- list_nhgis_crosswalks() + + noncensus_years <- c("2011", "2012", "2014", "2015", "2022") + + noncensus_source_crosswalks <- crosswalks |> + dplyr::filter(source_year %in% noncensus_years) + + source_geogs <- unique(noncensus_source_crosswalks$source_geography) + + # Non-census source years only support bg and tr (not block) + expect_true(all(source_geogs %in% c("block_group", "tract"))) + expect_false("block" %in% source_geogs) +}) + +test_that("list_nhgis_crosswalks includes bidirectional crosswalks", { + crosswalks <- list_nhgis_crosswalks() + + # 2014 to 2020 should exist + expect_gt(nrow(crosswalks |> + dplyr::filter(source_year == "2014", target_year == "2020")), 0) + + # 2022 to 2010 should exist + expect_gt(nrow(crosswalks |> + dplyr::filter(source_year == "2022", target_year == "2010")), 0) + + # 2011 to 2022 should exist (both non-census, different decades) + expect_gt(nrow(crosswalks |> + dplyr::filter(source_year == "2011", target_year == "2022")), 0) +}) + # ============================================================================== # get_ctdata_crosswalk() tests # ============================================================================== @@ -183,10 +227,10 @@ test_that("get_crosswalk routes 2020-2022 to CTData", { expect_s3_class(result, "tbl_df") metadata <- attr(result, "crosswalk_metadata") - expect_true("ctdata" %in% metadata$source) + expect_equal(metadata$data_source, "ctdata") }) 
-test_that("get_crosswalk attaches metadata attribute", { +test_that("get_crosswalk attaches comprehensive metadata attribute", { skip_if_offline() result <- get_crosswalk( @@ -198,12 +242,41 @@ test_that("get_crosswalk attaches metadata attribute", { metadata <- attr(result, "crosswalk_metadata") expect_type(metadata, "list") - expect_true("source" %in% names(metadata)) - expect_true("source_year" %in% names(metadata)) - expect_true("target_year" %in% names(metadata)) + + # Check for key metadata fields + expect_true("call_parameters" %in% names(metadata)) + expect_true("data_source" %in% names(metadata)) + expect_true("data_source_full_name" %in% names(metadata)) expect_true("source_geography" %in% names(metadata)) expect_true("target_geography" %in% names(metadata)) - expect_true("notes" %in% names(metadata)) + expect_true("source_year" %in% names(metadata)) + expect_true("target_year" %in% names(metadata)) + expect_true("crosswalk_package_version" %in% names(metadata)) + + # Call parameters should be a nested list + expect_type(metadata$call_parameters, "list") + expect_equal(metadata$call_parameters$source_geography, "tract") + expect_equal(metadata$call_parameters$target_geography, "tract") + expect_equal(metadata$call_parameters$source_year, "2020") + expect_equal(metadata$call_parameters$target_year, "2022") +}) + +test_that("CTData crosswalk metadata includes download URL", { + skip_if_offline() + + result <- get_crosswalk( + source_geography = "tract", + target_geography = "tract", + source_year = 2020, + target_year = 2022) + + metadata <- attr(result, "crosswalk_metadata") + + expect_equal(metadata$data_source, "ctdata") + expect_true(stringr::str_detect( + metadata$download_url, + "github.com/CT-Data-Collaborative")) + expect_true("github_repository" %in% names(metadata)) }) test_that("get_crosswalk 2020-2022 metadata contains correct info", { @@ -219,6 +292,8 @@ test_that("get_crosswalk 2020-2022 metadata contains correct info", { expect_equal(metadata$source_year, "2020") expect_equal(metadata$target_year, "2022") + expect_equal(metadata$data_source, "ctdata") + expect_equal(metadata$data_source_full_name, "CT Data Collaborative") expect_true(length(metadata$notes) > 0) }) @@ -295,6 +370,50 @@ test_that("get_nhgis_crosswalk validates non-census year geography restrictions" regexp = "Non-census year crosswalks.*only available") }) +test_that("get_nhgis_crosswalk validates non-census SOURCE year geography restrictions", { + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + # Non-census source years only support bg, tr, co - not block + expect_error( + get_nhgis_crosswalk( + source_year = 2014, + source_geography = "block", + target_year = 2020, + target_geography = "tract"), + regexp = "Non-census year crosswalks.*only available") +}) + +test_that("get_nhgis_crosswalk rejects within-decade crosswalks", { + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + # 2010 to 2014 is within-decade (both 2010s) + expect_error( + get_nhgis_crosswalk( + source_year = 2010, + source_geography = "tract", + target_year = 2014, + target_geography = "tract"), + regexp = "cross-decade") + + # 2020 to 2022 is within-decade (both 2020s) + expect_error( + get_nhgis_crosswalk( + source_year = 2020, + source_geography = "tract", + target_year = 2022, + target_geography = "tract"), + regexp = "cross-decade") + + # 2011 to 2015 is within-decade (both 2010s) + expect_error( + get_nhgis_crosswalk( + source_year = 2011, + source_geography = "tract", + target_year 
= 2015, + target_geography = "tract"), + regexp = "cross-decade") +}) + test_that("get_nhgis_crosswalk accepts valid non-census year requests", { skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") skip_if_offline() @@ -308,6 +427,29 @@ test_that("get_nhgis_crosswalk accepts valid non-census year requests", { }) }) +test_that("get_nhgis_crosswalk accepts non-census SOURCE years", { + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + skip_if_offline() + + # 2014 (non-census) to 2020 (decennial) - cross-decade + expect_no_error({ + result <- get_nhgis_crosswalk( + source_year = 2014, + source_geography = "tract", + target_year = 2020, + target_geography = "tract") + }) + + # 2022 (non-census) to 2010 (decennial) - cross-decade + expect_no_error({ + result <- get_nhgis_crosswalk( + source_year = 2022, + source_geography = "block_group", + target_year = 2010, + target_geography = "block_group") + }) +}) + # ============================================================================== # Integration tests # ============================================================================== From 0a9eba16a647094e17716fe37c4f53eab54549fe Mon Sep 17 00:00:00 2001 From: Will Curran-Groome Date: Mon, 19 Jan 2026 22:22:45 -0500 Subject: [PATCH 3/3] major updates to supported crosswalks, testing, and actual interpolation --- DESCRIPTION | 69 +- LICENSE | 2 + LICENSE.md | 42 +- NAMESPACE | 2 + R/crosswalk_data.R | 326 ++ R/get_crosswalk.R | 257 +- R/get_crosswalk_chain.R | 100 + R/get_ctdata_crosswalk.R | 298 +- R/get_geocorr_crosswalk.R | 57 +- R/get_nhgis_crosswalk.R | 32 +- R/plan_crosswalk_chain.R | 267 ++ man/crosswalk_data.Rd | 123 + man/get_crosswalk.Rd | 74 +- man/get_crosswalk_chain.Rd | 63 + man/standardize_geography.Rd | 20 - renv.lock | 3806 ++++++++++--------- tests/testthat/test-crosswalk_data.R | 614 +++ tests/testthat/test-get_crosswalk.R | 322 ++ tests/testthat/test-get_crosswalk_chain.R | 238 ++ tests/testthat/test-get_ctdata_crosswalk.R | 281 ++ tests/testthat/test-get_geocorr_crosswalk.R | 327 ++ tests/testthat/test-get_nhgis_crosswalk.R | 482 +++ tests/testthat/test-nhgis-crosswalk.R | 217 -- tests/testthat/test-noncensus-crosswalks.R | 490 --- tests/testthat/test-plan_crosswalk_chain.R | 216 ++ 25 files changed, 5754 insertions(+), 2971 deletions(-) create mode 100644 LICENSE create mode 100644 R/crosswalk_data.R create mode 100644 R/get_crosswalk_chain.R create mode 100644 R/plan_crosswalk_chain.R create mode 100644 man/crosswalk_data.Rd create mode 100644 man/get_crosswalk_chain.Rd delete mode 100644 man/standardize_geography.Rd create mode 100644 tests/testthat/test-crosswalk_data.R create mode 100644 tests/testthat/test-get_crosswalk.R create mode 100644 tests/testthat/test-get_crosswalk_chain.R create mode 100644 tests/testthat/test-get_ctdata_crosswalk.R create mode 100644 tests/testthat/test-get_geocorr_crosswalk.R create mode 100644 tests/testthat/test-get_nhgis_crosswalk.R delete mode 100644 tests/testthat/test-nhgis-crosswalk.R delete mode 100644 tests/testthat/test-noncensus-crosswalks.R create mode 100644 tests/testthat/test-plan_crosswalk_chain.R diff --git a/DESCRIPTION b/DESCRIPTION index a68f35b..51c56b5 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,34 +1,35 @@ -Package: crosswalk -Type: Package -Title: Simple interface to inter-temporal and inter-geography crosswalks -Version: 0.0.0.9001 -Description: An R package providing a simple interface to access geographic crosswalks. 
-License: MIT + file LICENSE.md -Authors@R: - person(given = "Will", family = "Curran-Groome", email = "wcurrangroome@urban.org", role = c("aut", "cre")) -Encoding: UTF-8 -LazyData: true -Roxygen: list(markdown = TRUE) -RoxygenNote: 7.3.2 -Depends: - R (>= 4.1.0) -Imports: - dplyr, - httr, - httr2, - janitor, - purrr, - readr, - rvest, - stringr, - tibble, - tidyr, - utils -Suggests: - testthat (>= 3.0.0), - tidycensus, - knitr, - rmarkdown -Config/testthat/edition: 3 -VignetteBuilder: knitr -URL: https://ui-research.github.io/crosswalk/ +Package: crosswalk +Type: Package +Title: Simple interface to inter-temporal and inter-geography crosswalks +Version: 0.0.0.9001 +Description: An R package providing a simple interface to access geographic crosswalks. +License: MIT + file LICENSE +Authors@R: + person(given = "Will", family = "Curran-Groome", email = "wcurrangroome@urban.org", role = c("aut", "cre")) +Encoding: UTF-8 +LazyData: true +Roxygen: list(markdown = TRUE) +RoxygenNote: 7.3.3 +Depends: + R (>= 4.1.0) +Imports: + dplyr, + httr, + httr2, + janitor, + purrr, + readr, + rvest, + stringr, + tibble, + tidyr, + tidytable, + utils +Suggests: + testthat (>= 3.0.0), + tidycensus, + knitr, + rmarkdown +Config/testthat/edition: 3 +VignetteBuilder: knitr +URL: https://ui-research.github.io/crosswalk/ diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..59003bb --- /dev/null +++ b/LICENSE @@ -0,0 +1,2 @@ +YEAR: 2026 +COPYRIGHT HOLDER: crosswalk authors diff --git a/LICENSE.md b/LICENSE.md index 27caa79..b46731e 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,21 +1,21 @@ -# MIT License - -Copyright (c) 2025 crosswalk authors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +# MIT License + +Copyright (c) 2026 crosswalk authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/NAMESPACE b/NAMESPACE index 0269bb9..5172ca0 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -1,4 +1,6 @@ # Generated by roxygen2: do not edit by hand +export(crosswalk_data) export(get_crosswalk) +export(get_crosswalk_chain) export(list_nhgis_crosswalks) diff --git a/R/crosswalk_data.R b/R/crosswalk_data.R new file mode 100644 index 0000000..917e5f5 --- /dev/null +++ b/R/crosswalk_data.R @@ -0,0 +1,326 @@ +## explicitly enable/acknowledge data.table (used by tidytable) +.datatable.aware = TRUE + +#' Apply a Crosswalk to Transform Data +#' +#' Applies geographic crosswalk weights to transform data from a source geography +#' to a target geography. Accepts the output from `get_crosswalk()` and automatically +#' applies all crosswalk steps sequentially for multi-step transformations. +#' +#' @param data A data frame or tibble containing the data to crosswalk. +#' @param crosswalk The output from `get_crosswalk()` - a list containing: +#' \describe{ +#' \item{crosswalks}{A named list of crosswalk tibbles (step_1, step_2, etc.)} +#' \item{plan}{The crosswalk plan} +#' \item{message}{Description of the crosswalk chain} +#' } +#' Alternatively, a single crosswalk tibble can be provided for backwards +#' compatibility. +#' @param geoid_column Character. The name of the column in `data` containing +#' the source geography identifiers (GEOIDs). Default is "geoid". +#' @param count_columns Character vector or NULL. Column names in `data` that represent +#' count variables. These will be summed after multiplying by the allocation factor. +#' If NULL (default), automatically detects columns with the prefix "count_". +#' @param non_count_columns Character vector or NULL. Column names in `data` that represent +#' mean, median, percentage, and ratio variables. These will be calculated as weighted +#' means using the allocation factor as weights. If NULL (default), automatically +#' detects columns with prefixes "mean_", "median_", "percent_", or "ratio_". +#' @param return_intermediate Logical. If TRUE and crosswalk has multiple steps, +#' returns a list containing both the final result and intermediate results +#' from each step. Default is FALSE, which returns only the final result. +#' +#' @return If `return_intermediate = FALSE` (default), a tibble with data summarized +#' to the final target geography. +#' +#' If `return_intermediate = TRUE` and there are multiple crosswalk steps, a list with: +#' \describe{ +#' \item{final}{The final crosswalked data} +#' \item{intermediate}{A named list of intermediate results (step_1, step_2, etc.)} +#' } +#' +#' The returned tibble(s) include an attribute `crosswalk_metadata` from the +#' underlying crosswalk (access via `attr(result, "crosswalk_metadata")`). +#' +#' @details +#' **Count variables** (specified in `count_columns`) are interpolated by summing +#' the product of the value and the allocation factor across all source geographies +#' that overlap with each target geography. +#' +#' **Non-count variables** (specified in `non_count_columns`) are interpolated using +#' a weighted mean, with the allocation factor serving as the weight. 
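Editorial note: the count versus non-count interpolation rules described in the roxygen details above can be illustrated with a small hand-worked sketch; the allocation factors and values below are invented purely for illustration and are not taken from any real crosswalk.

```r
# Two hypothetical source tracts that both contribute to one target geography
allocation_factor <- c(0.25, 1.00)   # share of each source allocated to the target
count_population  <- c(400, 1000)    # a count variable
percent_poverty   <- c(0.10, 0.20)   # a non-count (percentage) variable

# Count rule: sum of value * allocation factor
sum(count_population * allocation_factor)
#> 1100   (0.25 * 400 + 1.00 * 1000)

# Non-count rule: mean weighted by the allocation factor
stats::weighted.mean(percent_poverty, allocation_factor)
#> 0.18   ((0.25 * 0.10 + 1.00 * 0.20) / 1.25)
```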
+#' +#' **Automatic column detection**: If `count_columns` and `non_count_columns` are +#' both NULL, the function will automatically detect columns based on naming prefixes: +#' - Columns starting with "count_" are treated as count variables +#' - Columns starting with "mean_", "median_", "percent_", or "ratio_" are treated +#' as non-count variables +#' +#' **Multi-step crosswalks**: When `get_crosswalk()` returns multiple crosswalks +#' (for transformations that change both geography and year), this function +#' automatically applies them in sequence. +#' +#' @export +#' @examples +#' \dontrun{ +#' # Single-step crosswalk +#' crosswalk <- get_crosswalk( +#' source_geography = "tract", +#' target_geography = "zcta", +#' weight = "population") +#' +#' result <- crosswalk_data( +#' data = my_tract_data, +#' crosswalk = crosswalk, +#' geoid_column = "tract_geoid", +#' count_columns = c("count_population", "count_housing_units")) +#' +#' # Multi-step crosswalk (geography + year change) +#' crosswalk <- get_crosswalk( +#' source_geography = "tract", +#' target_geography = "zcta", +#' source_year = 2010, +#' target_year = 2020, +#' weight = "population") +#' +#' # Automatically applies both steps +#' result <- crosswalk_data( +#' data = my_data, +#' crosswalk = crosswalk, +#' geoid_column = "tract_geoid", +#' count_columns = "count_population") +#' +#' # To get intermediate results +#' result <- crosswalk_data( +#' data = my_data, +#' crosswalk = crosswalk, +#' geoid_column = "tract_geoid", +#' count_columns = "count_population", +#' return_intermediate = TRUE) +#' +#' # Access intermediate and final +#' result$intermediate$step_1 # After first crosswalk +#' result$final # Final result +#' } + +crosswalk_data <- function( + data, + crosswalk, + geoid_column = "geoid", + count_columns = NULL, + non_count_columns = NULL, + return_intermediate = FALSE) { + + # Determine if crosswalk is a list (from get_crosswalk) or a single tibble + crosswalk_list <- extract_crosswalk_list(crosswalk) + + # Auto-detect columns if not specified + data_columns <- names(data) + + if (is.null(count_columns)) { + count_columns <- data_columns[stringr::str_starts(data_columns, "count_")] + } + + if (is.null(non_count_columns)) { + non_count_columns <- data_columns[ + stringr::str_starts(data_columns, "mean_") | + stringr::str_starts(data_columns, "median_") | + stringr::str_starts(data_columns, "percent_") | + stringr::str_starts(data_columns, "ratio_")] + } + + if (length(count_columns) == 0 & length(non_count_columns) == 0) { + stop( + "No columns to crosswalk. 
Either specify `count_columns` or `non_count_columns`, ", + "or ensure your data has columns with prefixes: count_, mean_, median_, percent_, or ratio_.") + } + + # Check that specified columns exist in original data + all_value_columns <- c(count_columns, non_count_columns) + missing_columns <- setdiff(all_value_columns, names(data)) + if (length(missing_columns) > 0) { + stop( + "The following columns were not found in data: ", + paste(missing_columns, collapse = ", ")) + } + + # Validate geoid_column exists in original data + if (!geoid_column %in% names(data)) { + stop("Column '", geoid_column, "' not found in data.") + } + + # Apply crosswalks sequentially + n_steps <- length(crosswalk_list) + intermediate_results <- list() + current_data <- data + current_geoid_column <- geoid_column + + for (i in seq_len(n_steps)) { + step_name <- names(crosswalk_list)[i] + step_crosswalk <- crosswalk_list[[i]] + + message(stringr::str_c("Applying crosswalk step ", i, " of ", n_steps, "...")) + + # Apply single crosswalk step + current_data <- apply_single_crosswalk( + data = current_data, + crosswalk = step_crosswalk, + geoid_column = current_geoid_column, + count_columns = count_columns, + non_count_columns = non_count_columns) + + # Store intermediate result if requested + if (return_intermediate) { + intermediate_results[[step_name]] <- current_data + } + + # After first step, geoid column is renamed to "geoid" + current_geoid_column <- "geoid" + } + + # Return based on return_intermediate flag + if (return_intermediate && n_steps > 1) { + return(list( + final = current_data, + intermediate = intermediate_results)) + } + + return(current_data) +} + + +#' Extract Crosswalk List from Various Input Formats +#' +#' Internal function that normalizes crosswalk input to a list of crosswalk tibbles. +#' +#' @param crosswalk Either a list from get_crosswalk() or a single tibble +#' @return A named list of crosswalk tibbles (step_1, step_2, etc.) +#' @keywords internal +#' @noRd +extract_crosswalk_list <- function(crosswalk) { + + # If it's a list with a "crosswalks" element (from get_crosswalk) + if (is.list(crosswalk) && "crosswalks" %in% names(crosswalk)) { + crosswalk_list <- crosswalk$crosswalks + + # Validate each crosswalk in the list + for (name in names(crosswalk_list)) { + xwalk <- crosswalk_list[[name]] + validate_crosswalk_tibble(xwalk, name) + } + + return(crosswalk_list) + } + + # If it's a data frame directly (backwards compatibility or manual input) + if (is.data.frame(crosswalk)) { + validate_crosswalk_tibble(crosswalk, "crosswalk") + return(list(step_1 = crosswalk)) + } + + # Otherwise, invalid input + + stop( + "Invalid crosswalk input. Expected either:\n", + " 1. Output from get_crosswalk() (a list with $crosswalks element), or\n", + " 2. A single crosswalk tibble with columns: source_geoid, target_geoid, allocation_factor_source_to_target") +} + + +#' Validate a Crosswalk Tibble +#' +#' Internal function that checks a crosswalk tibble has required columns. 
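Editorial note: as a concrete illustration of the input formats accepted here, the sketch below builds a hand-made crosswalk tibble carrying the three required columns checked by `validate_crosswalk_tibble()`; supplying such a bare tibble to `crosswalk_data()` is the backwards-compatible path described above and is wrapped internally as `step_1`. The GEOIDs, weights, and the commented-out data and column names are invented for illustration.

```r
library(tibble)

# Hypothetical hand-built crosswalk with the three required columns
manual_crosswalk <- tibble(
  source_geoid = c("09001040100", "09001040100", "09001040200"),
  target_geoid = c("09190930101", "09190930102", "09190930101"),
  allocation_factor_source_to_target = c(0.6, 0.4, 1.0))

# A bare tibble like this is treated as a single-step crosswalk (step_1):
# result <- crosswalk_data(
#   data = my_tract_data,            # hypothetical input data
#   crosswalk = manual_crosswalk,
#   geoid_column = "tract_geoid",    # hypothetical GEOID column name
#   count_columns = "count_population")
```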
+#' +#' @param crosswalk A tibble to validate +#' @param name Name to use in error messages +#' @keywords internal +#' @noRd +validate_crosswalk_tibble <- function(crosswalk, name) { + required_cols <- c("source_geoid", "target_geoid", "allocation_factor_source_to_target") + missing_cols <- setdiff(required_cols, names(crosswalk)) + + if (length(missing_cols) > 0) { + stop( + "Crosswalk '", name, "' is missing required columns: ", + paste(missing_cols, collapse = ", ")) + } +} + + +#' Apply a Single Crosswalk Step +#' +#' Internal function that applies one crosswalk tibble to data. +#' +#' @param data Data to crosswalk +#' @param crosswalk A single crosswalk tibble +#' @param geoid_column Column name for source geoid +#' @param count_columns Count variable columns +#' @param non_count_columns Non-count variable columns +#' @return Crosswalked data +#' @keywords internal +#' @noRd +apply_single_crosswalk <- function( + data, + crosswalk, + geoid_column, + count_columns, + non_count_columns) { + + # Check if crosswalk is empty + if (nrow(crosswalk) == 0) { + warning( + "Crosswalk is empty. If source geography is nested within target geography, ", + "consider aggregating your data directly instead.") + return(tibble::tibble()) + } + + # Store metadata for later attachment + crosswalk_metadata <- attr(crosswalk, "crosswalk_metadata") + + # Determine grouping columns (target_geography_name may not always be present) + group_cols <- "target_geoid" + if ("target_geography_name" %in% names(crosswalk)) { + group_cols <- c("target_geoid", "target_geography_name") + } + + # Filter to columns that exist in current data (intermediate steps may have fewer) + current_count_cols <- intersect(count_columns, names(data)) + current_non_count_cols <- intersect(non_count_columns, names(data)) + + # Join crosswalk to data + result <- data |> + dplyr::mutate( + dplyr::across(dplyr::all_of(geoid_column), as.character)) |> + dplyr::left_join( + crosswalk, + by = stats::setNames("source_geoid", geoid_column), + relationship = "one-to-many") |> + tidytable::summarize( + .by = dplyr::all_of(group_cols), + ## count variables we take the sum of the weighted count variable + dplyr::across( + .cols = dplyr::all_of(current_count_cols), + .fns = ~ sum(.x * allocation_factor_source_to_target, na.rm = TRUE)), + ## non-count variables--means, medians, percentages, ratios, etc.-- + ## we take the weighted mean of the variable, weighted by the allocation factor + tidytable::across( + .cols = tidytable::all_of(current_non_count_cols), + .fns = ~ stats::weighted.mean(.x, allocation_factor_source_to_target, na.rm = TRUE)), + tidytable::across( + .cols = tidytable::all_of(c(current_count_cols, current_non_count_cols)), + .fns = ~ sum(!is.na(.x)), + .names = "{.col}_validx")) |> + tidytable::mutate( + tidytable::across( + .cols = tidytable::all_of(c(current_count_cols, current_non_count_cols)), + .fns = ~ tidytable::if_else(get(stringr::str_c(tidytable::cur_column(), "_validx$")) > 0, .x, NA))) |> + dplyr::select(-dplyr::matches("_validx$")) |> + dplyr::rename_with( + .cols = dplyr::everything(), + .fn = ~ stringr::str_remove_all(.x, "target_")) |> + tibble::as_tibble() + + # Attach metadata + attr(result, "crosswalk_metadata") <- crosswalk_metadata + + return(result) +} diff --git a/R/get_crosswalk.R b/R/get_crosswalk.R index 587c753..add135b 100644 --- a/R/get_crosswalk.R +++ b/R/get_crosswalk.R @@ -1,13 +1,20 @@ -#' Get an inter-temporal or inter-geography crosswalk +#' Get a Geographic Crosswalk #' #' Retrieves a crosswalk with 
interpolation values from a source geography to a target -#' geography or from a source year to a target year. +#' geography, optionally across different years. Always returns a list with a consistent +#' structure containing one or more crosswalk tibbles. #' #' @details This function sources crosswalks from Geocorr 2022, IPUMS NHGIS, and #' CT Data Collaborative. Crosswalk weights are from the original sources and #' have not been modified; this function merely standardizes the format of the #' returned crosswalks and enables easy programmatic access and caching. #' +#' **Multi-step crosswalks**: When both geography AND year change (e.g., +#' 2010 tracts to 2020 ZCTAs), no single crosswalk source provides this directly. +#' This function returns multiple crosswalks that should be applied sequentially: +#' 1. First crosswalk changes year (via NHGIS): source_geog(source_year) -> source_geog(target_year) +#' 2. Second crosswalk changes geography (via Geocorr): source_geog(target_year) -> target_geog(target_year) +#' #' **Non-census year support**: For target years 2011, 2012, 2014, 2015, and 2022, #' crosswalks are available only for block groups, tracts, and counties. These #' years correspond to American Community Survey geography changes. @@ -40,40 +47,19 @@ #' crosswalk is returned but not saved to disk. Individual component crosswalks #' are cached separately when provided. #' -#' @return A tibble containing the crosswalk between the specified geographies. -#' Data are tidy-formatted, with each observation reflecting a unique -#' source-target-weighting factor combination. -#' -#' The returned tibble includes an attribute `crosswalk_metadata` (access via -#' `attr(result, "crosswalk_metadata")`) containing comprehensive information -#' about how the crosswalk was produced: +#' @return A list with a consistent structure: #' \describe{ -#' \item{call_parameters}{List of the parameters passed to get_crosswalk()} -#' \item{data_source}{Short identifier for the data source (e.g., "nhgis", "geocorr", "ctdata")} -#' \item{data_source_full_name}{Full name of the data source} -#' \item{download_url}{URL from which the crosswalk was downloaded (NHGIS, CTData)} -#' \item{api_endpoint}{API endpoint used (Geocorr)} -#' \item{documentation_url}{URL to documentation for the crosswalk source} -#' \item{citation_url}{URL to citation requirements (NHGIS)} -#' \item{github_repository}{GitHub repository URL (CTData)} -#' \item{source_geography}{Source geography as specified by user} -#' \item{source_geography_standardized}{Standardized source geography code} -#' \item{target_geography}{Target geography as specified by user} -#' \item{target_geography_standardized}{Standardized target geography code} -#' \item{source_year}{Source year (if applicable)} -#' \item{target_year}{Target year (if applicable)} -#' \item{reference_year}{Reference year for same-year crosswalks (Geocorr)} -#' \item{weighting_variable}{Variable used to calculate allocation factors} -#' \item{state_coverage}{Geographic coverage notes (e.g., "Connecticut only")} -#' \item{notes}{Additional notes about the crosswalk} -#' \item{retrieved_at}{Timestamp when crosswalk was retrieved} -#' \item{cached}{Logical indicating if result was cached} -#' \item{cache_path}{Path to cached file (if applicable)} -#' \item{read_from_cache}{Logical indicating if result was read from cache} -#' \item{crosswalk_package_version}{Version of the crosswalk package used} +#' \item{crosswalks}{A named list of crosswalk tibbles (step_1, step_2, etc.). 
+#' Single-step transformations have one crosswalk; multi-step have two or more.} +#' \item{plan}{The crosswalk plan describing the transformation steps} +#' \item{message}{A formatted message describing the crosswalk chain} #' } #' -#' Columns in the returned dataframe (some may not be present depending on source): +#' Each crosswalk tibble includes an attribute `crosswalk_metadata` (access via +#' `attr(result$crosswalks$step_1, "crosswalk_metadata")`) containing comprehensive +#' information about how the crosswalk was produced. +#' +#' Columns in returned crosswalk dataframes (some may not be present depending on source): #' \describe{ #' \item{source_geoid}{A unique identifier for the source geography} #' \item{target_geoid}{A unique identifier for the target geography} @@ -96,27 +82,40 @@ #' @examples #' \dontrun{ #' # Same-year crosswalk between geographies (uses Geocorr) -#' get_crosswalk( +#' # Returns list with one crosswalk in crosswalks$step_1 +#' result <- get_crosswalk( #' source_geography = "zcta", #' target_geography = "puma22", #' weight = "population", #' cache = here::here("crosswalks-cache")) #' -#' # Inter-temporal crosswalk (uses NHGIS) -#' get_crosswalk( +#' # Apply to data using crosswalk_data() +#' output <- crosswalk_data( +#' data = my_data, +#' crosswalk = result, +#' count_columns = "count_population") +#' +#' # Multi-step crosswalk: both geography AND year change +#' # Returns list with two crosswalks in crosswalks$step_1 and crosswalks$step_2 +#' result <- get_crosswalk( #' source_geography = "tract", -#' target_geography = "tract", +#' target_geography = "zcta", #' source_year = 2010, #' target_year = 2020, -#' cache = here::here("crosswalks-cache")) +#' weight = "population") #' -#' # Non-census year crosswalk (2020 to 2022, CT changes) -#' get_crosswalk( -#' source_geography = "tract", -#' target_geography = "tract", -#' source_year = 2020, -#' target_year = 2022, -#' cache = here::here("crosswalks-cache")) +#' # crosswalk_data() automatically applies all steps +#' output <- crosswalk_data( +#' data = my_data, +#' crosswalk = result, +#' count_columns = "count_population") +#' +#' # To get intermediate results, set return_intermediate = TRUE +#' output <- crosswalk_data( +#' data = my_data, +#' crosswalk = result, +#' count_columns = "count_population", +#' return_intermediate = TRUE) #' } get_crosswalk <- function( @@ -125,34 +124,98 @@ get_crosswalk <- function( source_year = NULL, target_year = NULL, cache = NULL, - weight = NULL) { - - if ( - (source_geography == "block" & target_geography %in% c("block group", "tract", "county", "core_based_statistical_area") | - source_geography == "block group" & target_geography %in% c("tract", "county", "core_based_statistical_area") | - source_geography == "tract" & target_geography %in% c("county", "core_based_statistical_area") | - source_geography == "county" & target_geography == "core_based_statistical_area") & - ((is.null(source_year) & is.null(target_year)) | (source_year == target_year)) - ) { + weight = "population") { + + # Check for nested geographies (no crosswalk needed) + # Determine if years match (both NULL, or both non-NULL and equal) + years_match <- (is.null(source_year) && is.null(target_year)) || + (!is.null(source_year) && !is.null(target_year) && isTRUE(source_year == target_year)) + + is_nested <- (source_geography == "block" && target_geography %in% c("block group", "tract", "county", "core_based_statistical_area")) || + (source_geography == "block group" && target_geography %in% 
c("tract", "county", "core_based_statistical_area")) || + (source_geography == "tract" && target_geography %in% c("county", "core_based_statistical_area")) || + (source_geography == "county" && target_geography == "core_based_statistical_area") + + if (is_nested && years_match) { warning( "The source geography is nested within the target geography and an empty result will be returned. No crosswalk is needed to translate data between nested geographies; simply aggregate your data to the desired geography.") - return(tibble::tibble()) + # Return empty list structure for consistency + return(list( + crosswalks = list(step_1 = tibble::tibble()), + plan = NULL, + message = "No crosswalk needed for nested geographies")) + } + + # Plan the crosswalk chain to determine if multi-step is needed + plan <- plan_crosswalk_chain( + source_geography = source_geography, + target_geography = target_geography, + source_year = source_year, + target_year = target_year, + weight = weight) + + # Check for planning errors + if (!is.null(plan$error)) { + stop(plan$error) } + # Use get_crosswalk_chain for both single and multi-step + # (it handles both cases and returns consistent structure) + result <- get_crosswalk_chain( + source_geography = source_geography, + target_geography = target_geography, + source_year = source_year, + target_year = target_year, + weight = weight, + cache = cache) + + return(result) +} + + +#' Get a Single-Step Crosswalk (Internal) +#' +#' Internal function that retrieves a single crosswalk from the appropriate source. +#' This handles routing to Geocorr, NHGIS, or CTData based on the parameters. +#' +#' @inheritParams get_crosswalk +#' @return A tibble containing the crosswalk. +#' @keywords internal +#' @noRd +get_crosswalk_single <- function( + source_geography, + target_geography, + source_year = NULL, + target_year = NULL, + weight = "population", + cache = NULL) { + + # Convert years to character for consistent processing source_year_chr <- if (!is.null(source_year)) as.character(source_year) else NULL target_year_chr <- if (!is.null(target_year)) as.character(target_year) else NULL - if (is.null(source_year) | is.null(target_year)) { + # Determine which source to use + + # Use Geocorr for: no years specified, or same year + use_geocorr <- is.null(source_year) || is.null(target_year) || + (!is.null(source_year) && !is.null(target_year) && isTRUE(source_year == target_year)) + + # Use CTData for 2020 to 2022 (Connecticut planning region changes) + use_ctdata <- !is.null(source_year_chr) && !is.null(target_year_chr) && + source_year_chr == "2020" && target_year_chr == "2022" + + if (use_geocorr) { crosswalk_source <- "geocorr" - } else if (source_year_chr == "2020" & target_year_chr == "2022") { + } else if (use_ctdata) { crosswalk_source <- "ctdata_2020_2022" } else { crosswalk_source <- "nhgis" } + # Fetch the crosswalk from the appropriate source if (crosswalk_source == "ctdata_2020_2022") { result <- get_crosswalk_2020_2022( geography = source_geography, @@ -179,7 +242,6 @@ simply aggregate your data to the desired geography.") # Build comprehensive metadata object metadata <- list( - # Call parameters call_parameters = list( source_geography = source_geography, target_geography = target_geography, @@ -188,7 +250,6 @@ simply aggregate your data to the desired geography.") weight = weight, cache = cache), - # Data source information data_source = if (!is.null(internal_metadata$data_source)) { internal_metadata$data_source } else { @@ -204,14 +265,12 @@ simply aggregate your data 
to the desired geography.") crosswalk_source) }, - # URLs and documentation download_url = internal_metadata$download_url, api_endpoint = internal_metadata$api_endpoint, documentation_url = internal_metadata$documentation_url, citation_url = internal_metadata$citation_url, github_repository = internal_metadata$github_repository, - # Geography and year details source_geography = source_geography, source_geography_standardized = internal_metadata$source_geography_standardized, target_geography = target_geography, @@ -220,14 +279,12 @@ simply aggregate your data to the desired geography.") target_year = target_year_chr, reference_year = internal_metadata$reference_year, - # Weighting weighting_variable = if (!is.null(internal_metadata$weighting_variable)) { internal_metadata$weighting_variable } else { weight }, - # Coverage and notes state_coverage = internal_metadata$state_coverage, notes = if (crosswalk_source == "ctdata_2020_2022") { c("Connecticut: CTData Collaborative 2020-2022 crosswalk", @@ -237,18 +294,17 @@ simply aggregate your data to the desired geography.") internal_metadata$notes }, - # Retrieval information retrieved_at = internal_metadata$retrieved_at, cached = internal_metadata$cached, cache_path = internal_metadata$cache_path, read_from_cache = internal_metadata$read_from_cache, - # Package information + is_multi_step = FALSE, crosswalk_package_version = as.character(utils::packageVersion("crosswalk"))) attr(result, "crosswalk_metadata") <- metadata - result = result |> + result <- result |> dplyr::mutate( dplyr::across( .cols = -allocation_factor_source_to_target, @@ -261,18 +317,20 @@ simply aggregate your data to the desired geography.") } -#' Get 2020 to 2022 Crosswalk (Connecticut + Identity Mapping) +#' Get 2020 to 2022 Crosswalk (National) #' #' Internal function that handles the special case of 2020 to 2022 crosswalks. -#' Connecticut changed from historical counties to planning regions in 2022, -#' while all other states had no geographic changes. +#' Returns a nationally comprehensive crosswalk with Connecticut data from +#' CT Data Collaborative (handling the planning region changes) and identity +#' mappings for all other states (where no changes occurred). #' #' @param geography Character. Geography type: one of "block", "block_group", #' "tract", or "county". #' @param cache Directory path for caching component crosswalks. #' -#' @return A tibble containing the national crosswalk with Connecticut from CTData -#' and identity mappings for other states. +#' @return A tibble containing the national 2020-2022 crosswalk with Connecticut +#' from CTData and identity mappings for other states. +#' @keywords internal #' @noRd get_crosswalk_2020_2022 <- function(geography, cache = NULL) { @@ -293,61 +351,12 @@ get_crosswalk_2020_2022 <- function(geography, cache = NULL) { "2020 to 2022 crosswalks are only available for blocks, block groups, tracts, and counties. The provided geography '", geography, "' is not supported.")} - message( -"Constructing 2020 to 2022 crosswalk: -- Connecticut: Using CT Data Collaborative crosswalk (FIPS code changes only, - boundaries unchanged). Historical counties were replaced by planning regions. -- Other states: No geographic changes occurred between 2020 and 2022. 
- Returning identity mapping (source_geoid = target_geoid) for non-CT states.") - - ct_crosswalk <- get_ctdata_crosswalk( + # get_ctdata_crosswalk() now returns nationally comprehensive data + result <- get_ctdata_crosswalk( geography = geography_standardized, cache = cache) - message( - "Connecticut crosswalk loaded: ", nrow(ct_crosswalk), " ", - geography_standardized, " records.") - - attr(ct_crosswalk, "crosswalk_sources") <- list( - connecticut = "ctdata", - other_states = "identity_mapping") - attr(ct_crosswalk, "identity_states_note") <- -"For states other than Connecticut, no geographic changes occurred between 2020 -and 2022. When joining your data, non-CT records will match on identical GEOIDs. -This crosswalk only contains Connecticut records where FIPS codes changed." - - return(ct_crosswalk) + return(result) } -# ## write out geocorr crosswalks -# core_sources_geocorr = c( -# #"place", "county", -# "tract", -# #"blockgroup", -# "zcta", -# "puma22"#, -# #"cd119", "cd118" -# ) - -# library(climateapi) -## create an intersection of all geography combinations -# expand.grid(core_sources_geocorr, core_sources_geocorr) |> -# dplyr::rename(source_geography = 1, target_geography = 2) |> -# ## drop where the source and target geographies are the same -# dplyr::filter(source_geography != target_geography) |> -# dplyr::mutate( -# weight = "housing", -# cache = file.path("C:", "Users", climateapi::get_system_username(), "Box", "Arnold LIHTC study", "Data", "Shapefiles and crosswalks", "crosswalk_acs_decennial_chas"), -# dplyr::across(dplyr::where(is.factor), as.character)) |> -# purrr::pwalk(get_crosswalk) - -# tibble::tibble( -# source_geography = "tract", -# target_geography = "tract", -# source_year = c(1990, 2000, 2010), -# target_year = c(2010, 2010, 2020)) |> -# dplyr::mutate( -# weight = "housing", -# cache = file.path("C:", "Users", climateapi::get_system_username(), "Box", "Arnold LIHTC study", "Data", "Shapefiles and crosswalks", "crosswalk_acs_decennial_chas"), -# dplyr::across(dplyr::where(is.factor), as.character)) |> -# purrr::pwalk(get_crosswalk) +utils::globalVariables(c("allocation_factor_source_to_target")) \ No newline at end of file diff --git a/R/get_crosswalk_chain.R b/R/get_crosswalk_chain.R new file mode 100644 index 0000000..1f630be --- /dev/null +++ b/R/get_crosswalk_chain.R @@ -0,0 +1,100 @@ +#' Get a Chain of Crosswalks for Multi-Step Transformations +#' +#' Retrieves a list of crosswalks needed to transform data from a source +#' geography/year to a target geography/year. For multi-step transformations, +#' users should apply each crosswalk sequentially using `crosswalk_data()`. +#' +#' @param source_geography Character. Source geography name. +#' @param target_geography Character. Target geography name. +#' @param source_year Numeric or NULL. Year of the source geography. +#' @param target_year Numeric or NULL. Year of the target geography. +#' @param weight Character or NULL. Weighting variable for Geocorr crosswalks. +#' @param cache Directory path or NULL. Where to cache crosswalks. 
+#' +#' @return A list with: +#' \describe{ +#' \item{crosswalks}{A named list of crosswalk tibbles (step_1, step_2, etc.)} +#' \item{plan}{The crosswalk plan from plan_crosswalk_chain()} +#' \item{message}{A formatted message describing the crosswalk chain} +#' } +#' +#' @export +#' @examples +#' \dontrun{ +#' # Get crosswalks for 2010 tracts to 2020 ZCTAs (requires two steps) +#' chain <- get_crosswalk_chain( +#' source_geography = "tract", +#' target_geography = "zcta", +#' source_year = 2010, +#' target_year = 2020, +#' weight = "population") +#' +#' # Apply crosswalks sequentially +#' data_step1 <- crosswalk_data( +#' data = my_data, +#' crosswalk = chain$crosswalks$step_1, +#' count_columns = "count_population") +#' +#' data_final <- crosswalk_data( +#' data = data_step1, +#' crosswalk = chain$crosswalks$step_2, +#' count_columns = "count_population") +#' } +get_crosswalk_chain <- function( + source_geography, + target_geography, + source_year = NULL, + target_year = NULL, + weight = "population", + cache = NULL) { + + # Get the plan + plan <- plan_crosswalk_chain( + source_geography = source_geography, + target_geography = target_geography, + source_year = source_year, + target_year = target_year, + weight = weight) + + # Check for planning errors + if (!is.null(plan$error)) { + stop(plan$error) + } + + # Initialize result + result <- list( + crosswalks = list(), + plan = plan, + message = format_chain_plan_message(plan)) + + # Print the plan message + message(result$message) + + # Handle case where no crosswalk is needed + if (nrow(plan$steps) > 0 && plan$steps$crosswalk_source[1] == "none") { + message("Returning empty crosswalk list since no transformation is needed.") + return(result) + } + + # Fetch each crosswalk step + for (i in seq_len(nrow(plan$steps))) { + step <- plan$steps[i, ] + step_name <- stringr::str_c("step_", i) + + message(stringr::str_c("\nFetching ", step_name, ": ", step$description)) + + crosswalk_i <- get_crosswalk_single( + source_geography = step$source_geography, + target_geography = step$target_geography, + source_year = if (!is.na(step$source_year)) as.numeric(step$source_year) else NULL, + target_year = if (!is.na(step$target_year)) as.numeric(step$target_year) else NULL, + weight = weight, + cache = cache) + + result$crosswalks[[step_name]] <- crosswalk_i + } + + return(result) +} + + diff --git a/R/get_ctdata_crosswalk.R b/R/get_ctdata_crosswalk.R index 93d89f5..2dbc355 100644 --- a/R/get_ctdata_crosswalk.R +++ b/R/get_ctdata_crosswalk.R @@ -1,20 +1,26 @@ -#' Get Connecticut 2020-2022 Crosswalk from CTData +#' Get Nationally Comprehensive 2020-2022 Crosswalk #' -#' Retrieves a crosswalk for Connecticut geographies between 2020 and 2022 from -#' the CT Data Collaborative. This handles the 2022 change when Connecticut -#' switched from eight historical counties to nine county-equivalent planning regions. +#' Retrieves a nationally comprehensive crosswalk between 2020 and 2022 geographies. +#' Connecticut records come from the CT Data Collaborative (handling the 2022 change +#' from historical counties to planning regions). All other states use identity +#' mapping since no geographic changes occurred outside Connecticut. #' -#' @details This function sources crosswalks from the CT Data Collaborative GitHub -#' repository. The crosswalk provides a 1:1 mapping between 2020 and 2022 FIPS -#' codes for Connecticut geographies. No interpolation weights are needed because -#' the physical boundaries did not change—only the county-level identifiers changed. 
+#' @details This function combines: +#' - **Connecticut**: Crosswalk from CT Data Collaborative GitHub repository. +#' In 2022, Connecticut replaced its 8 historical counties with 9 planning regions, +#' which changed county boundaries. For sub-county geographies (tract, block group, +#' block), physical boundaries did not change, only the FIPS codes changed to +#' align with the new county-level identifiers. +#' - **Other states**: Identity mapping derived from NHGIS 2010 -> 2020 crosswalks. +#' Since no geographic changes occurred between 2020 and 2022 outside Connecticut, +#' source_geoid equals target_geoid with allocation_factor = 1. #' #' @param geography Character. Geography type: one of "block", "block_group", "tract", #' or "county". #' @param cache Directory path. Where to download the crosswalk to. If NULL (default), #' crosswalk is returned but not saved to disk. #' -#' @return A tibble containing the Connecticut crosswalk with columns: +#' @return A tibble containing the national 2020-2022 crosswalk with columns: #' \describe{ #' \item{source_geoid}{The 2020 FIPS code} #' \item{target_geoid}{The 2022 FIPS code} @@ -22,14 +28,15 @@ #' \item{target_geography_name}{The geography type} #' \item{source_year}{2020} #' \item{target_year}{2022} -#' \item{allocation_factor_source_to_target}{Always 1 (identity mapping)} -#' \item{weighting_factor}{"identity" (no interpolation needed)} -#' \item{state_fips}{"09" (Connecticut)} +#' \item{allocation_factor_source_to_target}{1 for all records (identity or CT FIPS change)} +#' \item{weighting_factor}{"identity" for non-CT, varies for CT county} +#' \item{state_fips}{Two-digit state FIPS code} #' } +#' @keywords internal #' @noRd get_ctdata_crosswalk <- function(geography, cache = NULL) { - geography_standardized <- geography |> + geography_standardized <- geography |> stringr::str_to_lower() |> stringr::str_squish() |> stringr::str_replace_all("_", " ") @@ -43,7 +50,7 @@ get_ctdata_crosswalk <- function(geography, cache = NULL) { if (is.na(geography_standardized)) { stop( -"CTData crosswalks are only available for blocks, block groups, tracts, and counties. +"2020-2022 crosswalks are only available for blocks, block groups, tracts, and counties. 
The provided geography '", geography, "' is not supported.")} if (is.null(cache)) { @@ -54,41 +61,58 @@ The provided geography '", geography, "' is not supported.")} csv_path <- file.path( cache_path, - stringr::str_c("crosswalk_ctdata_2020_to_2022_", geography_standardized, ".csv")) + stringr::str_c("crosswalk_national_2020_to_2022_", geography_standardized, ".csv")) ctdata_urls <- list( block = "https://raw.githubusercontent.com/CT-Data-Collaborative/2022-block-crosswalk/main/2022blockcrosswalk.csv", tract = "https://raw.githubusercontent.com/CT-Data-Collaborative/2022-tract-crosswalk/main/2022tractcrosswalk.csv") - # Determine which URL will be used based on geography - download_url <- if (geography_standardized %in% c("block", "block_group")) { + # Determine which URL will be used based on geography for CT data + ctdata_download_url <- if (geography_standardized %in% c("block", "block_group")) { ctdata_urls$block } else { ctdata_urls$tract } + # Check cache for full national crosswalk if (file.exists(csv_path) & !is.null(cache)) { - message("Reading CTData crosswalk from cache.") + message("Reading national 2020-2022 crosswalk from cache.") result <- readr::read_csv( csv_path, col_types = readr::cols(.default = readr::col_character(), - allocation_factor_source_to_target = readr::col_double())) + allocation_factor_source_to_target = readr::col_double()), + show_col_types = FALSE) + + # Weighting note for metadata + weighting_note <- if (geography_standardized == "county") { + "CT county crosswalk uses population-weighted allocation factors from ACS 2021." + } else { + "All records have allocation_factor = 1 (identity mapping or CT FIPS code change)." + } - # Attach metadata to cached result attr(result, "crosswalk_metadata") <- list( - data_source = "ctdata", - data_source_full_name = "CT Data Collaborative", - download_url = download_url, - github_repository = "https://github.com/CT-Data-Collaborative", - documentation_url = "https://github.com/CT-Data-Collaborative/2022-tract-crosswalk", + data_source = "ctdata_nhgis_combined", + data_source_full_name = "CT Data Collaborative (CT) + NHGIS-derived identity mapping (other states)", + ctdata_download_url = ctdata_download_url, + ctdata_github_repository = "https://github.com/CT-Data-Collaborative", + ctdata_documentation_url = "https://github.com/CT-Data-Collaborative/2022-tract-crosswalk", + nhgis_crosswalk_used = if (geography_standardized != "county") { + stringr::str_c(geography_standardized, "2010_", geography_standardized, "2020") + } else { + "N/A (county GEOIDs from tidycensus)" + }, + nhgis_citation_url = "https://www.nhgis.org/citation-and-use-nhgis-data", source_year = "2020", target_year = "2022", source_geography = geography, source_geography_standardized = geography_standardized, target_geography = geography, target_geography_standardized = geography_standardized, - state_coverage = "Connecticut only (FIPS 09)", - notes = "Connecticut replaced 8 historical counties with 9 planning regions in 2022. 
Physical boundaries unchanged; only FIPS codes changed.", + state_coverage = "National (all 50 states, DC, and Puerto Rico)", + notes = c( + "Connecticut: 8 historical counties replaced by 9 planning regions in 2022 (county boundaries changed; sub-county geographies had FIPS code changes only).", + "Other states: No geographic changes between 2020 and 2022 (identity mapping).", + weighting_note), retrieved_at = NA, cached = TRUE, cache_path = csv_path, @@ -97,10 +121,85 @@ The provided geography '", geography, "' is not supported.")} return(result) } + message("Constructing nationally comprehensive 2020-2022 crosswalk...") + + # =========================================================================== + # STEP 1: Get all 2020 GEOIDs from NHGIS crosswalk (non-CT) or tidycensus (county) + # =========================================================================== + + # Map geography names for NHGIS + nhgis_geog_map <- c( + "block" = "block", + "block_group" = "block_group", + "tract" = "tract", + "county" = "county") + + nhgis_source_geog <- nhgis_geog_map[[geography_standardized]] + + if (geography_standardized == "county") { + # For county, use tidycensus since NHGIS doesn't have county -> county crosswalks + message("Fetching all 2020 county GEOIDs via tidycensus...") + + all_2020_geoids <- suppressMessages({ + tidycensus::get_acs( + year = 2021, + geography = "county", + variables = "B01003_001", + output = "wide") |> + dplyr::select(geoid_2020 = GEOID) |> + dplyr::filter(!stringr::str_starts(geoid_2020, "09")) |> + dplyr::pull(geoid_2020) + }) + + } else { + # For block, block_group, tract: use NHGIS 2010 -> 2020 crosswalk + message(stringr::str_c( + "Fetching NHGIS ", nhgis_source_geog, " 2010 -> 2020 crosswalk to obtain all 2020 GEOIDs...")) + + nhgis_crosswalk <- get_nhgis_crosswalk( + source_year = 2010, + source_geography = nhgis_source_geog, + target_year = 2020, + target_geography = nhgis_source_geog, + cache = cache) + + # Extract unique 2020 GEOIDs, excluding Connecticut (FIPS 09) + all_2020_geoids <- nhgis_crosswalk |> + dplyr::select(target_geoid) |> + dplyr::distinct() |> + dplyr::filter(!stringr::str_starts(target_geoid, "09")) |> + dplyr::pull(target_geoid) + } + + message(stringr::str_c( + "Found ", format(length(all_2020_geoids), big.mark = ","), + " non-CT 2020 ", geography_standardized, " GEOIDs.")) + + # =========================================================================== + # STEP 2: Create identity mapping for non-CT states + # =========================================================================== + + non_ct_crosswalk <- tibble::tibble( + source_geoid = all_2020_geoids, + target_geoid = all_2020_geoids, + source_geography_name = geography_standardized, + target_geography_name = geography_standardized, + source_year = "2020", + target_year = "2022", + allocation_factor_source_to_target = 1, + weighting_factor = "identity", + state_fips = stringr::str_sub(all_2020_geoids, 1, 2)) + + # =========================================================================== + # STEP 3: Get CT-specific crosswalk from CT Data Collaborative + # =========================================================================== + + message("Fetching Connecticut crosswalk from CT Data Collaborative...") + if (geography_standardized == "block") { raw_df <- readr::read_csv(ctdata_urls$block, show_col_types = FALSE) - result <- raw_df |> + ct_crosswalk <- raw_df |> dplyr::transmute( source_geoid = block_fips_2020, target_geoid = block_fips_2022, @@ -115,7 +214,7 @@ The provided 
geography '", geography, "' is not supported.")} } else if (geography_standardized == "block_group") { raw_df <- readr::read_csv(ctdata_urls$block, show_col_types = FALSE) - result <- raw_df |> + ct_crosswalk <- raw_df |> dplyr::transmute( source_geoid = stringr::str_sub(block_fips_2020, 1, 12), target_geoid = stringr::str_sub(block_fips_2022, 1, 12)) |> @@ -132,7 +231,7 @@ The provided geography '", geography, "' is not supported.")} } else if (geography_standardized == "tract") { raw_df <- readr::read_csv(ctdata_urls$tract, show_col_types = FALSE) - result <- raw_df |> + ct_crosswalk <- raw_df |> dplyr::transmute( source_geoid = tract_fips_2020, target_geoid = Tract_fips_2022, @@ -155,17 +254,17 @@ The provided geography '", geography, "' is not supported.")} ct_tract_populations <- suppressMessages({ tidycensus::get_acs( - year = 2021, - geography = "tract", - state = "CT", - variables = "B01003_001", - output = "wide") |> + year = 2021, + geography = "tract", + state = "CT", + variables = "B01003_001", + output = "wide") |> dplyr::select( tract_fips_2020 = GEOID, population_2020 = B01003_001E) }) - result <- raw_df |> + ct_crosswalk <- raw_df |> dplyr::left_join(ct_tract_populations, by = "tract_fips_2020") |> dplyr::summarize( population_2020 = sum(population_2020, na.rm = TRUE), @@ -191,113 +290,78 @@ The provided geography '", geography, "' is not supported.")} weighting_factor, state_fips) } + # =========================================================================== + # STEP 4: Combine CT and non-CT crosswalks + # =========================================================================== + + result <- dplyr::bind_rows(ct_crosswalk, non_ct_crosswalk) |> + dplyr::arrange(source_geoid) + + message(stringr::str_c( + "National 2020-2022 crosswalk constructed: ", + format(nrow(ct_crosswalk), big.mark = ","), " CT records + ", + format(nrow(non_ct_crosswalk), big.mark = ","), " non-CT records = ", + format(nrow(result), big.mark = ","), " total records.")) + + # =========================================================================== + # STEP 5: Cache and return + # =========================================================================== + if (!is.null(cache)) { if (!dir.exists(cache_path)) { dir.create(cache_path, recursive = TRUE) } readr::write_csv(result, csv_path) + message(stringr::str_c("Cached to: ", csv_path)) } message( -"Connecticut 2020-2022 crosswalk sourced from CT Data Collaborative. -See https://github.com/CT-Data-Collaborative for more information.") +"National 2020-2022 crosswalk constructed: +- Connecticut: CT Data Collaborative (https://github.com/CT-Data-Collaborative) +- Other states: Identity mapping derived from NHGIS 2010-2020 crosswalk") # Attach metadata to result weighting_note <- if (geography_standardized == "county") { - "County crosswalk uses population-weighted allocation factors from ACS 2021 tract populations." + "CT county crosswalk uses population-weighted allocation factors from ACS 2021." } else { - "Identity mapping (allocation_factor = 1) - physical boundaries unchanged, only FIPS codes changed." + "All records have allocation_factor = 1 (identity mapping or CT FIPS code change)." 
} attr(result, "crosswalk_metadata") <- list( - data_source = "ctdata", - data_source_full_name = "CT Data Collaborative", - download_url = download_url, - github_repository = "https://github.com/CT-Data-Collaborative", - documentation_url = "https://github.com/CT-Data-Collaborative/2022-tract-crosswalk", + data_source = "ctdata_nhgis_combined", + data_source_full_name = "CT Data Collaborative (CT) + NHGIS-derived identity mapping (other states)", + ctdata_download_url = ctdata_download_url, + ctdata_github_repository = "https://github.com/CT-Data-Collaborative", + ctdata_documentation_url = "https://github.com/CT-Data-Collaborative/2022-tract-crosswalk", + nhgis_crosswalk_used = if (geography_standardized != "county") { + stringr::str_c(geography_standardized, "2010_", geography_standardized, "2020") + } else { + "N/A (county GEOIDs from tidycensus)" + }, + nhgis_citation_url = "https://www.nhgis.org/citation-and-use-nhgis-data", source_year = "2020", target_year = "2022", source_geography = geography, source_geography_standardized = geography_standardized, target_geography = geography, target_geography_standardized = geography_standardized, - state_coverage = "Connecticut only (FIPS 09)", + state_coverage = "National (all 50 states, DC, and Puerto Rico)", notes = c( - "Connecticut replaced 8 historical counties with 9 planning regions in 2022.", + "Connecticut: 8 historical counties replaced by 9 planning regions in 2022 (county boundaries changed; sub-county geographies had FIPS code changes only).", + "Other states: No geographic changes between 2020 and 2022 (identity mapping).", weighting_note), retrieved_at = Sys.time(), cached = !is.null(cache), - cache_path = if (!is.null(cache)) csv_path else NULL) + cache_path = if (!is.null(cache)) csv_path else NULL, + read_from_cache = FALSE) return(result) } - -#' Generate Identity Crosswalk for Non-Connecticut States (2020-2022) -#' -#' For states other than Connecticut, there were no geographic changes between -#' 2020 and 2022. This function generates an identity mapping where source and -#' target GEOIDs are identical. -#' -#' @param geography Character. Geography type: one of "block_group", "tract", or "county". -#' @param states Character vector. State FIPS codes to include. Defaults to all -#' states except Connecticut ("09"). -#' -#' @return A tibble with identity mappings for the specified geography and states. -#' @noRd -get_identity_crosswalk_2020_2022 <- function(geography, states = NULL) { - - geography_standardized <- geography |> - stringr::str_to_lower() |> - stringr::str_squish() |> - stringr::str_replace_all("_", " ") - - geography_standardized <- dplyr::case_when( - geography_standardized %in% c("block group", "blockgroup", "bg") ~ "block_group", - geography_standardized %in% c("tract", "tracts", "tr") ~ "tract", - geography_standardized %in% c("county", "counties", "co") ~ "county", - TRUE ~ NA_character_) - - if (is.na(geography_standardized)) { - stop( -"Identity crosswalks for 2020-2022 are only available for block groups, tracts, -and counties. 
Block-level identity crosswalks are not supported due to data size.")} - - all_state_fips <- c( - "01", "02", "04", "05", "06", "08", "10", "11", "12", "13", - "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", - "25", "26", "27", "28", "29", "30", "31", "32", "33", "34", - "35", "36", "37", "38", "39", "40", "41", "42", "44", "45", - "46", "47", "48", "49", "50", "51", "53", "54", "55", "56", "72") - - non_ct_states <- all_state_fips[all_state_fips != "09"] - - if (is.null(states)) { - states <- non_ct_states - } else { - states <- states[states != "09"] - } - - result <- tibble::tibble( - source_geoid = character(), - target_geoid = character(), - source_geography_name = character(), - target_geography_name = character(), - source_year = character(), - target_year = character(), - allocation_factor_source_to_target = numeric(), - weighting_factor = character(), - state_fips = character()) - - message( -"For states other than Connecticut, no geographic changes occurred between -2020 and 2022. Returning identity mapping (source_geoid = target_geoid). -Note: This function returns an empty template. To populate with actual GEOIDs, -you would need to provide a list of GEOIDs or use Census Bureau geography files.") - - attr(result, "identity_mapping") <- TRUE - attr(result, "states_included") <- states - attr(result, "geography") <- geography_standardized - - return(result) -} +utils::globalVariables(c( + "B01003_001E", "GEOID", "Tract_fips_2022", "allocation_factor_source_to_target", + "block_fips_2020", "block_fips_2022", "ce_fips_2022", "county_fips_2020", + "county_fips_2022", "geoid_2020", "population_2020", "population_2020_total", + "source_geography_name", "source_geoid", "source_year", "state_fips", + "target_geography_name", "target_geoid", "target_year", "tract_fips_2020", + "tract_fips_2022", "weighting_factor")) \ No newline at end of file diff --git a/R/get_geocorr_crosswalk.R b/R/get_geocorr_crosswalk.R index dbe0932..e9c9ba0 100644 --- a/R/get_geocorr_crosswalk.R +++ b/R/get_geocorr_crosswalk.R @@ -37,13 +37,15 @@ #' \item{weighting_factor}{The attribute used to calculate allocation factors #' (one of population, housing, land)} #' } +#' @keywords internal #' @noRd get_geocorr_crosswalk <- function( source_geography, target_geography, - weight = c("population", "housing", "land"), + weight = "population", cache = NULL) { + outpath = "no file exists here" ## identify the relevant file paths for potentially-cached crosswalks if (!is.null(cache)) { outpath = file.path( @@ -53,7 +55,7 @@ get_geocorr_crosswalk <- function( ## if the file exists and the user does not wish to overwrite it if (file.exists(outpath) & !is.null(cache)) { - result = readr::read_csv(outpath) + result = readr::read_csv(outpath, show_col_types = FALSE) message("Reading file from cache.") @@ -77,8 +79,14 @@ get_geocorr_crosswalk <- function( # Base API URL for geocorr2022 base_url <- "https://mcdc.missouri.edu/cgi-bin/broker" + if (is.null(weight)) { + message("Setting the default crosswalk weighting variable to: population.") + weight = "population" + } + # Map weight parameter to API format - weight_value <- switch(weight, + weight_value <- switch( + weight, "population" = "pop20", "land" = "landsqmi", "housing" = "hus20") @@ -188,7 +196,7 @@ get_geocorr_crosswalk <- function( if (is.na(csv_path)) { stop("Unable to acquire the specified crosswalk; please file an issue.") } - df1 = readr::read_csv(file.path("https://mcdc.missouri.edu", "temp", csv_path)) |> + df1 = 
readr::read_csv(file.path("https://mcdc.missouri.edu", "temp", csv_path), show_col_types = FALSE) |> janitor::clean_names() })} else { # Build query parameters @@ -229,7 +237,7 @@ get_geocorr_crosswalk <- function( if (is.na(csv_path)) { stop("Unable to acquire the specified crosswalk; please file an issue.") } - df1 = readr::read_csv(file.path("https://mcdc.missouri.edu", "temp", csv_path)) |> + df1 = readr::read_csv(file.path("https://mcdc.missouri.edu", "temp", csv_path), show_col_types = FALSE) |> janitor::clean_names() } df2 = df1 |> @@ -297,8 +305,17 @@ get_geocorr_crosswalk <- function( allocation_factor_target_to_source = afact2, dplyr::any_of(c("housing_2020", "population_2020", "land_area_sqmi"))) |> dplyr::mutate( + ## tract-level geoids (or the component columns we use to create them) aren't consistently + ## structured across tract-level crosswalks. in the case that we've accidentally created + ## 13-character geoids (by duplicating the state fips), we drop that here source_geography = source_geography, + source_geoid = dplyr::case_when( + source_geography == "tract" & nchar(source_geoid) == 13 ~ stringr::str_sub(source_geoid, 3, 13), + TRUE ~ source_geoid), target_geography = target_geography, + target_geoid = dplyr::case_when( + target_geography == "tract" & nchar(target_geoid) == 13 ~ stringr::str_sub(target_geoid, 3, 13), + TRUE ~ target_geoid), weighting_factor = weight, dplyr::across(.cols = dplyr::matches("allocation"), .fns = as.numeric)) @@ -330,34 +347,4 @@ get_geocorr_crosswalk <- function( utils::globalVariables(c("afact", "afact2", "county")) -# get_geocorr_crosswalk( -# source_geography = "zcta", -# target_geography = "puma22", -# weight = c("population"), -# cache = here::here("crosswalks-cache"), -# overwrite_cache = FALSE) - -# ## omitting the provided MO-specific geographies -# sources = c( -# "place", "county", "tract", "blockgroup", "block", "zcta", "puma22", "cousub", -# "cbsa20", "cbsatype20", "metdiv20", "csa20", "necta", "nectadiv", "cnect", "aiannh", -# ## these may be formatted differently -- including a state parameter? -# "sduni20", "sdelem20", "sdsec20", "sdbest20", "sdbesttype20", "placesc", "puma12", -# "countysc", "inplace", "ur", "ua", "cbsa23", "cbsatype23", "metdiv23", "csa23", -# "cbsacentral23", "sldu24", "sldl24", "sldu22", "sld22", "sldu18", "sldl28", -# "cd119", "cd118", "cd117", "cd116", -# ## ctregion only works for CT; "vtd20" may be nested at the country level? -# "ctregion", "vtd20", "hsa19", "hrr19", "rucc23") -# -# ## "block" -- this level requires submitting 13 or fewer states at a time -# -# core_sources = c("place", "county", "tract", "blockgroup", -# "zcta", "puma22", "cd119", "cd118") -# -# expand.grid(core_sources, core_sources) |> -# dplyr::rename(source_geography = 1, target_geography = 2) |> -# dplyr::filter(source_geography != target_geography) |> -# dplyr::mutate(weight = "population", cache = here::here("crosswalks-cache"), overwrite_cache = FALSE, dplyr::across(dplyr::where(is.factor), as.character)) |> -# purrr::pwalk(get_geocorr_crosswalk) - diff --git a/R/get_nhgis_crosswalk.R b/R/get_nhgis_crosswalk.R index 5f2d13c..c36f0df 100644 --- a/R/get_nhgis_crosswalk.R +++ b/R/get_nhgis_crosswalk.R @@ -6,6 +6,7 @@ #' @param context Character. Either "source" or "target" to determine valid options. #' @return Character. Standardized geography code. 
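The 13-character GEOID repair in `get_geocorr_crosswalk()` above is easier to see with a concrete value. A minimal sketch (the GEOID below is hypothetical, with a duplicated state FIPS) of what the `str_sub()` call recovers:

```r
library(stringr)

# A tract GEOID is 11 characters: 2 (state) + 3 (county) + 6 (tract).
# If the state FIPS is accidentally prepended twice, the value grows to 13
# characters; keeping characters 3-13 restores the standard 11-character GEOID.
bad_geoid   <- "0909001030100"             # hypothetical duplicated-state-FIPS value
fixed_geoid <- str_sub(bad_geoid, 3, 13)   # "09001030100"
nchar(fixed_geoid)                         # 11
```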
#' @keywords internal +#' @noRd standardize_geography <- function(geography, context = "source") { # Convert to lowercase and remove extra whitespace geography <- geography |> @@ -133,7 +134,7 @@ list_nhgis_crosswalks <- function() { "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_blk2010.zip", ## ========================================================================= - ## BLOCK → BLOCK GROUP + ## BLOCK -> BLOCK GROUP ## ========================================================================= ## from 1990 "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_bg2010.zip", @@ -158,7 +159,7 @@ list_nhgis_crosswalks <- function() { "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_bg2015.zip", ## ========================================================================= - ## BLOCK GROUP ↔ BLOCK GROUP (bidirectional) + ## BLOCK GROUP <-> BLOCK GROUP (bidirectional) ## ========================================================================= ## from 2010s "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2010_bg2020.zip", @@ -184,7 +185,7 @@ list_nhgis_crosswalks <- function() { "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2022_bg2015.zip", ## ========================================================================= - ## BLOCK → TRACT + ## BLOCK -> TRACT ## ========================================================================= ## from 1990 "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_tr2010.zip", @@ -209,7 +210,7 @@ list_nhgis_crosswalks <- function() { "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_tr2015.zip", ## ========================================================================= - ## BLOCK GROUP → TRACT + ## BLOCK GROUP -> TRACT ## ========================================================================= ## from 2010s "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2010_tr2020.zip", @@ -235,7 +236,7 @@ list_nhgis_crosswalks <- function() { "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2022_tr2015.zip", ## ========================================================================= - ## TRACT ↔ TRACT (bidirectional) + ## TRACT<-> TRACT (bidirectional) ## ========================================================================= ## from 1990 "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr1990_tr2010.zip", @@ -273,7 +274,7 @@ list_nhgis_crosswalks <- function() { "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2022_tr2015.zip", ## ========================================================================= - ## BLOCK → COUNTY + ## BLOCK -> COUNTY ## Note: 2011/2012 targets only available from 2020 source (not 1990/2000) ## ========================================================================= ## from 1990 (to 2010, 2014, 2015 only) @@ -295,7 +296,7 @@ list_nhgis_crosswalks <- function() { "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk2020_co2015.zip", ## ========================================================================= - ## BLOCK GROUP → COUNTY + ## BLOCK GROUP -> COUNTY ## Note: bg source to co only available for 2010, 2014, 2015 sources ## (NOT 2011 or 2012 sources) ## ========================================================================= @@ -318,7 +319,7 @@ list_nhgis_crosswalks <- function() { "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_bg2022_co2015.zip", ## 
========================================================================= - ## TRACT → COUNTY + ## TRACT -> COUNTY ## Note: tr source to co only available for 1990, 2000, 2010, 2014, 2015, ## 2020, 2022 sources (NOT 2011 or 2012 sources) ## ========================================================================= @@ -349,7 +350,7 @@ list_nhgis_crosswalks <- function() { "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_tr2022_co2015.zip", ## ========================================================================= - ## BLOCK → OTHER GEOGRAPHIES (decennial years only) + ## BLOCK -> OTHER GEOGRAPHIES (decennial years only) ## ========================================================================= ## CBSA "https://api.ipums.org/supplemental-data/nhgis/crosswalks/nhgis_blk1990_cbsa2010.zip", @@ -427,12 +428,7 @@ list_nhgis_crosswalks <- function() { #' @param cache Directory path. Where to download the crosswalk to. If NULL (default), #' crosswalk is returned but not saved to disk. #' -#' @return A data frame containing the crosswalk between the specified geographies. -#' Data are tidy-formatted, with each observation reflecting a unique -#' source-target-weighting factor combination. Note that all (typically two -#' or three) available weighting factors are returned. -#' -#'#' @return A dataframe representing the requested Geocorr22 crosswalk for all +#' @return A dataframe representing the requested Geocorr22 crosswalk for all #' 51 states and Puerto Rico. Depending on the desired geographies, some #' fields may not be included. #' \describe{ @@ -446,6 +442,7 @@ list_nhgis_crosswalks <- function() { #' from the source geography to the target geography} #' \item{weighting_factor}{The attribute used to calculate allocation factors} #' } +#' @keywords internal #' @noRd get_nhgis_crosswalk <- function( source_year, @@ -477,7 +474,7 @@ get_nhgis_crosswalk <- function( ## if the file exists and cache == TRUE if (file.exists(csv_path) & !is.null(cache)) { - result = readr::read_csv(csv_path) + result = readr::read_csv(csv_path, show_col_types = FALSE) message( "Use of NHGIS crosswalks is subject to the same conditions as for all NHGIS data. @@ -718,7 +715,8 @@ variable. Get your key at https://account.ipums.org/api_keys") } crosswalk_df = readr::read_csv( csv_files[1], - col_types = readr::cols(.default = readr::col_character())) |> + col_types = readr::cols(.default = readr::col_character()), + show_col_types = FALSE) |> janitor::clean_names() crosswalk_df diff --git a/R/plan_crosswalk_chain.R b/R/plan_crosswalk_chain.R new file mode 100644 index 0000000..ee1a38b --- /dev/null +++ b/R/plan_crosswalk_chain.R @@ -0,0 +1,267 @@ +#' Plan a Crosswalk Chain +#' +#' Internal function that analyzes source and target geography/year combinations +#' to determine the sequence of crosswalks needed. Returns a plan object describing +#' the chain without fetching any data. +#' +#' @param source_geography Character. Source geography name. +#' @param target_geography Character. Target geography name. +#' @param source_year Numeric or NULL. Year of the source geography. +#' @param target_year Numeric or NULL. Year of the target geography. +#' @param weight Character or NULL. Weighting variable for Geocorr crosswalks. +#' +#' @return A list with the following elements: +#' \describe{ +#' \item{is_multi_step}{Logical. 
TRUE if multiple crosswalks are needed.} +#' \item{steps}{A tibble describing each step with columns: step_number, +#' source_geography, source_year, target_geography, target_year, +#' crosswalk_source, description.} +#' \item{intermediate_geography}{Character or NULL. The pivot geography +#' between steps (if multi-step).} +#' \item{intermediate_year}{Numeric or NULL. The pivot year between steps +#' (if multi-step).} +#' \item{composition_note}{Character. Explanation of how to compose +#' allocation factors.} +#' \item{error}{Character or NULL. Error message if the chain is not possible.} +#' } +#' +#' @details +#' Multi-step crosswalks use a year-first approach: +#' 1. Step 1 (NHGIS): Change year while keeping geography constant +#' 2. Step 2 (Geocorr): Change geography at the target year +#' +#' This approach works because NHGIS supports inter-temporal crosswalks for +#' base Census geographies (block, block group, tract, county), while Geocorr +#' has broader geography coverage (ZCTA, PUMA, place, etc.). +#' +#' @keywords internal +#' @noRd +plan_crosswalk_chain <- function( + source_geography, + target_geography, + source_year = NULL, + target_year = NULL, + weight = NULL) { + + # Initialize result structure + result <- list( + is_multi_step = FALSE, + steps = tibble::tibble(), + intermediate_geography = NULL, + intermediate_year = NULL, + composition_note = NULL, + error = NULL) + + # Standardize geography names for comparison + source_geog_std <- standardize_geography_for_chain(source_geography) + target_geog_std <- standardize_geography_for_chain(target_geography) + + + # Convert years to character for consistent handling + # Use NA_character_ instead of NULL for tibble compatibility + source_year_chr <- if (!is.null(source_year)) as.character(source_year) else NA_character_ + target_year_chr <- if (!is.null(target_year)) as.character(target_year) else NA_character_ + + # Determine what kind of crosswalk is needed + geography_changes <- !isTRUE(source_geog_std == target_geog_std) + year_changes <- !is.na(source_year_chr) && !is.na(target_year_chr) && + !isTRUE(source_year_chr == target_year_chr) + + # Case 1: Same geography, same year (or no years) - no crosswalk needed + if (!geography_changes && !year_changes) { + result$steps <- tibble::tibble( + step_number = 0L, + source_geography = source_geography, + source_year = source_year_chr, + target_geography = target_geography, + target_year = target_year_chr, + crosswalk_source = "none", + description = "No crosswalk needed: source and target are identical") + result$composition_note <- "No composition needed." + return(result) + } + + # Case 2: Same geography, different years - single NHGIS crosswalk + if (!geography_changes && year_changes) { + result$steps <- tibble::tibble( + step_number = 1L, + source_geography = source_geography, + source_year = source_year_chr, + target_geography = target_geography, + target_year = target_year_chr, + crosswalk_source = determine_temporal_source(source_year_chr, target_year_chr), + description = stringr::str_c( + source_year_chr, " ", source_geog_std, " -> ", + target_year_chr, " ", target_geog_std, " (inter-temporal)")) + result$composition_note <- "Single crosswalk; use allocation_factor_source_to_target directly." 
+ return(result) + } + + # Case 3: Different geography, same year (or no years) - single Geocorr crosswalk + if (geography_changes && !year_changes) { + ref_year <- if (!is.na(target_year_chr)) target_year_chr else "2022" + result$steps <- tibble::tibble( + step_number = 1L, + source_geography = source_geography, + source_year = source_year_chr, + target_geography = target_geography, + target_year = target_year_chr, + crosswalk_source = "geocorr", + description = stringr::str_c( + source_geog_std, " -> ", target_geog_std, " (inter-geography, ", ref_year, ")")) + result$composition_note <- "Single crosswalk; use allocation_factor_source_to_target directly." + return(result) + } + + # Case 4: Different geography AND different year - multi-step required + result$is_multi_step <- TRUE + + # Check if NHGIS supports the source geography for inter-temporal crosswalk + nhgis_temporal_geographies <- c("block", "block_group", "tract", "county") + source_supports_temporal <- source_geog_std %in% nhgis_temporal_geographies + + if (!source_supports_temporal) { + # Cannot do year-first approach; source geography not supported by NHGIS + result$error <- stringr::str_c( + "Multi-step crosswalk not possible: NHGIS does not support inter-temporal ", + "crosswalks for '", source_geography, "'. NHGIS only supports: ", + paste(nhgis_temporal_geographies, collapse = ", "), ". ", + "Consider using a different source geography or performing the crosswalk ", + "in a different order manually.") + return(result) + } + + # Determine the intermediate point (year-first approach) + # Step 1: source_geog(source_year) -> source_geog(target_year) via NHGIS + # Step 2: source_geog(target_year) -> target_geog(target_year) via Geocorr + + result$intermediate_geography <- source_geography[1] + result$intermediate_year <- target_year_chr[1] + + # Determine temporal crosswalk source for step 1 + step1_source <- determine_temporal_source(source_year_chr[1], target_year_chr[1]) + + # Use first element to ensure scalar values + src_geog <- source_geography[1] + tgt_geog <- target_geography[1] + src_year <- source_year_chr[1] + tgt_year <- target_year_chr[1] + src_std <- source_geog_std[1] + tgt_std <- target_geog_std[1] + + result$steps <- tibble::tibble( + step_number = c(1L, 2L), + source_geography = c(src_geog, src_geog), + source_year = c(src_year, tgt_year), + target_geography = c(src_geog, tgt_geog), + target_year = c(tgt_year, tgt_year), + crosswalk_source = c(step1_source, "geocorr"), + description = c( + stringr::str_c( + src_year, " ", src_std, " -> ", + tgt_year, " ", src_std, " (inter-temporal via ", step1_source, ")"), + stringr::str_c( + tgt_year, " ", src_std, " -> ", + tgt_year, " ", tgt_std, " (inter-geography via Geocorr)"))) + + result$composition_note <- stringr::str_c( + "Compose crosswalks by joining on intermediate geography (", + src_std, " ", tgt_year, ") and multiplying allocation factors: ", + "final_allocation = step1_allocation * step2_allocation") + + return(result) +} + + +#' Standardize Geography Name for Chain Planning +#' +#' Simplified geography standardization for internal chain planning. +#' +#' @param geography Character. Geography name to standardize. +#' @return Character. Standardized geography name. 
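As a quick illustration of the single-crosswalk branch handled above, an inter-temporal request with an unchanged geography yields a one-row plan. This is a sketch only (`plan_crosswalk_chain()` is internal), and the expected values follow directly from the code above:

```r
# Same geography, different years: falls into the inter-temporal case above.
plan <- plan_crosswalk_chain(
  source_geography = "tract",
  target_geography = "tract",
  source_year = 2010,
  target_year = 2020)

plan$is_multi_step           # FALSE
plan$steps$crosswalk_source  # "nhgis"
plan$steps$description       # "2010 tract -> 2020 tract (inter-temporal)"
```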
+#' @keywords internal +#' @noRd +standardize_geography_for_chain <- function(geography) { + geography <- geography |> + stringr::str_to_lower() |> + stringr::str_squish() |> + stringr::str_replace_all("_", " ") + + dplyr::case_when( + geography %in% c("blk", "block", "blocks", "census block") ~ "block", + geography %in% c("bg", "blockgroup", "block group", "census block group") ~ "block_group", + geography %in% c("tr", "tract", "tracts", "census tract") ~ "tract", + geography %in% c("co", "county", "counties", "cnty") ~ "county", + geography %in% c("pl", "place", "places") ~ "place", + geography %in% c("zcta", "zctas", "zip code", "zip code tabulation area") ~ "zcta", + geography %in% c("puma", "pumas", "puma22", "public use microdata area") ~ "puma", + geography %in% c("cbsa", "core based statistical area") ~ "cbsa", + geography %in% c("ua", "urban area", "urban areas") ~ "urban_area", + geography %in% c("cd118", "cd119", "congressional district") ~ geography, + TRUE ~ geography) +} + + +#' Determine Temporal Crosswalk Source +#' +#' Determines which data source to use for inter-temporal crosswalks. +#' +#' @param source_year Character. Source year. +#' @param target_year Character. Target year. +#' @return Character. One of "nhgis", "ctdata", or "identity". +#' @keywords internal +#' @noRd +determine_temporal_source <- function(source_year, target_year) { + # 2020 to 2022 special case: CTData for Connecticut + if (isTRUE(source_year == "2020") && isTRUE(target_year == "2022")) { + return("ctdata_2020_2022") + } + + # 2022 to 2020 would be reverse of CTData case + if (isTRUE(source_year == "2022") && isTRUE(target_year == "2020")) { + return("ctdata_2020_2022") + } + + # All other inter-temporal crosswalks use NHGIS + return("nhgis") +} + + +#' Format Crosswalk Chain Plan as Message +#' +#' Formats a crosswalk chain plan as a human-readable message. +#' +#' @param plan A plan object from plan_crosswalk_chain(). +#' @return Character. Formatted message describing the plan. 
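A brief sketch of how the two helpers above behave; the expected values follow directly from the `case_when()` mapping and the 2020/2022 special case (the calls are illustrative, since both helpers are internal):

```r
# Geography aliases collapse to the package's standardized names.
standardize_geography_for_chain("Block Group")  # "block_group"
standardize_geography_for_chain("ZCTAs")        # "zcta"
standardize_geography_for_chain("puma22")       # "puma"

# Only the 2020 <-> 2022 pair is routed to the CT Data Collaborative-based
# crosswalk; every other year pair uses NHGIS.
determine_temporal_source("2020", "2022")       # "ctdata_2020_2022"
determine_temporal_source("2010", "2020")       # "nhgis"
```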
+#' @keywords internal +#' @noRd +format_chain_plan_message <- function(plan) { + if (!is.null(plan$error)) { + return(stringr::str_c("Error: ", plan$error)) + } + + if (nrow(plan$steps) == 0) { + return("No crosswalk steps defined.") + } + + if (plan$steps$crosswalk_source[1] == "none") { + return("No crosswalk needed: source and target are identical.") + } + + if (!plan$is_multi_step) { + return(stringr::str_c( + "Single-step crosswalk:\n", + " Step 1: ", plan$steps$description[1], "\n", + "\n", plan$composition_note)) + } + + step_lines <- purrr::map_chr( + seq_len(nrow(plan$steps)), + ~ stringr::str_c(" Step ", .x, ": ", plan$steps$description[.x])) + + stringr::str_c( + "Multi-step crosswalk required:\n", + paste(step_lines, collapse = "\n"), "\n", + "\nIntermediate: ", plan$intermediate_geography, " (", plan$intermediate_year, ")\n", + "\n", plan$composition_note) +} diff --git a/man/crosswalk_data.Rd b/man/crosswalk_data.Rd new file mode 100644 index 0000000..1a7646b --- /dev/null +++ b/man/crosswalk_data.Rd @@ -0,0 +1,123 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/crosswalk_data.R +\name{crosswalk_data} +\alias{crosswalk_data} +\title{Apply a Crosswalk to Transform Data} +\usage{ +crosswalk_data( + data, + crosswalk, + geoid_column = "geoid", + count_columns = NULL, + non_count_columns = NULL, + return_intermediate = FALSE +) +} +\arguments{ +\item{data}{A data frame or tibble containing the data to crosswalk.} + +\item{crosswalk}{The output from \code{get_crosswalk()} - a list containing: +\describe{ +\item{crosswalks}{A named list of crosswalk tibbles (step_1, step_2, etc.)} +\item{plan}{The crosswalk plan} +\item{message}{Description of the crosswalk chain} +} +Alternatively, a single crosswalk tibble can be provided for backwards +compatibility.} + +\item{geoid_column}{Character. The name of the column in \code{data} containing +the source geography identifiers (GEOIDs). Default is "geoid".} + +\item{count_columns}{Character vector or NULL. Column names in \code{data} that represent +count variables. These will be summed after multiplying by the allocation factor. +If NULL (default), automatically detects columns with the prefix "count_".} + +\item{non_count_columns}{Character vector or NULL. Column names in \code{data} that represent +mean, median, percentage, and ratio variables. These will be calculated as weighted +means using the allocation factor as weights. If NULL (default), automatically +detects columns with prefixes "mean_", "median_", "percent_", or "ratio_".} + +\item{return_intermediate}{Logical. If TRUE and crosswalk has multiple steps, +returns a list containing both the final result and intermediate results +from each step. Default is FALSE, which returns only the final result.} +} +\value{ +If \code{return_intermediate = FALSE} (default), a tibble with data summarized +to the final target geography. + +If \code{return_intermediate = TRUE} and there are multiple crosswalk steps, a list with: +\describe{ +\item{final}{The final crosswalked data} +\item{intermediate}{A named list of intermediate results (step_1, step_2, etc.)} +} + +The returned tibble(s) include an attribute \code{crosswalk_metadata} from the +underlying crosswalk (access via \code{attr(result, "crosswalk_metadata")}). +} +\description{ +Applies geographic crosswalk weights to transform data from a source geography +to a target geography. 
Accepts the output from \code{get_crosswalk()} and automatically +applies all crosswalk steps sequentially for multi-step transformations. +} +\details{ +\strong{Count variables} (specified in \code{count_columns}) are interpolated by summing +the product of the value and the allocation factor across all source geographies +that overlap with each target geography. + +\strong{Non-count variables} (specified in \code{non_count_columns}) are interpolated using +a weighted mean, with the allocation factor serving as the weight. + +\strong{Automatic column detection}: If \code{count_columns} and \code{non_count_columns} are +both NULL, the function will automatically detect columns based on naming prefixes: +\itemize{ +\item Columns starting with "count_" are treated as count variables +\item Columns starting with "mean_", "median_", "percent_", or "ratio_" are treated +as non-count variables +} + +\strong{Multi-step crosswalks}: When \code{get_crosswalk()} returns multiple crosswalks +(for transformations that change both geography and year), this function +automatically applies them in sequence. +} +\examples{ +\dontrun{ +# Single-step crosswalk +crosswalk <- get_crosswalk( + source_geography = "tract", + target_geography = "zcta", + weight = "population") + +result <- crosswalk_data( + data = my_tract_data, + crosswalk = crosswalk, + geoid_column = "tract_geoid", + count_columns = c("count_population", "count_housing_units")) + +# Multi-step crosswalk (geography + year change) +crosswalk <- get_crosswalk( + source_geography = "tract", + target_geography = "zcta", + source_year = 2010, + target_year = 2020, + weight = "population") + +# Automatically applies both steps +result <- crosswalk_data( + data = my_data, + crosswalk = crosswalk, + geoid_column = "tract_geoid", + count_columns = "count_population") + +# To get intermediate results +result <- crosswalk_data( + data = my_data, + crosswalk = crosswalk, + geoid_column = "tract_geoid", + count_columns = "count_population", + return_intermediate = TRUE) + +# Access intermediate and final +result$intermediate$step_1 # After first crosswalk +result$final # Final result +} +} diff --git a/man/get_crosswalk.Rd b/man/get_crosswalk.Rd index d278fc4..da12545 100644 --- a/man/get_crosswalk.Rd +++ b/man/get_crosswalk.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/get_crosswalk.R \name{get_crosswalk} \alias{get_crosswalk} -\title{Get an inter-temporal or inter-geography crosswalk} +\title{Get a Geographic Crosswalk} \usage{ get_crosswalk( source_geography, @@ -10,7 +10,7 @@ get_crosswalk( source_year = NULL, target_year = NULL, cache = NULL, - weight = NULL + weight = "population" ) } \arguments{ @@ -38,21 +38,19 @@ are cached separately when provided.} c("population", "housing", "land").} } \value{ -A tibble containing the crosswalk between the specified geographies. -Data are tidy-formatted, with each observation reflecting a unique -source-target-weighting factor combination. - -The returned tibble includes an attribute \code{crosswalk_metadata} containing: +A list with a consistent structure: \describe{ -\item{source}{Character vector of data sources used (e.g., "nhgis", "ctdata")} -\item{source_year}{The source year} -\item{target_year}{The target year} -\item{source_geography}{The source geography} -\item{target_geography}{The target geography} -\item{notes}{Any relevant notes about the crosswalk construction} +\item{crosswalks}{A named list of crosswalk tibbles (step_1, step_2, etc.). 
+Single-step transformations have one crosswalk; multi-step have two or more.} +\item{plan}{The crosswalk plan describing the transformation steps} +\item{message}{A formatted message describing the crosswalk chain} } -Columns in the returned dataframe (some may not be present depending on source): +Each crosswalk tibble includes an attribute \code{crosswalk_metadata} (access via +\code{attr(result$crosswalks$step_1, "crosswalk_metadata")}) containing comprehensive +information about how the crosswalk was produced. + +Columns in returned crosswalk dataframes (some may not be present depending on source): \describe{ \item{source_geoid}{A unique identifier for the source geography} \item{target_geoid}{A unique identifier for the target geography} @@ -73,7 +71,8 @@ from the target geography to the source geography} } \description{ Retrieves a crosswalk with interpolation values from a source geography to a target -geography or from a source year to a target year. +geography, optionally across different years. Always returns a list with a consistent +structure containing one or more crosswalk tibbles. } \details{ This function sources crosswalks from Geocorr 2022, IPUMS NHGIS, and @@ -81,6 +80,14 @@ CT Data Collaborative. Crosswalk weights are from the original sources and have not been modified; this function merely standardizes the format of the returned crosswalks and enables easy programmatic access and caching. +\strong{Multi-step crosswalks}: When both geography AND year change (e.g., +2010 tracts to 2020 ZCTAs), no single crosswalk source provides this directly. +This function returns multiple crosswalks that should be applied sequentially: +\enumerate{ +\item First crosswalk changes year (via NHGIS): source_geog(source_year) -> source_geog(target_year) +\item Second crosswalk changes geography (via Geocorr): source_geog(target_year) -> target_geog(target_year) +} + \strong{Non-census year support}: For target years 2011, 2012, 2014, 2015, and 2022, crosswalks are available only for block groups, tracts, and counties. These years correspond to American Community Survey geography changes. @@ -98,26 +105,39 @@ obtain a key from: https://account.ipums.org/api_keys. 
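`crosswalk_data()` applies multi-step results automatically, but the composition described above can also be done by hand. A minimal dplyr sketch, assuming `result` is the multi-step `get_crosswalk()` output shown in the examples below and that each step carries the documented `source_geoid`, `target_geoid`, and `allocation_factor_source_to_target` columns:

```r
step1 <- result$crosswalks$step_1 |>
  dplyr::select(
    source_geoid,
    intermediate_geoid = target_geoid,
    allocation_step1 = allocation_factor_source_to_target)

step2 <- result$crosswalks$step_2 |>
  dplyr::select(
    intermediate_geoid = source_geoid,
    target_geoid,
    allocation_step2 = allocation_factor_source_to_target)

# Join on the intermediate geography and multiply allocation factors;
# summing collapses multiple paths between the same source/target pair.
composed <- step1 |>
  dplyr::inner_join(step2, by = "intermediate_geoid", relationship = "many-to-many") |>
  dplyr::summarize(
    allocation_factor_source_to_target = sum(allocation_step1 * allocation_step2),
    .by = c(source_geoid, target_geoid))
```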
\examples{ \dontrun{ # Same-year crosswalk between geographies (uses Geocorr) -get_crosswalk( +# Returns list with one crosswalk in crosswalks$step_1 +result <- get_crosswalk( source_geography = "zcta", target_geography = "puma22", weight = "population", cache = here::here("crosswalks-cache")) -# Inter-temporal crosswalk (uses NHGIS) -get_crosswalk( +# Apply to data using crosswalk_data() +output <- crosswalk_data( + data = my_data, + crosswalk = result, + count_columns = "count_population") + +# Multi-step crosswalk: both geography AND year change +# Returns list with two crosswalks in crosswalks$step_1 and crosswalks$step_2 +result <- get_crosswalk( source_geography = "tract", - target_geography = "tract", + target_geography = "zcta", source_year = 2010, target_year = 2020, - cache = here::here("crosswalks-cache")) + weight = "population") -# Non-census year crosswalk (2020 to 2022, CT changes) -get_crosswalk( - source_geography = "tract", - target_geography = "tract", - source_year = 2020, - target_year = 2022, - cache = here::here("crosswalks-cache")) +# crosswalk_data() automatically applies all steps +output <- crosswalk_data( + data = my_data, + crosswalk = result, + count_columns = "count_population") + +# To get intermediate results, set return_intermediate = TRUE +output <- crosswalk_data( + data = my_data, + crosswalk = result, + count_columns = "count_population", + return_intermediate = TRUE) } } diff --git a/man/get_crosswalk_chain.Rd b/man/get_crosswalk_chain.Rd new file mode 100644 index 0000000..d314cc4 --- /dev/null +++ b/man/get_crosswalk_chain.Rd @@ -0,0 +1,63 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/get_crosswalk_chain.R +\name{get_crosswalk_chain} +\alias{get_crosswalk_chain} +\title{Get a Chain of Crosswalks for Multi-Step Transformations} +\usage{ +get_crosswalk_chain( + source_geography, + target_geography, + source_year = NULL, + target_year = NULL, + weight = "population", + cache = NULL +) +} +\arguments{ +\item{source_geography}{Character. Source geography name.} + +\item{target_geography}{Character. Target geography name.} + +\item{source_year}{Numeric or NULL. Year of the source geography.} + +\item{target_year}{Numeric or NULL. Year of the target geography.} + +\item{weight}{Character or NULL. Weighting variable for Geocorr crosswalks.} + +\item{cache}{Directory path or NULL. Where to cache crosswalks.} +} +\value{ +A list with: +\describe{ +\item{crosswalks}{A named list of crosswalk tibbles (step_1, step_2, etc.)} +\item{plan}{The crosswalk plan from plan_crosswalk_chain()} +\item{message}{A formatted message describing the crosswalk chain} +} +} +\description{ +Retrieves a list of crosswalks needed to transform data from a source +geography/year to a target geography/year. For multi-step transformations, +users should apply each crosswalk sequentially using \code{crosswalk_data()}. 
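As a self-contained toy illustration of the count interpolation that `crosswalk_data()` performs when applying each step (values are hypothetical; counts are multiplied by the allocation factor and summed by target geography):

```r
library(dplyr)
library(tibble)

# One source unit split 60/40 across two target units (made-up values).
crosswalk <- tibble(
  source_geoid = c("A", "A"),
  target_geoid = c("X", "Y"),
  allocation_factor_source_to_target = c(0.6, 0.4))

data <- tibble(geoid = "A", count_population = 1000)

data |>
  inner_join(crosswalk, by = c("geoid" = "source_geoid")) |>
  summarize(
    count_population = sum(count_population * allocation_factor_source_to_target),
    .by = target_geoid)
#> target_geoid X -> 600; Y -> 400
```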
+} +\examples{ +\dontrun{ +# Get crosswalks for 2010 tracts to 2020 ZCTAs (requires two steps) +chain <- get_crosswalk_chain( + source_geography = "tract", + target_geography = "zcta", + source_year = 2010, + target_year = 2020, + weight = "population") + +# Apply crosswalks sequentially +data_step1 <- crosswalk_data( + data = my_data, + crosswalk = chain$crosswalks$step_1, + count_columns = "count_population") + +data_final <- crosswalk_data( + data = data_step1, + crosswalk = chain$crosswalks$step_2, + count_columns = "count_population") +} +} diff --git a/man/standardize_geography.Rd b/man/standardize_geography.Rd deleted file mode 100644 index b45ca6a..0000000 --- a/man/standardize_geography.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/get_nhgis_crosswalk.R -\name{standardize_geography} -\alias{standardize_geography} -\title{Standardize Geography Names} -\usage{ -standardize_geography(geography, context = "source") -} -\arguments{ -\item{geography}{Character. Geography name in various formats.} - -\item{context}{Character. Either "source" or "target" to determine valid options.} -} -\value{ -Character. Standardized geography code. -} -\description{ -Internal helper function to convert various geography name spellings to standard codes. -} -\keyword{internal} diff --git a/renv.lock b/renv.lock index 6caeb86..45cd8e6 100644 --- a/renv.lock +++ b/renv.lock @@ -1,1869 +1,1937 @@ -{ - "R": { - "Version": "4.5.1", - "Repositories": [ - { - "Name": "CRAN", - "URL": "https://packagemanager.posit.co/cran/latest" - } - ] - }, - "Packages": { - "R6": { - "Package": "R6", - "Version": "2.6.1", - "Source": "Repository", - "Title": "Encapsulated Classes with Reference Semantics", - "Authors@R": "c( person(\"Winston\", \"Chang\", , \"winston@posit.co\", role = c(\"aut\", \"cre\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "Creates classes with reference semantics, similar to R's built-in reference classes. Compared to reference classes, R6 classes are simpler and lighter-weight, and they are not built on S4 classes so they do not require the methods package. These classes allow public and private members, and they support inheritance, even when the classes are defined in different packages.", - "License": "MIT + file LICENSE", - "URL": "https://r6.r-lib.org, https://github.com/r-lib/R6", - "BugReports": "https://github.com/r-lib/R6/issues", - "Depends": [ - "R (>= 3.6)" - ], - "Suggests": [ - "lobstr", - "testthat (>= 3.0.0)" - ], - "Config/Needs/website": "tidyverse/tidytemplate, ggplot2, microbenchmark, scales", - "Config/testthat/edition": "3", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "NeedsCompilation": "no", - "Author": "Winston Chang [aut, cre], Posit Software, PBC [cph, fnd]", - "Maintainer": "Winston Chang ", - "Repository": "CRAN" - }, - "askpass": { - "Package": "askpass", - "Version": "1.2.1", - "Source": "Repository", - "Type": "Package", - "Title": "Password Entry Utilities for R, Git, and SSH", - "Authors@R": "person(\"Jeroen\", \"Ooms\", role = c(\"aut\", \"cre\"), email = \"jeroenooms@gmail.com\", comment = c(ORCID = \"0000-0002-4035-0289\"))", - "Description": "Cross-platform utilities for prompting the user for credentials or a passphrase, for example to authenticate with a server or read a protected key. Includes native programs for MacOS and Windows, hence no 'tcltk' is required. 
Password entry can be invoked in two different ways: directly from R via the askpass() function, or indirectly as password-entry back-end for 'ssh-agent' or 'git-credential' via the SSH_ASKPASS and GIT_ASKPASS environment variables. Thereby the user can be prompted for credentials or a passphrase if needed when R calls out to git or ssh.", - "License": "MIT + file LICENSE", - "URL": "https://r-lib.r-universe.dev/askpass", - "BugReports": "https://github.com/r-lib/askpass/issues", - "Encoding": "UTF-8", - "Imports": [ - "sys (>= 2.1)" - ], - "RoxygenNote": "7.2.3", - "Suggests": [ - "testthat" - ], - "Language": "en-US", - "NeedsCompilation": "yes", - "Author": "Jeroen Ooms [aut, cre] ()", - "Maintainer": "Jeroen Ooms ", - "Repository": "CRAN" - }, - "bit": { - "Package": "bit", - "Version": "4.6.0", - "Source": "Repository", - "Title": "Classes and Methods for Fast Memory-Efficient Boolean Selections", - "Authors@R": "c( person(\"Michael\", \"Chirico\", email = \"MichaelChirico4@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Jens\", \"Oehlschlägel\", role = \"aut\"), person(\"Brian\", \"Ripley\", role = \"ctb\") )", - "Depends": [ - "R (>= 3.4.0)" - ], - "Suggests": [ - "testthat (>= 3.0.0)", - "roxygen2", - "knitr", - "markdown", - "rmarkdown", - "microbenchmark", - "bit64 (>= 4.0.0)", - "ff (>= 4.0.0)" - ], - "Description": "Provided are classes for boolean and skewed boolean vectors, fast boolean methods, fast unique and non-unique integer sorting, fast set operations on sorted and unsorted sets of integers, and foundations for ff (range index, compression, chunked processing).", - "License": "GPL-2 | GPL-3", - "LazyLoad": "yes", - "ByteCompile": "yes", - "Encoding": "UTF-8", - "URL": "https://github.com/r-lib/bit", - "VignetteBuilder": "knitr, rmarkdown", - "RoxygenNote": "7.3.2", - "Config/testthat/edition": "3", - "NeedsCompilation": "yes", - "Author": "Michael Chirico [aut, cre], Jens Oehlschlägel [aut], Brian Ripley [ctb]", - "Maintainer": "Michael Chirico ", - "Repository": "CRAN" - }, - "bit64": { - "Package": "bit64", - "Version": "4.6.0-1", - "Source": "Repository", - "Title": "A S3 Class for Vectors of 64bit Integers", - "Authors@R": "c( person(\"Michael\", \"Chirico\", email = \"michaelchirico4@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Jens\", \"Oehlschlägel\", role = \"aut\"), person(\"Leonardo\", \"Silvestri\", role = \"ctb\"), person(\"Ofek\", \"Shilon\", role = \"ctb\") )", - "Depends": [ - "R (>= 3.4.0)", - "bit (>= 4.0.0)" - ], - "Description": "Package 'bit64' provides serializable S3 atomic 64bit (signed) integers. These are useful for handling database keys and exact counting in +-2^63. WARNING: do not use them as replacement for 32bit integers, integer64 are not supported for subscripting by R-core and they have different semantics when combined with double, e.g. integer64 + double => integer64. Class integer64 can be used in vectors, matrices, arrays and data.frames. Methods are available for coercion from and to logicals, integers, doubles, characters and factors as well as many elementwise and summary functions. 
Many fast algorithmic operations such as 'match' and 'order' support inter- active data exploration and manipulation and optionally leverage caching.", - "License": "GPL-2 | GPL-3", - "LazyLoad": "yes", - "ByteCompile": "yes", - "URL": "https://github.com/r-lib/bit64", - "Encoding": "UTF-8", - "Imports": [ - "graphics", - "methods", - "stats", - "utils" - ], - "Suggests": [ - "testthat (>= 3.0.3)", - "withr" - ], - "Config/testthat/edition": "3", - "Config/needs/development": "testthat", - "RoxygenNote": "7.3.2", - "NeedsCompilation": "yes", - "Author": "Michael Chirico [aut, cre], Jens Oehlschlägel [aut], Leonardo Silvestri [ctb], Ofek Shilon [ctb]", - "Maintainer": "Michael Chirico ", - "Repository": "CRAN" - }, - "cli": { - "Package": "cli", - "Version": "3.6.5", - "Source": "Repository", - "Title": "Helpers for Developing Command Line Interfaces", - "Authors@R": "c( person(\"Gábor\", \"Csárdi\", , \"gabor@posit.co\", role = c(\"aut\", \"cre\")), person(\"Hadley\", \"Wickham\", role = \"ctb\"), person(\"Kirill\", \"Müller\", role = \"ctb\"), person(\"Salim\", \"Brüggemann\", , \"salim-b@pm.me\", role = \"ctb\", comment = c(ORCID = \"0000-0002-5329-5987\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "A suite of tools to build attractive command line interfaces ('CLIs'), from semantic elements: headings, lists, alerts, paragraphs, etc. Supports custom themes via a 'CSS'-like language. It also contains a number of lower level 'CLI' elements: rules, boxes, trees, and 'Unicode' symbols with 'ASCII' alternatives. It support ANSI colors and text styles as well.", - "License": "MIT + file LICENSE", - "URL": "https://cli.r-lib.org, https://github.com/r-lib/cli", - "BugReports": "https://github.com/r-lib/cli/issues", - "Depends": [ - "R (>= 3.4)" - ], - "Imports": [ - "utils" - ], - "Suggests": [ - "callr", - "covr", - "crayon", - "digest", - "glue (>= 1.6.0)", - "grDevices", - "htmltools", - "htmlwidgets", - "knitr", - "methods", - "processx", - "ps (>= 1.3.4.9000)", - "rlang (>= 1.0.2.9003)", - "rmarkdown", - "rprojroot", - "rstudioapi", - "testthat (>= 3.2.0)", - "tibble", - "whoami", - "withr" - ], - "Config/Needs/website": "r-lib/asciicast, bench, brio, cpp11, decor, desc, fansi, prettyunits, sessioninfo, tidyverse/tidytemplate, usethis, vctrs", - "Config/testthat/edition": "3", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "NeedsCompilation": "yes", - "Author": "Gábor Csárdi [aut, cre], Hadley Wickham [ctb], Kirill Müller [ctb], Salim Brüggemann [ctb] (), Posit Software, PBC [cph, fnd]", - "Maintainer": "Gábor Csárdi ", - "Repository": "CRAN" - }, - "clipr": { - "Package": "clipr", - "Version": "0.8.0", - "Source": "Repository", - "Type": "Package", - "Title": "Read and Write from the System Clipboard", - "Authors@R": "c( person(\"Matthew\", \"Lincoln\", , \"matthew.d.lincoln@gmail.com\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0002-4387-3384\")), person(\"Louis\", \"Maddox\", role = \"ctb\"), person(\"Steve\", \"Simpson\", role = \"ctb\"), person(\"Jennifer\", \"Bryan\", role = \"ctb\") )", - "Description": "Simple utility functions to read from and write to the Windows, OS X, and X11 clipboards.", - "License": "GPL-3", - "URL": "https://github.com/mdlincoln/clipr, http://matthewlincoln.net/clipr/", - "BugReports": "https://github.com/mdlincoln/clipr/issues", - "Imports": [ - "utils" - ], - "Suggests": [ - "covr", - "knitr", - "rmarkdown", - "rstudioapi (>= 0.5)", - "testthat (>= 2.0.0)" - ], - "VignetteBuilder": "knitr", - 
"Encoding": "UTF-8", - "Language": "en-US", - "RoxygenNote": "7.1.2", - "SystemRequirements": "xclip (https://github.com/astrand/xclip) or xsel (http://www.vergenet.net/~conrad/software/xsel/) for accessing the X11 clipboard, or wl-clipboard (https://github.com/bugaevc/wl-clipboard) for systems using Wayland.", - "NeedsCompilation": "no", - "Author": "Matthew Lincoln [aut, cre] (), Louis Maddox [ctb], Steve Simpson [ctb], Jennifer Bryan [ctb]", - "Maintainer": "Matthew Lincoln ", - "Repository": "CRAN" - }, - "cpp11": { - "Package": "cpp11", - "Version": "0.5.2", - "Source": "Repository", - "Title": "A C++11 Interface for R's C Interface", - "Authors@R": "c( person(\"Davis\", \"Vaughan\", email = \"davis@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0003-4777-038X\")), person(\"Jim\",\"Hester\", role = \"aut\", comment = c(ORCID = \"0000-0002-2739-7082\")), person(\"Romain\", \"François\", role = \"aut\", comment = c(ORCID = \"0000-0002-2444-4226\")), person(\"Benjamin\", \"Kietzman\", role = \"ctb\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "Provides a header only, C++11 interface to R's C interface. Compared to other approaches 'cpp11' strives to be safe against long jumps from the C API as well as C++ exceptions, conform to normal R function semantics and supports interaction with 'ALTREP' vectors.", - "License": "MIT + file LICENSE", - "URL": "https://cpp11.r-lib.org, https://github.com/r-lib/cpp11", - "BugReports": "https://github.com/r-lib/cpp11/issues", - "Depends": [ - "R (>= 4.0.0)" - ], - "Suggests": [ - "bench", - "brio", - "callr", - "cli", - "covr", - "decor", - "desc", - "ggplot2", - "glue", - "knitr", - "lobstr", - "mockery", - "progress", - "rmarkdown", - "scales", - "Rcpp", - "testthat (>= 3.2.0)", - "tibble", - "utils", - "vctrs", - "withr" - ], - "VignetteBuilder": "knitr", - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Config/Needs/cpp11/cpp_register": "brio, cli, decor, desc, glue, tibble, vctrs", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "NeedsCompilation": "no", - "Author": "Davis Vaughan [aut, cre] (), Jim Hester [aut] (), Romain François [aut] (), Benjamin Kietzman [ctb], Posit Software, PBC [cph, fnd]", - "Maintainer": "Davis Vaughan ", - "Repository": "CRAN" - }, - "crayon": { - "Package": "crayon", - "Version": "1.5.3", - "Source": "Repository", - "Title": "Colored Terminal Output", - "Authors@R": "c( person(\"Gábor\", \"Csárdi\", , \"csardi.gabor@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Brodie\", \"Gaslam\", , \"brodie.gaslam@yahoo.com\", role = \"ctb\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "The crayon package is now superseded. Please use the 'cli' package for new projects. Colored terminal output on terminals that support 'ANSI' color and highlight codes. It also works in 'Emacs' 'ESS'. 'ANSI' color support is automatically detected. Colors and highlighting can be combined and nested. New styles can also be created easily. 
This package was inspired by the 'chalk' 'JavaScript' project.", - "License": "MIT + file LICENSE", - "URL": "https://r-lib.github.io/crayon/, https://github.com/r-lib/crayon", - "BugReports": "https://github.com/r-lib/crayon/issues", - "Imports": [ - "grDevices", - "methods", - "utils" - ], - "Suggests": [ - "mockery", - "rstudioapi", - "testthat", - "withr" - ], - "Config/Needs/website": "tidyverse/tidytemplate", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.1", - "Collate": "'aaa-rstudio-detect.R' 'aaaa-rematch2.R' 'aab-num-ansi-colors.R' 'aac-num-ansi-colors.R' 'ansi-256.R' 'ansi-palette.R' 'combine.R' 'string.R' 'utils.R' 'crayon-package.R' 'disposable.R' 'enc-utils.R' 'has_ansi.R' 'has_color.R' 'link.R' 'styles.R' 'machinery.R' 'parts.R' 'print.R' 'style-var.R' 'show.R' 'string_operations.R'", - "NeedsCompilation": "no", - "Author": "Gábor Csárdi [aut, cre], Brodie Gaslam [ctb], Posit Software, PBC [cph, fnd]", - "Maintainer": "Gábor Csárdi ", - "Repository": "CRAN" - }, - "curl": { - "Package": "curl", - "Version": "6.4.0", - "Source": "Repository", - "Type": "Package", - "Title": "A Modern and Flexible Web Client for R", - "Authors@R": "c( person(\"Jeroen\", \"Ooms\", role = c(\"aut\", \"cre\"), email = \"jeroenooms@gmail.com\", comment = c(ORCID = \"0000-0002-4035-0289\")), person(\"Hadley\", \"Wickham\", role = \"ctb\"), person(\"Posit Software, PBC\", role = \"cph\"))", - "Description": "Bindings to 'libcurl' for performing fully configurable HTTP/FTP requests where responses can be processed in memory, on disk, or streaming via the callback or connection interfaces. Some knowledge of 'libcurl' is recommended; for a more-user-friendly web client see the 'httr2' package which builds on this package with http specific tools and logic.", - "License": "MIT + file LICENSE", - "SystemRequirements": "libcurl (>= 7.73): libcurl-devel (rpm) or libcurl4-openssl-dev (deb)", - "URL": "https://jeroen.r-universe.dev/curl", - "BugReports": "https://github.com/jeroen/curl/issues", - "Suggests": [ - "spelling", - "testthat (>= 1.0.0)", - "knitr", - "jsonlite", - "later", - "rmarkdown", - "httpuv (>= 1.4.4)", - "webutils" - ], - "VignetteBuilder": "knitr", - "Depends": [ - "R (>= 3.0.0)" - ], - "RoxygenNote": "7.3.2.9000", - "Encoding": "UTF-8", - "Language": "en-US", - "NeedsCompilation": "yes", - "Author": "Jeroen Ooms [aut, cre] (ORCID: ), Hadley Wickham [ctb], Posit Software, PBC [cph]", - "Maintainer": "Jeroen Ooms ", - "Repository": "CRAN" - }, - "dplyr": { - "Package": "dplyr", - "Version": "1.1.4", - "Source": "Repository", - "Type": "Package", - "Title": "A Grammar of Data Manipulation", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Romain\", \"François\", role = \"aut\", comment = c(ORCID = \"0000-0002-2444-4226\")), person(\"Lionel\", \"Henry\", role = \"aut\"), person(\"Kirill\", \"Müller\", role = \"aut\", comment = c(ORCID = \"0000-0002-1416-3412\")), person(\"Davis\", \"Vaughan\", , \"davis@posit.co\", role = \"aut\", comment = c(ORCID = \"0000-0003-4777-038X\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "A fast, consistent tool for working with data frame like objects, both in memory and out of memory.", - "License": "MIT + file LICENSE", - "URL": "https://dplyr.tidyverse.org, https://github.com/tidyverse/dplyr", - "BugReports": "https://github.com/tidyverse/dplyr/issues", - "Depends": [ - "R (>= 3.5.0)" - ], - "Imports": [ - "cli 
(>= 3.4.0)", - "generics", - "glue (>= 1.3.2)", - "lifecycle (>= 1.0.3)", - "magrittr (>= 1.5)", - "methods", - "pillar (>= 1.9.0)", - "R6", - "rlang (>= 1.1.0)", - "tibble (>= 3.2.0)", - "tidyselect (>= 1.2.0)", - "utils", - "vctrs (>= 0.6.4)" - ], - "Suggests": [ - "bench", - "broom", - "callr", - "covr", - "DBI", - "dbplyr (>= 2.2.1)", - "ggplot2", - "knitr", - "Lahman", - "lobstr", - "microbenchmark", - "nycflights13", - "purrr", - "rmarkdown", - "RMySQL", - "RPostgreSQL", - "RSQLite", - "stringi (>= 1.7.6)", - "testthat (>= 3.1.5)", - "tidyr (>= 1.3.0)", - "withr" - ], - "VignetteBuilder": "knitr", - "Config/Needs/website": "tidyverse, shiny, pkgdown, tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Encoding": "UTF-8", - "LazyData": "true", - "RoxygenNote": "7.2.3", - "NeedsCompilation": "yes", - "Author": "Hadley Wickham [aut, cre] (), Romain François [aut] (), Lionel Henry [aut], Kirill Müller [aut] (), Davis Vaughan [aut] (), Posit Software, PBC [cph, fnd]", - "Maintainer": "Hadley Wickham ", - "Repository": "CRAN" - }, - "generics": { - "Package": "generics", - "Version": "0.1.4", - "Source": "Repository", - "Title": "Common S3 Generics not Provided by Base R Methods Related to Model Fitting", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Max\", \"Kuhn\", , \"max@posit.co\", role = \"aut\"), person(\"Davis\", \"Vaughan\", , \"davis@posit.co\", role = \"aut\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\"), comment = c(ROR = \"https://ror.org/03wc8by49\")) )", - "Description": "In order to reduce potential package dependencies and conflicts, generics provides a number of commonly used S3 generics.", - "License": "MIT + file LICENSE", - "URL": "https://generics.r-lib.org, https://github.com/r-lib/generics", - "BugReports": "https://github.com/r-lib/generics/issues", - "Depends": [ - "R (>= 3.6)" - ], - "Imports": [ - "methods" - ], - "Suggests": [ - "covr", - "pkgload", - "testthat (>= 3.0.0)", - "tibble", - "withr" - ], - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "NeedsCompilation": "no", - "Author": "Hadley Wickham [aut, cre] (ORCID: ), Max Kuhn [aut], Davis Vaughan [aut], Posit Software, PBC [cph, fnd] (ROR: )", - "Maintainer": "Hadley Wickham ", - "Repository": "CRAN" - }, - "glue": { - "Package": "glue", - "Version": "1.8.0", - "Source": "Repository", - "Title": "Interpreted String Literals", - "Authors@R": "c( person(\"Jim\", \"Hester\", role = \"aut\", comment = c(ORCID = \"0000-0002-2739-7082\")), person(\"Jennifer\", \"Bryan\", , \"jenny@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0002-6983-2759\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "An implementation of interpreted string literals, inspired by Python's Literal String Interpolation and Docstrings and Julia's Triple-Quoted String Literals .", - "License": "MIT + file LICENSE", - "URL": "https://glue.tidyverse.org/, https://github.com/tidyverse/glue", - "BugReports": "https://github.com/tidyverse/glue/issues", - "Depends": [ - "R (>= 3.6)" - ], - "Imports": [ - "methods" - ], - "Suggests": [ - "crayon", - "DBI (>= 1.2.0)", - "dplyr", - "knitr", - "magrittr", - "rlang", - "rmarkdown", - "RSQLite", - "testthat (>= 3.2.0)", - "vctrs (>= 0.3.0)", - "waldo (>= 0.5.3)", - "withr" - ], - "VignetteBuilder": "knitr", - "ByteCompile": 
"true", - "Config/Needs/website": "bench, forcats, ggbeeswarm, ggplot2, R.utils, rprintf, tidyr, tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "NeedsCompilation": "yes", - "Author": "Jim Hester [aut] (), Jennifer Bryan [aut, cre] (), Posit Software, PBC [cph, fnd]", - "Maintainer": "Jennifer Bryan ", - "Repository": "CRAN" - }, - "hms": { - "Package": "hms", - "Version": "1.1.3", - "Source": "Repository", - "Title": "Pretty Time of Day", - "Date": "2023-03-21", - "Authors@R": "c( person(\"Kirill\", \"Müller\", role = c(\"aut\", \"cre\"), email = \"kirill@cynkra.com\", comment = c(ORCID = \"0000-0002-1416-3412\")), person(\"R Consortium\", role = \"fnd\"), person(\"RStudio\", role = \"fnd\") )", - "Description": "Implements an S3 class for storing and formatting time-of-day values, based on the 'difftime' class.", - "Imports": [ - "lifecycle", - "methods", - "pkgconfig", - "rlang (>= 1.0.2)", - "vctrs (>= 0.3.8)" - ], - "Suggests": [ - "crayon", - "lubridate", - "pillar (>= 1.1.0)", - "testthat (>= 3.0.0)" - ], - "License": "MIT + file LICENSE", - "Encoding": "UTF-8", - "URL": "https://hms.tidyverse.org/, https://github.com/tidyverse/hms", - "BugReports": "https://github.com/tidyverse/hms/issues", - "RoxygenNote": "7.2.3", - "Config/testthat/edition": "3", - "Config/autostyle/scope": "line_breaks", - "Config/autostyle/strict": "false", - "Config/Needs/website": "tidyverse/tidytemplate", - "NeedsCompilation": "no", - "Author": "Kirill Müller [aut, cre] (), R Consortium [fnd], RStudio [fnd]", - "Maintainer": "Kirill Müller ", - "Repository": "CRAN" - }, - "httr": { - "Package": "httr", - "Version": "1.4.7", - "Source": "Repository", - "Title": "Tools for Working with URLs and HTTP", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\")), person(\"Posit, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "Useful tools for working with HTTP organised by HTTP verbs (GET(), POST(), etc). Configuration functions make it easy to control additional request components (authenticate(), add_headers() and so on).", - "License": "MIT + file LICENSE", - "URL": "https://httr.r-lib.org/, https://github.com/r-lib/httr", - "BugReports": "https://github.com/r-lib/httr/issues", - "Depends": [ - "R (>= 3.5)" - ], - "Imports": [ - "curl (>= 5.0.2)", - "jsonlite", - "mime", - "openssl (>= 0.8)", - "R6" - ], - "Suggests": [ - "covr", - "httpuv", - "jpeg", - "knitr", - "png", - "readr", - "rmarkdown", - "testthat (>= 0.8.0)", - "xml2" - ], - "VignetteBuilder": "knitr", - "Config/Needs/website": "tidyverse/tidytemplate", - "Encoding": "UTF-8", - "RoxygenNote": "7.2.3", - "NeedsCompilation": "no", - "Author": "Hadley Wickham [aut, cre], Posit, PBC [cph, fnd]", - "Maintainer": "Hadley Wickham ", - "Repository": "CRAN" - }, - "httr2": { - "Package": "httr2", - "Version": "1.2.0", - "Source": "Repository", - "Title": "Perform HTTP Requests and Process the Responses", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")), person(\"Maximilian\", \"Girlich\", role = \"ctb\") )", - "Description": "Tools for creating and modifying HTTP requests, then performing them and processing the results. 
'httr2' is a modern re-imagining of 'httr' that uses a pipe-based interface and solves more of the problems that API wrapping packages face.", - "License": "MIT + file LICENSE", - "URL": "https://httr2.r-lib.org, https://github.com/r-lib/httr2", - "BugReports": "https://github.com/r-lib/httr2/issues", - "Depends": [ - "R (>= 4.1)" - ], - "Imports": [ - "cli (>= 3.0.0)", - "curl (>= 6.4.0)", - "glue", - "lifecycle", - "magrittr", - "openssl", - "R6", - "rappdirs", - "rlang (>= 1.1.0)", - "vctrs (>= 0.6.3)", - "withr" - ], - "Suggests": [ - "askpass", - "bench", - "clipr", - "covr", - "docopt", - "httpuv", - "jose", - "jsonlite", - "knitr", - "later (>= 1.4.0)", - "nanonext", - "paws.common", - "promises", - "rmarkdown", - "testthat (>= 3.1.8)", - "tibble", - "webfakes (>= 1.4.0)", - "xml2" - ], - "VignetteBuilder": "knitr", - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Config/testthat/parallel": "true", - "Config/testthat/start-first": "resp-stream, req-perform", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "NeedsCompilation": "no", - "Author": "Hadley Wickham [aut, cre], Posit Software, PBC [cph, fnd], Maximilian Girlich [ctb]", - "Maintainer": "Hadley Wickham ", - "Repository": "CRAN" - }, - "janitor": { - "Package": "janitor", - "Version": "2.2.1", - "Source": "Repository", - "Title": "Simple Tools for Examining and Cleaning Dirty Data", - "Authors@R": "c(person(\"Sam\", \"Firke\", email = \"samuel.firke@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Bill\", \"Denney\", email = \"wdenney@humanpredictions.com\", role = \"ctb\"), person(\"Chris\", \"Haid\", email = \"chrishaid@gmail.com\", role = \"ctb\"), person(\"Ryan\", \"Knight\", email = \"ryangknight@gmail.com\", role = \"ctb\"), person(\"Malte\", \"Grosser\", email = \"malte.grosser@gmail.com\", role = \"ctb\"), person(\"Jonathan\", \"Zadra\", email = \"jonathan.zadra@sorensonimpact.com\", role = \"ctb\"))", - "Description": "The main janitor functions can: perfectly format data.frame column names; provide quick counts of variable combinations (i.e., frequency tables and crosstabs); and explore duplicate records. Other janitor functions nicely format the tabulation results. These tabulate-and-report functions approximate popular features of SPSS and Microsoft Excel. This package follows the principles of the \"tidyverse\" and works well with the pipe function %>%. 
janitor was built with beginning-to-intermediate R users in mind and is optimized for user-friendliness.", - "URL": "https://github.com/sfirke/janitor, https://sfirke.github.io/janitor/", - "BugReports": "https://github.com/sfirke/janitor/issues", - "Depends": [ - "R (>= 3.1.2)" - ], - "Imports": [ - "dplyr (>= 1.0.0)", - "hms", - "lifecycle", - "lubridate", - "magrittr", - "purrr", - "rlang", - "stringi", - "stringr", - "snakecase (>= 0.9.2)", - "tidyselect (>= 1.0.0)", - "tidyr (>= 0.7.0)" - ], - "License": "MIT + file LICENSE", - "RoxygenNote": "7.2.3", - "Suggests": [ - "dbplyr", - "knitr", - "rmarkdown", - "RSQLite", - "sf", - "testthat (>= 3.0.0)", - "tibble", - "tidygraph" - ], - "VignetteBuilder": "knitr", - "Encoding": "UTF-8", - "Config/testthat/edition": "3", - "NeedsCompilation": "no", - "Author": "Sam Firke [aut, cre], Bill Denney [ctb], Chris Haid [ctb], Ryan Knight [ctb], Malte Grosser [ctb], Jonathan Zadra [ctb]", - "Maintainer": "Sam Firke ", - "Repository": "CRAN" - }, - "jsonlite": { - "Package": "jsonlite", - "Version": "2.0.0", - "Source": "Repository", - "Title": "A Simple and Robust JSON Parser and Generator for R", - "License": "MIT + file LICENSE", - "Depends": [ - "methods" - ], - "Authors@R": "c( person(\"Jeroen\", \"Ooms\", role = c(\"aut\", \"cre\"), email = \"jeroenooms@gmail.com\", comment = c(ORCID = \"0000-0002-4035-0289\")), person(\"Duncan\", \"Temple Lang\", role = \"ctb\"), person(\"Lloyd\", \"Hilaiel\", role = \"cph\", comment=\"author of bundled libyajl\"))", - "URL": "https://jeroen.r-universe.dev/jsonlite https://arxiv.org/abs/1403.2805", - "BugReports": "https://github.com/jeroen/jsonlite/issues", - "Maintainer": "Jeroen Ooms ", - "VignetteBuilder": "knitr, R.rsp", - "Description": "A reasonably fast JSON parser and generator, optimized for statistical data and the web. Offers simple, flexible tools for working with JSON in R, and is particularly powerful for building pipelines and interacting with a web API. The implementation is based on the mapping described in the vignette (Ooms, 2014). In addition to converting JSON data from/to R objects, 'jsonlite' contains functions to stream, validate, and prettify JSON data. 
The unit tests included with the package verify that all edge cases are encoded and decoded consistently for use with dynamic data in systems and applications.", - "Suggests": [ - "httr", - "vctrs", - "testthat", - "knitr", - "rmarkdown", - "R.rsp", - "sf" - ], - "RoxygenNote": "7.3.2", - "Encoding": "UTF-8", - "NeedsCompilation": "yes", - "Author": "Jeroen Ooms [aut, cre] (), Duncan Temple Lang [ctb], Lloyd Hilaiel [cph] (author of bundled libyajl)", - "Repository": "CRAN" - }, - "lifecycle": { - "Package": "lifecycle", - "Version": "1.0.4", - "Source": "Repository", - "Title": "Manage the Life Cycle of your Package Functions", - "Authors@R": "c( person(\"Lionel\", \"Henry\", , \"lionel@posit.co\", role = c(\"aut\", \"cre\")), person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = \"aut\", comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "Manage the life cycle of your exported functions with shared conventions, documentation badges, and user-friendly deprecation warnings.", - "License": "MIT + file LICENSE", - "URL": "https://lifecycle.r-lib.org/, https://github.com/r-lib/lifecycle", - "BugReports": "https://github.com/r-lib/lifecycle/issues", - "Depends": [ - "R (>= 3.6)" - ], - "Imports": [ - "cli (>= 3.4.0)", - "glue", - "rlang (>= 1.1.0)" - ], - "Suggests": [ - "covr", - "crayon", - "knitr", - "lintr", - "rmarkdown", - "testthat (>= 3.0.1)", - "tibble", - "tidyverse", - "tools", - "vctrs", - "withr" - ], - "VignetteBuilder": "knitr", - "Config/Needs/website": "tidyverse/tidytemplate, usethis", - "Config/testthat/edition": "3", - "Encoding": "UTF-8", - "RoxygenNote": "7.2.1", - "NeedsCompilation": "no", - "Author": "Lionel Henry [aut, cre], Hadley Wickham [aut] (), Posit Software, PBC [cph, fnd]", - "Maintainer": "Lionel Henry ", - "Repository": "CRAN" - }, - "lubridate": { - "Package": "lubridate", - "Version": "1.9.4", - "Source": "Repository", - "Type": "Package", - "Title": "Make Dealing with Dates a Little Easier", - "Authors@R": "c( person(\"Vitalie\", \"Spinu\", , \"spinuvit@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Garrett\", \"Grolemund\", role = \"aut\"), person(\"Hadley\", \"Wickham\", role = \"aut\"), person(\"Davis\", \"Vaughan\", role = \"ctb\"), person(\"Ian\", \"Lyttle\", role = \"ctb\"), person(\"Imanuel\", \"Costigan\", role = \"ctb\"), person(\"Jason\", \"Law\", role = \"ctb\"), person(\"Doug\", \"Mitarotonda\", role = \"ctb\"), person(\"Joseph\", \"Larmarange\", role = \"ctb\"), person(\"Jonathan\", \"Boiser\", role = \"ctb\"), person(\"Chel Hee\", \"Lee\", role = \"ctb\") )", - "Maintainer": "Vitalie Spinu ", - "Description": "Functions to work with date-times and time-spans: fast and user friendly parsing of date-time data, extraction and updating of components of a date-time (years, months, days, hours, minutes, and seconds), algebraic manipulation on date-time and time-span objects. 
The 'lubridate' package has a consistent and memorable syntax that makes working with dates easy and fun.", - "License": "GPL (>= 2)", - "URL": "https://lubridate.tidyverse.org, https://github.com/tidyverse/lubridate", - "BugReports": "https://github.com/tidyverse/lubridate/issues", - "Depends": [ - "methods", - "R (>= 3.2)" - ], - "Imports": [ - "generics", - "timechange (>= 0.3.0)" - ], - "Suggests": [ - "covr", - "knitr", - "rmarkdown", - "testthat (>= 2.1.0)", - "vctrs (>= 0.6.5)" - ], - "Enhances": [ - "chron", - "data.table", - "timeDate", - "tis", - "zoo" - ], - "VignetteBuilder": "knitr", - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Encoding": "UTF-8", - "LazyData": "true", - "RoxygenNote": "7.2.3", - "SystemRequirements": "C++11, A system with zoneinfo data (e.g. /usr/share/zoneinfo). On Windows the zoneinfo included with R is used.", - "Collate": "'Dates.r' 'POSIXt.r' 'util.r' 'parse.r' 'timespans.r' 'intervals.r' 'difftimes.r' 'durations.r' 'periods.r' 'accessors-date.R' 'accessors-day.r' 'accessors-dst.r' 'accessors-hour.r' 'accessors-minute.r' 'accessors-month.r' 'accessors-quarter.r' 'accessors-second.r' 'accessors-tz.r' 'accessors-week.r' 'accessors-year.r' 'am-pm.r' 'time-zones.r' 'numeric.r' 'coercion.r' 'constants.r' 'cyclic_encoding.r' 'data.r' 'decimal-dates.r' 'deprecated.r' 'format_ISO8601.r' 'guess.r' 'hidden.r' 'instants.r' 'leap-years.r' 'ops-addition.r' 'ops-compare.r' 'ops-division.r' 'ops-integer-division.r' 'ops-m+.r' 'ops-modulo.r' 'ops-multiplication.r' 'ops-subtraction.r' 'package.r' 'pretty.r' 'round.r' 'stamp.r' 'tzdir.R' 'update.r' 'vctrs.R' 'zzz.R'", - "NeedsCompilation": "yes", - "Author": "Vitalie Spinu [aut, cre], Garrett Grolemund [aut], Hadley Wickham [aut], Davis Vaughan [ctb], Ian Lyttle [ctb], Imanuel Costigan [ctb], Jason Law [ctb], Doug Mitarotonda [ctb], Joseph Larmarange [ctb], Jonathan Boiser [ctb], Chel Hee Lee [ctb]", - "Repository": "CRAN" - }, - "magrittr": { - "Package": "magrittr", - "Version": "2.0.3", - "Source": "Repository", - "Type": "Package", - "Title": "A Forward-Pipe Operator for R", - "Authors@R": "c( person(\"Stefan Milton\", \"Bache\", , \"stefan@stefanbache.dk\", role = c(\"aut\", \"cph\"), comment = \"Original author and creator of magrittr\"), person(\"Hadley\", \"Wickham\", , \"hadley@rstudio.com\", role = \"aut\"), person(\"Lionel\", \"Henry\", , \"lionel@rstudio.com\", role = \"cre\"), person(\"RStudio\", role = c(\"cph\", \"fnd\")) )", - "Description": "Provides a mechanism for chaining commands with a new forward-pipe operator, %>%. This operator will forward a value, or the result of an expression, into the next function call/expression. There is flexible support for the type of right-hand side expressions. For more information, see package vignette. 
To quote Rene Magritte, \"Ceci n'est pas un pipe.\"", - "License": "MIT + file LICENSE", - "URL": "https://magrittr.tidyverse.org, https://github.com/tidyverse/magrittr", - "BugReports": "https://github.com/tidyverse/magrittr/issues", - "Depends": [ - "R (>= 3.4.0)" - ], - "Suggests": [ - "covr", - "knitr", - "rlang", - "rmarkdown", - "testthat" - ], - "VignetteBuilder": "knitr", - "ByteCompile": "Yes", - "Config/Needs/website": "tidyverse/tidytemplate", - "Encoding": "UTF-8", - "RoxygenNote": "7.1.2", - "NeedsCompilation": "yes", - "Author": "Stefan Milton Bache [aut, cph] (Original author and creator of magrittr), Hadley Wickham [aut], Lionel Henry [cre], RStudio [cph, fnd]", - "Maintainer": "Lionel Henry ", - "Repository": "CRAN" - }, - "mime": { - "Package": "mime", - "Version": "0.13", - "Source": "Repository", - "Type": "Package", - "Title": "Map Filenames to MIME Types", - "Authors@R": "c( person(\"Yihui\", \"Xie\", role = c(\"aut\", \"cre\"), email = \"xie@yihui.name\", comment = c(ORCID = \"0000-0003-0645-5666\", URL = \"https://yihui.org\")), person(\"Jeffrey\", \"Horner\", role = \"ctb\"), person(\"Beilei\", \"Bian\", role = \"ctb\") )", - "Description": "Guesses the MIME type from a filename extension using the data derived from /etc/mime.types in UNIX-type systems.", - "Imports": [ - "tools" - ], - "License": "GPL", - "URL": "https://github.com/yihui/mime", - "BugReports": "https://github.com/yihui/mime/issues", - "RoxygenNote": "7.3.2", - "Encoding": "UTF-8", - "NeedsCompilation": "yes", - "Author": "Yihui Xie [aut, cre] (, https://yihui.org), Jeffrey Horner [ctb], Beilei Bian [ctb]", - "Maintainer": "Yihui Xie ", - "Repository": "CRAN" - }, - "openssl": { - "Package": "openssl", - "Version": "2.3.3", - "Source": "Repository", - "Type": "Package", - "Title": "Toolkit for Encryption, Signatures and Certificates Based on OpenSSL", - "Authors@R": "c(person(\"Jeroen\", \"Ooms\", role = c(\"aut\", \"cre\"), email = \"jeroenooms@gmail.com\", comment = c(ORCID = \"0000-0002-4035-0289\")), person(\"Oliver\", \"Keyes\", role = \"ctb\"))", - "Description": "Bindings to OpenSSL libssl and libcrypto, plus custom SSH key parsers. Supports RSA, DSA and EC curves P-256, P-384, P-521, and curve25519. Cryptographic signatures can either be created and verified manually or via x509 certificates. AES can be used in cbc, ctr or gcm mode for symmetric encryption; RSA for asymmetric (public key) encryption or EC for Diffie Hellman. High-level envelope functions combine RSA and AES for encrypting arbitrary sized data. 
Other utilities include key generators, hash functions (md5, sha1, sha256, etc), base64 encoder, a secure random number generator, and 'bignum' math methods for manually performing crypto calculations on large multibyte integers.", - "License": "MIT + file LICENSE", - "URL": "https://jeroen.r-universe.dev/openssl", - "BugReports": "https://github.com/jeroen/openssl/issues", - "SystemRequirements": "OpenSSL >= 1.0.2", - "VignetteBuilder": "knitr", - "Imports": [ - "askpass" - ], - "Suggests": [ - "curl", - "testthat (>= 2.1.0)", - "digest", - "knitr", - "rmarkdown", - "jsonlite", - "jose", - "sodium" - ], - "RoxygenNote": "7.3.2", - "Encoding": "UTF-8", - "NeedsCompilation": "yes", - "Author": "Jeroen Ooms [aut, cre] (ORCID: ), Oliver Keyes [ctb]", - "Maintainer": "Jeroen Ooms ", - "Repository": "CRAN" - }, - "pillar": { - "Package": "pillar", - "Version": "1.11.0", - "Source": "Repository", - "Title": "Coloured Formatting for Columns", - "Authors@R": "c(person(given = \"Kirill\", family = \"M\\u00fcller\", role = c(\"aut\", \"cre\"), email = \"kirill@cynkra.com\", comment = c(ORCID = \"0000-0002-1416-3412\")), person(given = \"Hadley\", family = \"Wickham\", role = \"aut\"), person(given = \"RStudio\", role = \"cph\"))", - "Description": "Provides 'pillar' and 'colonnade' generics designed for formatting columns of data using the full range of colours provided by modern terminals.", - "License": "MIT + file LICENSE", - "URL": "https://pillar.r-lib.org/, https://github.com/r-lib/pillar", - "BugReports": "https://github.com/r-lib/pillar/issues", - "Imports": [ - "cli (>= 2.3.0)", - "glue", - "lifecycle", - "rlang (>= 1.0.2)", - "utf8 (>= 1.1.0)", - "utils", - "vctrs (>= 0.5.0)" - ], - "Suggests": [ - "bit64", - "DBI", - "debugme", - "DiagrammeR", - "dplyr", - "formattable", - "ggplot2", - "knitr", - "lubridate", - "nanotime", - "nycflights13", - "palmerpenguins", - "rmarkdown", - "scales", - "stringi", - "survival", - "testthat (>= 3.1.1)", - "tibble", - "units (>= 0.7.2)", - "vdiffr", - "withr" - ], - "VignetteBuilder": "knitr", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2.9000", - "Config/testthat/edition": "3", - "Config/testthat/parallel": "true", - "Config/testthat/start-first": "format_multi_fuzz, format_multi_fuzz_2, format_multi, ctl_colonnade, ctl_colonnade_1, ctl_colonnade_2", - "Config/autostyle/scope": "line_breaks", - "Config/autostyle/strict": "true", - "Config/gha/extra-packages": "units=?ignore-before-r=4.3.0", - "Config/Needs/website": "tidyverse/tidytemplate", - "NeedsCompilation": "no", - "Author": "Kirill Müller [aut, cre] (ORCID: ), Hadley Wickham [aut], RStudio [cph]", - "Maintainer": "Kirill Müller ", - "Repository": "CRAN" - }, - "pkgconfig": { - "Package": "pkgconfig", - "Version": "2.0.3", - "Source": "Repository", - "Title": "Private Configuration for 'R' Packages", - "Author": "Gábor Csárdi", - "Maintainer": "Gábor Csárdi ", - "Description": "Set configuration options on a per-package basis. 
Options set by a given package only apply to that package, other packages are unaffected.", - "License": "MIT + file LICENSE", - "LazyData": "true", - "Imports": [ - "utils" - ], - "Suggests": [ - "covr", - "testthat", - "disposables (>= 1.0.3)" - ], - "URL": "https://github.com/r-lib/pkgconfig#readme", - "BugReports": "https://github.com/r-lib/pkgconfig/issues", - "Encoding": "UTF-8", - "NeedsCompilation": "no", - "Repository": "CRAN" - }, - "prettyunits": { - "Package": "prettyunits", - "Version": "1.2.0", - "Source": "Repository", - "Title": "Pretty, Human Readable Formatting of Quantities", - "Authors@R": "c( person(\"Gabor\", \"Csardi\", email=\"csardi.gabor@gmail.com\", role=c(\"aut\", \"cre\")), person(\"Bill\", \"Denney\", email=\"wdenney@humanpredictions.com\", role=c(\"ctb\"), comment=c(ORCID=\"0000-0002-5759-428X\")), person(\"Christophe\", \"Regouby\", email=\"christophe.regouby@free.fr\", role=c(\"ctb\")) )", - "Description": "Pretty, human readable formatting of quantities. Time intervals: '1337000' -> '15d 11h 23m 20s'. Vague time intervals: '2674000' -> 'about a month ago'. Bytes: '1337' -> '1.34 kB'. Rounding: '99' with 3 significant digits -> '99.0' p-values: '0.00001' -> '<0.0001'. Colors: '#FF0000' -> 'red'. Quantities: '1239437' -> '1.24 M'.", - "License": "MIT + file LICENSE", - "URL": "https://github.com/r-lib/prettyunits", - "BugReports": "https://github.com/r-lib/prettyunits/issues", - "Depends": [ - "R(>= 2.10)" - ], - "Suggests": [ - "codetools", - "covr", - "testthat" - ], - "RoxygenNote": "7.2.3", - "Encoding": "UTF-8", - "NeedsCompilation": "no", - "Author": "Gabor Csardi [aut, cre], Bill Denney [ctb] (), Christophe Regouby [ctb]", - "Maintainer": "Gabor Csardi ", - "Repository": "CRAN" - }, - "progress": { - "Package": "progress", - "Version": "1.2.3", - "Source": "Repository", - "Title": "Terminal Progress Bars", - "Authors@R": "c( person(\"Gábor\", \"Csárdi\", , \"csardi.gabor@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Rich\", \"FitzJohn\", role = \"aut\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "Configurable Progress bars, they may include percentage, elapsed time, and/or the estimated completion time. They work in terminals, in 'Emacs' 'ESS', 'RStudio', 'Windows' 'Rgui' and the 'macOS' 'R.app'. 
The package also provides a 'C++' 'API', that works with or without 'Rcpp'.", - "License": "MIT + file LICENSE", - "URL": "https://github.com/r-lib/progress#readme, http://r-lib.github.io/progress/", - "BugReports": "https://github.com/r-lib/progress/issues", - "Depends": [ - "R (>= 3.6)" - ], - "Imports": [ - "crayon", - "hms", - "prettyunits", - "R6" - ], - "Suggests": [ - "Rcpp", - "testthat (>= 3.0.0)", - "withr" - ], - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Encoding": "UTF-8", - "RoxygenNote": "7.2.3", - "NeedsCompilation": "no", - "Author": "Gábor Csárdi [aut, cre], Rich FitzJohn [aut], Posit Software, PBC [cph, fnd]", - "Maintainer": "Gábor Csárdi ", - "Repository": "CRAN" - }, - "purrr": { - "Package": "purrr", - "Version": "1.1.0", - "Source": "Repository", - "Title": "Functional Programming Tools", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Lionel\", \"Henry\", , \"lionel@posit.co\", role = \"aut\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\"), comment = c(ROR = \"https://ror.org/03wc8by49\")) )", - "Description": "A complete and consistent functional programming toolkit for R.", - "License": "MIT + file LICENSE", - "URL": "https://purrr.tidyverse.org/, https://github.com/tidyverse/purrr", - "BugReports": "https://github.com/tidyverse/purrr/issues", - "Depends": [ - "R (>= 4.1)" - ], - "Imports": [ - "cli (>= 3.6.1)", - "lifecycle (>= 1.0.3)", - "magrittr (>= 1.5.0)", - "rlang (>= 1.1.1)", - "vctrs (>= 0.6.3)" - ], - "Suggests": [ - "carrier (>= 0.2.0)", - "covr", - "dplyr (>= 0.7.8)", - "httr", - "knitr", - "lubridate", - "mirai (>= 2.4.0)", - "rmarkdown", - "testthat (>= 3.0.0)", - "tibble", - "tidyselect" - ], - "LinkingTo": [ - "cli" - ], - "VignetteBuilder": "knitr", - "Biarch": "true", - "Config/build/compilation-database": "true", - "Config/Needs/website": "tidyverse/tidytemplate, tidyr", - "Config/testthat/edition": "3", - "Config/testthat/parallel": "TRUE", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "NeedsCompilation": "yes", - "Author": "Hadley Wickham [aut, cre] (ORCID: ), Lionel Henry [aut], Posit Software, PBC [cph, fnd] (ROR: )", - "Maintainer": "Hadley Wickham ", - "Repository": "CRAN" - }, - "rappdirs": { - "Package": "rappdirs", - "Version": "0.3.3", - "Source": "Repository", - "Type": "Package", - "Title": "Application Directories: Determine Where to Save Data, Caches, and Logs", - "Authors@R": "c(person(given = \"Hadley\", family = \"Wickham\", role = c(\"trl\", \"cre\", \"cph\"), email = \"hadley@rstudio.com\"), person(given = \"RStudio\", role = \"cph\"), person(given = \"Sridhar\", family = \"Ratnakumar\", role = \"aut\"), person(given = \"Trent\", family = \"Mick\", role = \"aut\"), person(given = \"ActiveState\", role = \"cph\", comment = \"R/appdir.r, R/cache.r, R/data.r, R/log.r translated from appdirs\"), person(given = \"Eddy\", family = \"Petrisor\", role = \"ctb\"), person(given = \"Trevor\", family = \"Davis\", role = c(\"trl\", \"aut\")), person(given = \"Gabor\", family = \"Csardi\", role = \"ctb\"), person(given = \"Gregory\", family = \"Jefferis\", role = \"ctb\"))", - "Description": "An easy way to determine which directories on the users computer you should use to save data, caches and logs. 
A port of Python's 'Appdirs' () to R.", - "License": "MIT + file LICENSE", - "URL": "https://rappdirs.r-lib.org, https://github.com/r-lib/rappdirs", - "BugReports": "https://github.com/r-lib/rappdirs/issues", - "Depends": [ - "R (>= 3.2)" - ], - "Suggests": [ - "roxygen2", - "testthat (>= 3.0.0)", - "covr", - "withr" - ], - "Copyright": "Original python appdirs module copyright (c) 2010 ActiveState Software Inc. R port copyright Hadley Wickham, RStudio. See file LICENSE for details.", - "Encoding": "UTF-8", - "RoxygenNote": "7.1.1", - "Config/testthat/edition": "3", - "NeedsCompilation": "yes", - "Author": "Hadley Wickham [trl, cre, cph], RStudio [cph], Sridhar Ratnakumar [aut], Trent Mick [aut], ActiveState [cph] (R/appdir.r, R/cache.r, R/data.r, R/log.r translated from appdirs), Eddy Petrisor [ctb], Trevor Davis [trl, aut], Gabor Csardi [ctb], Gregory Jefferis [ctb]", - "Maintainer": "Hadley Wickham ", - "Repository": "CRAN" - }, - "readr": { - "Package": "readr", - "Version": "2.1.6", - "Source": "Repository", - "Title": "Read Rectangular Text Data", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = \"aut\"), person(\"Jim\", \"Hester\", role = \"aut\"), person(\"Romain\", \"Francois\", role = \"ctb\"), person(\"Jennifer\", \"Bryan\", , \"jenny@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0002-6983-2759\")), person(\"Shelby\", \"Bearrows\", role = \"ctb\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")), person(\"https://github.com/mandreyel/\", role = \"cph\", comment = \"mio library\"), person(\"Jukka\", \"Jylänki\", role = c(\"ctb\", \"cph\"), comment = \"grisu3 implementation\"), person(\"Mikkel\", \"Jørgensen\", role = c(\"ctb\", \"cph\"), comment = \"grisu3 implementation\") )", - "Description": "The goal of 'readr' is to provide a fast and friendly way to read rectangular data (like 'csv', 'tsv', and 'fwf'). 
It is designed to flexibly parse many types of data found in the wild, while still cleanly failing when data unexpectedly changes.", - "License": "MIT + file LICENSE", - "URL": "https://readr.tidyverse.org, https://github.com/tidyverse/readr", - "BugReports": "https://github.com/tidyverse/readr/issues", - "Depends": [ - "R (>= 3.6)" - ], - "Imports": [ - "cli (>= 3.2.0)", - "clipr", - "crayon", - "hms (>= 0.4.1)", - "lifecycle (>= 0.2.0)", - "methods", - "R6", - "rlang", - "tibble", - "utils", - "vroom (>= 1.6.0)" - ], - "Suggests": [ - "covr", - "curl", - "datasets", - "knitr", - "rmarkdown", - "spelling", - "stringi", - "testthat (>= 3.2.0)", - "tzdb (>= 0.1.1)", - "waldo", - "withr", - "xml2" - ], - "LinkingTo": [ - "cpp11", - "tzdb (>= 0.1.1)" - ], - "VignetteBuilder": "knitr", - "Config/Needs/website": "tidyverse, tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Config/testthat/parallel": "false", - "Encoding": "UTF-8", - "Language": "en-US", - "RoxygenNote": "7.3.3", - "NeedsCompilation": "yes", - "Author": "Hadley Wickham [aut], Jim Hester [aut], Romain Francois [ctb], Jennifer Bryan [aut, cre] (ORCID: ), Shelby Bearrows [ctb], Posit Software, PBC [cph, fnd], https://github.com/mandreyel/ [cph] (mio library), Jukka Jylänki [ctb, cph] (grisu3 implementation), Mikkel Jørgensen [ctb, cph] (grisu3 implementation)", - "Maintainer": "Jennifer Bryan ", - "Repository": "CRAN" - }, - "renv": { - "Package": "renv", - "Version": "1.1.4", - "Source": "Repository", - "Type": "Package", - "Title": "Project Environments", - "Authors@R": "c( person(\"Kevin\", \"Ushey\", role = c(\"aut\", \"cre\"), email = \"kevin@rstudio.com\", comment = c(ORCID = \"0000-0003-2880-7407\")), person(\"Hadley\", \"Wickham\", role = c(\"aut\"), email = \"hadley@rstudio.com\", comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "A dependency management toolkit for R. Using 'renv', you can create and manage project-local R libraries, save the state of these libraries to a 'lockfile', and later restore your library as required. 
Together, these tools can help make your projects more isolated, portable, and reproducible.", - "License": "MIT + file LICENSE", - "URL": "https://rstudio.github.io/renv/, https://github.com/rstudio/renv", - "BugReports": "https://github.com/rstudio/renv/issues", - "Imports": [ - "utils" - ], - "Suggests": [ - "BiocManager", - "cli", - "compiler", - "covr", - "cpp11", - "devtools", - "gitcreds", - "jsonlite", - "jsonvalidate", - "knitr", - "miniUI", - "modules", - "packrat", - "pak", - "R6", - "remotes", - "reticulate", - "rmarkdown", - "rstudioapi", - "shiny", - "testthat", - "uuid", - "waldo", - "yaml", - "webfakes" - ], - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "VignetteBuilder": "knitr", - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Config/testthat/parallel": "true", - "Config/testthat/start-first": "bioconductor,python,install,restore,snapshot,retrieve,remotes", - "NeedsCompilation": "no", - "Author": "Kevin Ushey [aut, cre] (), Hadley Wickham [aut] (), Posit Software, PBC [cph, fnd]", - "Maintainer": "Kevin Ushey ", - "Repository": "CRAN" - }, - "rlang": { - "Package": "rlang", - "Version": "1.1.6", - "Source": "Repository", - "Title": "Functions for Base Types and Core R and 'Tidyverse' Features", - "Description": "A toolbox for working with base types, core R features like the condition system, and core 'Tidyverse' features like tidy evaluation.", - "Authors@R": "c( person(\"Lionel\", \"Henry\", ,\"lionel@posit.co\", c(\"aut\", \"cre\")), person(\"Hadley\", \"Wickham\", ,\"hadley@posit.co\", \"aut\"), person(given = \"mikefc\", email = \"mikefc@coolbutuseless.com\", role = \"cph\", comment = \"Hash implementation based on Mike's xxhashlite\"), person(given = \"Yann\", family = \"Collet\", role = \"cph\", comment = \"Author of the embedded xxHash library\"), person(given = \"Posit, PBC\", role = c(\"cph\", \"fnd\")) )", - "License": "MIT + file LICENSE", - "ByteCompile": "true", - "Biarch": "true", - "Depends": [ - "R (>= 3.5.0)" - ], - "Imports": [ - "utils" - ], - "Suggests": [ - "cli (>= 3.1.0)", - "covr", - "crayon", - "desc", - "fs", - "glue", - "knitr", - "magrittr", - "methods", - "pillar", - "pkgload", - "rmarkdown", - "stats", - "testthat (>= 3.2.0)", - "tibble", - "usethis", - "vctrs (>= 0.2.3)", - "withr" - ], - "Enhances": [ - "winch" - ], - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "URL": "https://rlang.r-lib.org, https://github.com/r-lib/rlang", - "BugReports": "https://github.com/r-lib/rlang/issues", - "Config/build/compilation-database": "true", - "Config/testthat/edition": "3", - "Config/Needs/website": "dplyr, tidyverse/tidytemplate", - "NeedsCompilation": "yes", - "Author": "Lionel Henry [aut, cre], Hadley Wickham [aut], mikefc [cph] (Hash implementation based on Mike's xxhashlite), Yann Collet [cph] (Author of the embedded xxHash library), Posit, PBC [cph, fnd]", - "Maintainer": "Lionel Henry ", - "Repository": "CRAN" - }, - "rvest": { - "Package": "rvest", - "Version": "1.0.4", - "Source": "Repository", - "Title": "Easily Harvest (Scrape) Web Pages", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "Wrappers around the 'xml2' and 'httr' packages to make it easy to download, then manipulate, HTML and XML.", - "License": "MIT + file LICENSE", - "URL": "https://rvest.tidyverse.org/, https://github.com/tidyverse/rvest", - "BugReports": 
"https://github.com/tidyverse/rvest/issues", - "Depends": [ - "R (>= 3.6)" - ], - "Imports": [ - "cli", - "glue", - "httr (>= 0.5)", - "lifecycle (>= 1.0.3)", - "magrittr", - "rlang (>= 1.1.0)", - "selectr", - "tibble", - "xml2 (>= 1.3)" - ], - "Suggests": [ - "chromote", - "covr", - "knitr", - "R6", - "readr", - "repurrrsive", - "rmarkdown", - "spelling", - "stringi (>= 0.3.1)", - "testthat (>= 3.0.2)", - "webfakes" - ], - "VignetteBuilder": "knitr", - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Config/testthat/parallel": "true", - "Encoding": "UTF-8", - "Language": "en-US", - "RoxygenNote": "7.3.1", - "NeedsCompilation": "no", - "Author": "Hadley Wickham [aut, cre], Posit Software, PBC [cph, fnd]", - "Maintainer": "Hadley Wickham ", - "Repository": "CRAN" - }, - "selectr": { - "Package": "selectr", - "Version": "0.4-2", - "Source": "Repository", - "Type": "Package", - "Title": "Translate CSS Selectors to XPath Expressions", - "Date": "2019-11-20", - "Authors@R": "c(person(\"Simon\", \"Potter\", role = c(\"aut\", \"trl\", \"cre\"), email = \"simon@sjp.co.nz\"), person(\"Simon\", \"Sapin\", role = \"aut\"), person(\"Ian\", \"Bicking\", role = \"aut\"))", - "License": "BSD_3_clause + file LICENCE", - "Depends": [ - "R (>= 3.0)" - ], - "Imports": [ - "methods", - "stringr", - "R6" - ], - "Suggests": [ - "testthat", - "XML", - "xml2" - ], - "URL": "https://sjp.co.nz/projects/selectr", - "BugReports": "https://github.com/sjp/selectr/issues", - "Description": "Translates a CSS3 selector into an equivalent XPath expression. This allows us to use CSS selectors when working with the XML package as it can only evaluate XPath expressions. Also provided are convenience functions useful for using CSS selectors on XML nodes. This package is a port of the Python package 'cssselect' ().", - "NeedsCompilation": "no", - "Author": "Simon Potter [aut, trl, cre], Simon Sapin [aut], Ian Bicking [aut]", - "Maintainer": "Simon Potter ", - "Repository": "CRAN" - }, - "snakecase": { - "Package": "snakecase", - "Version": "0.11.1", - "Source": "Repository", - "Date": "2023-08-27", - "Title": "Convert Strings into any Case", - "Description": "A consistent, flexible and easy to use tool to parse and convert strings into cases like snake or camel among others.", - "Authors@R": "c( person(\"Malte\", \"Grosser\", , \"malte.grosser@gmail.com\", role = c(\"aut\", \"cre\")))", - "Maintainer": "Malte Grosser ", - "Depends": [ - "R (>= 3.2)" - ], - "Imports": [ - "stringr", - "stringi" - ], - "Suggests": [ - "testthat", - "covr", - "tibble", - "purrrlyr", - "knitr", - "rmarkdown", - "magrittr" - ], - "URL": "https://github.com/Tazinho/snakecase", - "BugReports": "https://github.com/Tazinho/snakecase/issues", - "Encoding": "UTF-8", - "License": "GPL-3", - "RoxygenNote": "6.1.1", - "VignetteBuilder": "knitr", - "NeedsCompilation": "no", - "Author": "Malte Grosser [aut, cre]", - "Repository": "CRAN" - }, - "stringi": { - "Package": "stringi", - "Version": "1.8.7", - "Source": "Repository", - "Date": "2025-03-27", - "Title": "Fast and Portable Character String Processing Facilities", - "Description": "A collection of character string/text/natural language processing tools for pattern searching (e.g., with 'Java'-like regular expressions or the 'Unicode' collation algorithm), random string generation, case mapping, string transliteration, concatenation, sorting, padding, wrapping, Unicode normalisation, date-time formatting and parsing, and many more. 
They are fast, consistent, convenient, and - thanks to 'ICU' (International Components for Unicode) - portable across all locales and platforms. Documentation about 'stringi' is provided via its website at and the paper by Gagolewski (2022, ).", - "URL": "https://stringi.gagolewski.com/, https://github.com/gagolews/stringi, https://icu.unicode.org/", - "BugReports": "https://github.com/gagolews/stringi/issues", - "SystemRequirements": "ICU4C (>= 61, optional)", - "Type": "Package", - "Depends": [ - "R (>= 3.4)" - ], - "Imports": [ - "tools", - "utils", - "stats" - ], - "Biarch": "TRUE", - "License": "file LICENSE", - "Authors@R": "c(person(given = \"Marek\", family = \"Gagolewski\", role = c(\"aut\", \"cre\", \"cph\"), email = \"marek@gagolewski.com\", comment = c(ORCID = \"0000-0003-0637-6028\")), person(given = \"Bartek\", family = \"Tartanus\", role = \"ctb\"), person(\"Unicode, Inc. and others\", role=\"ctb\", comment = \"ICU4C source code, Unicode Character Database\") )", - "RoxygenNote": "7.3.2", - "Encoding": "UTF-8", - "NeedsCompilation": "yes", - "Author": "Marek Gagolewski [aut, cre, cph] (), Bartek Tartanus [ctb], Unicode, Inc. and others [ctb] (ICU4C source code, Unicode Character Database)", - "Maintainer": "Marek Gagolewski ", - "License_is_FOSS": "yes", - "Repository": "CRAN" - }, - "stringr": { - "Package": "stringr", - "Version": "1.5.1", - "Source": "Repository", - "Title": "Simple, Consistent Wrappers for Common String Operations", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\", \"cph\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "A consistent, simple and easy to use set of wrappers around the fantastic 'stringi' package. All function and argument names (and positions) are consistent, all functions deal with \"NA\"'s and zero length vectors in the same way, and the output from one function is easy to feed into the input of another.", - "License": "MIT + file LICENSE", - "URL": "https://stringr.tidyverse.org, https://github.com/tidyverse/stringr", - "BugReports": "https://github.com/tidyverse/stringr/issues", - "Depends": [ - "R (>= 3.6)" - ], - "Imports": [ - "cli", - "glue (>= 1.6.1)", - "lifecycle (>= 1.0.3)", - "magrittr", - "rlang (>= 1.0.0)", - "stringi (>= 1.5.3)", - "vctrs (>= 0.4.0)" - ], - "Suggests": [ - "covr", - "dplyr", - "gt", - "htmltools", - "htmlwidgets", - "knitr", - "rmarkdown", - "testthat (>= 3.0.0)", - "tibble" - ], - "VignetteBuilder": "knitr", - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Encoding": "UTF-8", - "LazyData": "true", - "RoxygenNote": "7.2.3", - "NeedsCompilation": "no", - "Author": "Hadley Wickham [aut, cre, cph], Posit Software, PBC [cph, fnd]", - "Maintainer": "Hadley Wickham ", - "Repository": "CRAN" - }, - "sys": { - "Package": "sys", - "Version": "3.4.3", - "Source": "Repository", - "Type": "Package", - "Title": "Powerful and Reliable Tools for Running System Commands in R", - "Authors@R": "c(person(\"Jeroen\", \"Ooms\", role = c(\"aut\", \"cre\"), email = \"jeroenooms@gmail.com\", comment = c(ORCID = \"0000-0002-4035-0289\")), person(\"Gábor\", \"Csárdi\", , \"csardi.gabor@gmail.com\", role = \"ctb\"))", - "Description": "Drop-in replacements for the base system2() function with fine control and consistent behavior across platforms. Supports clean interruption, timeout, background tasks, and streaming STDIN / STDOUT / STDERR over binary or text connections. 
Arguments on Windows automatically get encoded and quoted to work on different locales.", - "License": "MIT + file LICENSE", - "URL": "https://jeroen.r-universe.dev/sys", - "BugReports": "https://github.com/jeroen/sys/issues", - "Encoding": "UTF-8", - "RoxygenNote": "7.1.1", - "Suggests": [ - "unix (>= 1.4)", - "spelling", - "testthat" - ], - "Language": "en-US", - "NeedsCompilation": "yes", - "Author": "Jeroen Ooms [aut, cre] (), Gábor Csárdi [ctb]", - "Maintainer": "Jeroen Ooms ", - "Repository": "CRAN" - }, - "tibble": { - "Package": "tibble", - "Version": "3.3.0", - "Source": "Repository", - "Title": "Simple Data Frames", - "Authors@R": "c(person(given = \"Kirill\", family = \"M\\u00fcller\", role = c(\"aut\", \"cre\"), email = \"kirill@cynkra.com\", comment = c(ORCID = \"0000-0002-1416-3412\")), person(given = \"Hadley\", family = \"Wickham\", role = \"aut\", email = \"hadley@rstudio.com\"), person(given = \"Romain\", family = \"Francois\", role = \"ctb\", email = \"romain@r-enthusiasts.com\"), person(given = \"Jennifer\", family = \"Bryan\", role = \"ctb\", email = \"jenny@rstudio.com\"), person(given = \"RStudio\", role = c(\"cph\", \"fnd\")))", - "Description": "Provides a 'tbl_df' class (the 'tibble') with stricter checking and better formatting than the traditional data frame.", - "License": "MIT + file LICENSE", - "URL": "https://tibble.tidyverse.org/, https://github.com/tidyverse/tibble", - "BugReports": "https://github.com/tidyverse/tibble/issues", - "Depends": [ - "R (>= 3.4.0)" - ], - "Imports": [ - "cli", - "lifecycle (>= 1.0.0)", - "magrittr", - "methods", - "pillar (>= 1.8.1)", - "pkgconfig", - "rlang (>= 1.0.2)", - "utils", - "vctrs (>= 0.5.0)" - ], - "Suggests": [ - "bench", - "bit64", - "blob", - "brio", - "callr", - "DiagrammeR", - "dplyr", - "evaluate", - "formattable", - "ggplot2", - "here", - "hms", - "htmltools", - "knitr", - "lubridate", - "nycflights13", - "pkgload", - "purrr", - "rmarkdown", - "stringi", - "testthat (>= 3.0.2)", - "tidyr", - "withr" - ], - "VignetteBuilder": "knitr", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2.9000", - "Config/testthat/edition": "3", - "Config/testthat/parallel": "true", - "Config/testthat/start-first": "vignette-formats, as_tibble, add, invariants", - "Config/autostyle/scope": "line_breaks", - "Config/autostyle/strict": "true", - "Config/autostyle/rmd": "false", - "Config/Needs/website": "tidyverse/tidytemplate", - "NeedsCompilation": "yes", - "Author": "Kirill Müller [aut, cre] (ORCID: ), Hadley Wickham [aut], Romain Francois [ctb], Jennifer Bryan [ctb], RStudio [cph, fnd]", - "Maintainer": "Kirill Müller ", - "Repository": "CRAN" - }, - "tidyr": { - "Package": "tidyr", - "Version": "1.3.1", - "Source": "Repository", - "Title": "Tidy Messy Data", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\")), person(\"Davis\", \"Vaughan\", , \"davis@posit.co\", role = \"aut\"), person(\"Maximilian\", \"Girlich\", role = \"aut\"), person(\"Kevin\", \"Ushey\", , \"kevin@posit.co\", role = \"ctb\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "Tools to help to create tidy data, where each column is a variable, each row is an observation, and each cell contains a single value. 'tidyr' contains tools for changing the shape (pivoting) and hierarchy (nesting and 'unnesting') of a dataset, turning deeply nested lists into rectangular data frames ('rectangling'), and extracting values out of string columns. 
It also includes tools for working with missing values (both implicit and explicit).", - "License": "MIT + file LICENSE", - "URL": "https://tidyr.tidyverse.org, https://github.com/tidyverse/tidyr", - "BugReports": "https://github.com/tidyverse/tidyr/issues", - "Depends": [ - "R (>= 3.6)" - ], - "Imports": [ - "cli (>= 3.4.1)", - "dplyr (>= 1.0.10)", - "glue", - "lifecycle (>= 1.0.3)", - "magrittr", - "purrr (>= 1.0.1)", - "rlang (>= 1.1.1)", - "stringr (>= 1.5.0)", - "tibble (>= 2.1.1)", - "tidyselect (>= 1.2.0)", - "utils", - "vctrs (>= 0.5.2)" - ], - "Suggests": [ - "covr", - "data.table", - "knitr", - "readr", - "repurrrsive (>= 1.1.0)", - "rmarkdown", - "testthat (>= 3.0.0)" - ], - "LinkingTo": [ - "cpp11 (>= 0.4.0)" - ], - "VignetteBuilder": "knitr", - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Encoding": "UTF-8", - "LazyData": "true", - "RoxygenNote": "7.3.0", - "NeedsCompilation": "yes", - "Author": "Hadley Wickham [aut, cre], Davis Vaughan [aut], Maximilian Girlich [aut], Kevin Ushey [ctb], Posit Software, PBC [cph, fnd]", - "Maintainer": "Hadley Wickham ", - "Repository": "CRAN" - }, - "tidyselect": { - "Package": "tidyselect", - "Version": "1.2.1", - "Source": "Repository", - "Title": "Select from a Set of Strings", - "Authors@R": "c( person(\"Lionel\", \"Henry\", , \"lionel@posit.co\", role = c(\"aut\", \"cre\")), person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = \"aut\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "A backend for the selecting functions of the 'tidyverse'. It makes it easy to implement select-like functions in your own packages in a way that is consistent with other 'tidyverse' interfaces for selection.", - "License": "MIT + file LICENSE", - "URL": "https://tidyselect.r-lib.org, https://github.com/r-lib/tidyselect", - "BugReports": "https://github.com/r-lib/tidyselect/issues", - "Depends": [ - "R (>= 3.4)" - ], - "Imports": [ - "cli (>= 3.3.0)", - "glue (>= 1.3.0)", - "lifecycle (>= 1.0.3)", - "rlang (>= 1.0.4)", - "vctrs (>= 0.5.2)", - "withr" - ], - "Suggests": [ - "covr", - "crayon", - "dplyr", - "knitr", - "magrittr", - "rmarkdown", - "stringr", - "testthat (>= 3.1.1)", - "tibble (>= 2.1.3)" - ], - "VignetteBuilder": "knitr", - "ByteCompile": "true", - "Config/testthat/edition": "3", - "Config/Needs/website": "tidyverse/tidytemplate", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.0.9000", - "NeedsCompilation": "yes", - "Author": "Lionel Henry [aut, cre], Hadley Wickham [aut], Posit Software, PBC [cph, fnd]", - "Maintainer": "Lionel Henry ", - "Repository": "CRAN" - }, - "timechange": { - "Package": "timechange", - "Version": "0.3.0", - "Source": "Repository", - "Title": "Efficient Manipulation of Date-Times", - "Authors@R": "c(person(\"Vitalie\", \"Spinu\", email = \"spinuvit@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Google Inc.\", role = c(\"ctb\", \"cph\")))", - "Description": "Efficient routines for manipulation of date-time objects while accounting for time-zones and daylight saving times. The package includes utilities for updating of date-time components (year, month, day etc.), modification of time-zones, rounding of date-times, period addition and subtraction etc. Parts of the 'CCTZ' source code, released under the Apache 2.0 License, are included in this package. 
See for more details.", - "Depends": [ - "R (>= 3.3)" - ], - "License": "GPL (>= 3)", - "Encoding": "UTF-8", - "LinkingTo": [ - "cpp11 (>= 0.2.7)" - ], - "Suggests": [ - "testthat (>= 0.7.1.99)", - "knitr" - ], - "SystemRequirements": "A system with zoneinfo data (e.g. /usr/share/zoneinfo) as well as a recent-enough C++11 compiler (such as g++-4.8 or later). On Windows the zoneinfo included with R is used.", - "BugReports": "https://github.com/vspinu/timechange/issues", - "URL": "https://github.com/vspinu/timechange/", - "RoxygenNote": "7.2.1", - "NeedsCompilation": "yes", - "Author": "Vitalie Spinu [aut, cre], Google Inc. [ctb, cph]", - "Maintainer": "Vitalie Spinu ", - "Repository": "CRAN" - }, - "tzdb": { - "Package": "tzdb", - "Version": "0.5.0", - "Source": "Repository", - "Title": "Time Zone Database Information", - "Authors@R": "c( person(\"Davis\", \"Vaughan\", , \"davis@posit.co\", role = c(\"aut\", \"cre\")), person(\"Howard\", \"Hinnant\", role = \"cph\", comment = \"Author of the included date library\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "Provides an up-to-date copy of the Internet Assigned Numbers Authority (IANA) Time Zone Database. It is updated periodically to reflect changes made by political bodies to time zone boundaries, UTC offsets, and daylight saving time rules. Additionally, this package provides a C++ interface for working with the 'date' library. 'date' provides comprehensive support for working with dates and date-times, which this package exposes to make it easier for other R packages to utilize. Headers are provided for calendar specific calculations, along with a limited interface for time zone manipulations.", - "License": "MIT + file LICENSE", - "URL": "https://tzdb.r-lib.org, https://github.com/r-lib/tzdb", - "BugReports": "https://github.com/r-lib/tzdb/issues", - "Depends": [ - "R (>= 4.0.0)" - ], - "Suggests": [ - "covr", - "testthat (>= 3.0.0)" - ], - "LinkingTo": [ - "cpp11 (>= 0.5.2)" - ], - "Biarch": "yes", - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "NeedsCompilation": "yes", - "Author": "Davis Vaughan [aut, cre], Howard Hinnant [cph] (Author of the included date library), Posit Software, PBC [cph, fnd]", - "Maintainer": "Davis Vaughan ", - "Repository": "CRAN" - }, - "utf8": { - "Package": "utf8", - "Version": "1.2.6", - "Source": "Repository", - "Title": "Unicode Text Processing", - "Authors@R": "c(person(given = c(\"Patrick\", \"O.\"), family = \"Perry\", role = c(\"aut\", \"cph\")), person(given = \"Kirill\", family = \"M\\u00fcller\", role = \"cre\", email = \"kirill@cynkra.com\", comment = c(ORCID = \"0000-0002-1416-3412\")), person(given = \"Unicode, Inc.\", role = c(\"cph\", \"dtc\"), comment = \"Unicode Character Database\"))", - "Description": "Process and print 'UTF-8' encoded international text (Unicode). Input, validate, normalize, encode, format, and display.", - "License": "Apache License (== 2.0) | file LICENSE", - "URL": "https://krlmlr.github.io/utf8/, https://github.com/krlmlr/utf8", - "BugReports": "https://github.com/krlmlr/utf8/issues", - "Depends": [ - "R (>= 2.10)" - ], - "Suggests": [ - "cli", - "covr", - "knitr", - "rlang", - "rmarkdown", - "testthat (>= 3.0.0)", - "withr" - ], - "VignetteBuilder": "knitr, rmarkdown", - "Config/testthat/edition": "3", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2.9000", - "NeedsCompilation": "yes", - "Author": "Patrick O. 
Perry [aut, cph], Kirill Müller [cre] (ORCID: ), Unicode, Inc. [cph, dtc] (Unicode Character Database)", - "Maintainer": "Kirill Müller ", - "Repository": "CRAN" - }, - "vctrs": { - "Package": "vctrs", - "Version": "0.6.5", - "Source": "Repository", - "Title": "Vector Helpers", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = \"aut\"), person(\"Lionel\", \"Henry\", , \"lionel@posit.co\", role = \"aut\"), person(\"Davis\", \"Vaughan\", , \"davis@posit.co\", role = c(\"aut\", \"cre\")), person(\"data.table team\", role = \"cph\", comment = \"Radix sort based on data.table's forder() and their contribution to R's order()\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "Defines new notions of prototype and size that are used to provide tools for consistent and well-founded type-coercion and size-recycling, and are in turn connected to ideas of type- and size-stability useful for analysing function interfaces.", - "License": "MIT + file LICENSE", - "URL": "https://vctrs.r-lib.org/, https://github.com/r-lib/vctrs", - "BugReports": "https://github.com/r-lib/vctrs/issues", - "Depends": [ - "R (>= 3.5.0)" - ], - "Imports": [ - "cli (>= 3.4.0)", - "glue", - "lifecycle (>= 1.0.3)", - "rlang (>= 1.1.0)" - ], - "Suggests": [ - "bit64", - "covr", - "crayon", - "dplyr (>= 0.8.5)", - "generics", - "knitr", - "pillar (>= 1.4.4)", - "pkgdown (>= 2.0.1)", - "rmarkdown", - "testthat (>= 3.0.0)", - "tibble (>= 3.1.3)", - "waldo (>= 0.2.0)", - "withr", - "xml2", - "zeallot" - ], - "VignetteBuilder": "knitr", - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Encoding": "UTF-8", - "Language": "en-GB", - "RoxygenNote": "7.2.3", - "NeedsCompilation": "yes", - "Author": "Hadley Wickham [aut], Lionel Henry [aut], Davis Vaughan [aut, cre], data.table team [cph] (Radix sort based on data.table's forder() and their contribution to R's order()), Posit Software, PBC [cph, fnd]", - "Maintainer": "Davis Vaughan ", - "Repository": "CRAN" - }, - "vroom": { - "Package": "vroom", - "Version": "1.6.7", - "Source": "Repository", - "Title": "Read and Write Rectangular Text Data Quickly", - "Authors@R": "c( person(\"Jim\", \"Hester\", role = \"aut\", comment = c(ORCID = \"0000-0002-2739-7082\")), person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = \"aut\", comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Jennifer\", \"Bryan\", , \"jenny@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0002-6983-2759\")), person(\"Shelby\", \"Bearrows\", role = \"ctb\"), person(\"https://github.com/mandreyel/\", role = \"cph\", comment = \"mio library\"), person(\"Jukka\", \"Jylänki\", role = \"cph\", comment = \"grisu3 implementation\"), person(\"Mikkel\", \"Jørgensen\", role = \"cph\", comment = \"grisu3 implementation\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\"), comment = c(ROR = \"03wc8by49\")) )", - "Description": "The goal of 'vroom' is to read and write data (like 'csv', 'tsv' and 'fwf') quickly. When reading it uses a quick initial indexing step, then reads the values lazily , so only the data you actually use needs to be read. 
The writer formats the data in parallel and writes to disk asynchronously from formatting.", - "License": "MIT + file LICENSE", - "URL": "https://vroom.r-lib.org, https://github.com/tidyverse/vroom", - "BugReports": "https://github.com/tidyverse/vroom/issues", - "Depends": [ - "R (>= 4.1)" - ], - "Imports": [ - "bit64", - "cli (>= 3.2.0)", - "crayon", - "glue", - "hms", - "lifecycle (>= 1.0.3)", - "methods", - "rlang (>= 0.4.2)", - "stats", - "tibble (>= 2.0.0)", - "tidyselect", - "tzdb (>= 0.1.1)", - "vctrs (>= 0.2.0)", - "withr" - ], - "Suggests": [ - "archive", - "bench (>= 1.1.0)", - "covr", - "curl", - "dplyr", - "forcats", - "fs", - "ggplot2", - "knitr", - "patchwork", - "prettyunits", - "purrr", - "rmarkdown", - "rstudioapi", - "scales", - "spelling", - "testthat (>= 2.1.0)", - "tidyr", - "utils", - "waldo", - "xml2" - ], - "LinkingTo": [ - "cpp11 (>= 0.2.0)", - "progress (>= 1.2.3)", - "tzdb (>= 0.1.1)" - ], - "VignetteBuilder": "knitr", - "Config/Needs/website": "nycflights13, tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Config/testthat/parallel": "false", - "Config/usethis/last-upkeep": "2025-11-25", - "Copyright": "file COPYRIGHTS", - "Encoding": "UTF-8", - "Language": "en-US", - "RoxygenNote": "7.3.3", - "NeedsCompilation": "yes", - "Author": "Jim Hester [aut] (ORCID: ), Hadley Wickham [aut] (ORCID: ), Jennifer Bryan [aut, cre] (ORCID: ), Shelby Bearrows [ctb], https://github.com/mandreyel/ [cph] (mio library), Jukka Jylänki [cph] (grisu3 implementation), Mikkel Jørgensen [cph] (grisu3 implementation), Posit Software, PBC [cph, fnd] (ROR: )", - "Maintainer": "Jennifer Bryan ", - "Repository": "CRAN" - }, - "withr": { - "Package": "withr", - "Version": "3.0.2", - "Source": "Repository", - "Title": "Run Code 'With' Temporarily Modified Global State", - "Authors@R": "c( person(\"Jim\", \"Hester\", role = \"aut\"), person(\"Lionel\", \"Henry\", , \"lionel@posit.co\", role = c(\"aut\", \"cre\")), person(\"Kirill\", \"Müller\", , \"krlmlr+r@mailbox.org\", role = \"aut\"), person(\"Kevin\", \"Ushey\", , \"kevinushey@gmail.com\", role = \"aut\"), person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = \"aut\"), person(\"Winston\", \"Chang\", role = \"aut\"), person(\"Jennifer\", \"Bryan\", role = \"ctb\"), person(\"Richard\", \"Cotton\", role = \"ctb\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", - "Description": "A set of functions to run code 'with' safely and temporarily modified global state. 
Many of these functions were originally a part of the 'devtools' package, this provides a simple package with limited dependencies to provide access to these functions.", - "License": "MIT + file LICENSE", - "URL": "https://withr.r-lib.org, https://github.com/r-lib/withr#readme", - "BugReports": "https://github.com/r-lib/withr/issues", - "Depends": [ - "R (>= 3.6.0)" - ], - "Imports": [ - "graphics", - "grDevices" - ], - "Suggests": [ - "callr", - "DBI", - "knitr", - "methods", - "rlang", - "rmarkdown (>= 2.12)", - "RSQLite", - "testthat (>= 3.0.0)" - ], - "VignetteBuilder": "knitr", - "Config/Needs/website": "tidyverse/tidytemplate", - "Config/testthat/edition": "3", - "Encoding": "UTF-8", - "RoxygenNote": "7.3.2", - "Collate": "'aaa.R' 'collate.R' 'connection.R' 'db.R' 'defer-exit.R' 'standalone-defer.R' 'defer.R' 'devices.R' 'local_.R' 'with_.R' 'dir.R' 'env.R' 'file.R' 'language.R' 'libpaths.R' 'locale.R' 'makevars.R' 'namespace.R' 'options.R' 'par.R' 'path.R' 'rng.R' 'seed.R' 'wrap.R' 'sink.R' 'tempfile.R' 'timezone.R' 'torture.R' 'utils.R' 'with.R'", - "NeedsCompilation": "no", - "Author": "Jim Hester [aut], Lionel Henry [aut, cre], Kirill Müller [aut], Kevin Ushey [aut], Hadley Wickham [aut], Winston Chang [aut], Jennifer Bryan [ctb], Richard Cotton [ctb], Posit Software, PBC [cph, fnd]", - "Maintainer": "Lionel Henry ", - "Repository": "CRAN" - }, - "xml2": { - "Package": "xml2", - "Version": "1.3.8", - "Source": "Repository", - "Title": "Parse XML", - "Authors@R": "c( person(\"Hadley\", \"Wickham\", role = \"aut\"), person(\"Jim\", \"Hester\", role = \"aut\"), person(\"Jeroen\", \"Ooms\", email = \"jeroenooms@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")), person(\"R Foundation\", role = \"ctb\", comment = \"Copy of R-project homepage cached as example\") )", - "Description": "Bindings to 'libxml2' for working with XML data using a simple, consistent interface based on 'XPath' expressions. 
Also supports XML schema validation; for 'XSLT' transformations see the 'xslt' package.", - "License": "MIT + file LICENSE", - "URL": "https://xml2.r-lib.org, https://r-lib.r-universe.dev/xml2", - "BugReports": "https://github.com/r-lib/xml2/issues", - "Depends": [ - "R (>= 3.6.0)" - ], - "Imports": [ - "cli", - "methods", - "rlang (>= 1.1.0)" - ], - "Suggests": [ - "covr", - "curl", - "httr", - "knitr", - "magrittr", - "mockery", - "rmarkdown", - "testthat (>= 3.2.0)", - "xslt" - ], - "VignetteBuilder": "knitr", - "Config/Needs/website": "tidyverse/tidytemplate", - "Encoding": "UTF-8", - "RoxygenNote": "7.2.3", - "SystemRequirements": "libxml2: libxml2-dev (deb), libxml2-devel (rpm)", - "Collate": "'S4.R' 'as_list.R' 'xml_parse.R' 'as_xml_document.R' 'classes.R' 'format.R' 'import-standalone-obj-type.R' 'import-standalone-purrr.R' 'import-standalone-types-check.R' 'init.R' 'nodeset_apply.R' 'paths.R' 'utils.R' 'xml2-package.R' 'xml_attr.R' 'xml_children.R' 'xml_document.R' 'xml_find.R' 'xml_missing.R' 'xml_modify.R' 'xml_name.R' 'xml_namespaces.R' 'xml_node.R' 'xml_nodeset.R' 'xml_path.R' 'xml_schema.R' 'xml_serialize.R' 'xml_structure.R' 'xml_text.R' 'xml_type.R' 'xml_url.R' 'xml_write.R' 'zzz.R'", - "Config/testthat/edition": "3", - "NeedsCompilation": "yes", - "Author": "Hadley Wickham [aut], Jim Hester [aut], Jeroen Ooms [aut, cre], Posit Software, PBC [cph, fnd], R Foundation [ctb] (Copy of R-project homepage cached as example)", - "Maintainer": "Jeroen Ooms ", - "Repository": "CRAN" - } - } -} +{ + "R": { + "Version": "4.5.1", + "Repositories": [ + { + "Name": "CRAN", + "URL": "https://packagemanager.posit.co/cran/latest" + } + ] + }, + "Packages": { + "R6": { + "Package": "R6", + "Version": "2.6.1", + "Source": "Repository", + "Title": "Encapsulated Classes with Reference Semantics", + "Authors@R": "c( person(\"Winston\", \"Chang\", , \"winston@posit.co\", role = c(\"aut\", \"cre\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "Creates classes with reference semantics, similar to R's built-in reference classes. Compared to reference classes, R6 classes are simpler and lighter-weight, and they are not built on S4 classes so they do not require the methods package. These classes allow public and private members, and they support inheritance, even when the classes are defined in different packages.", + "License": "MIT + file LICENSE", + "URL": "https://r6.r-lib.org, https://github.com/r-lib/R6", + "BugReports": "https://github.com/r-lib/R6/issues", + "Depends": [ + "R (>= 3.6)" + ], + "Suggests": [ + "lobstr", + "testthat (>= 3.0.0)" + ], + "Config/Needs/website": "tidyverse/tidytemplate, ggplot2, microbenchmark, scales", + "Config/testthat/edition": "3", + "Encoding": "UTF-8", + "RoxygenNote": "7.3.2", + "NeedsCompilation": "no", + "Author": "Winston Chang [aut, cre], Posit Software, PBC [cph, fnd]", + "Maintainer": "Winston Chang ", + "Repository": "CRAN" + }, + "askpass": { + "Package": "askpass", + "Version": "1.2.1", + "Source": "Repository", + "Type": "Package", + "Title": "Password Entry Utilities for R, Git, and SSH", + "Authors@R": "person(\"Jeroen\", \"Ooms\", role = c(\"aut\", \"cre\"), email = \"jeroenooms@gmail.com\", comment = c(ORCID = \"0000-0002-4035-0289\"))", + "Description": "Cross-platform utilities for prompting the user for credentials or a passphrase, for example to authenticate with a server or read a protected key. Includes native programs for MacOS and Windows, hence no 'tcltk' is required. 
Password entry can be invoked in two different ways: directly from R via the askpass() function, or indirectly as password-entry back-end for 'ssh-agent' or 'git-credential' via the SSH_ASKPASS and GIT_ASKPASS environment variables. Thereby the user can be prompted for credentials or a passphrase if needed when R calls out to git or ssh.", + "License": "MIT + file LICENSE", + "URL": "https://r-lib.r-universe.dev/askpass", + "BugReports": "https://github.com/r-lib/askpass/issues", + "Encoding": "UTF-8", + "Imports": [ + "sys (>= 2.1)" + ], + "RoxygenNote": "7.2.3", + "Suggests": [ + "testthat" + ], + "Language": "en-US", + "NeedsCompilation": "yes", + "Author": "Jeroen Ooms [aut, cre] ()", + "Maintainer": "Jeroen Ooms ", + "Repository": "CRAN" + }, + "bit": { + "Package": "bit", + "Version": "4.6.0", + "Source": "Repository", + "Title": "Classes and Methods for Fast Memory-Efficient Boolean Selections", + "Authors@R": "c( person(\"Michael\", \"Chirico\", email = \"MichaelChirico4@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Jens\", \"Oehlschlägel\", role = \"aut\"), person(\"Brian\", \"Ripley\", role = \"ctb\") )", + "Depends": [ + "R (>= 3.4.0)" + ], + "Suggests": [ + "testthat (>= 3.0.0)", + "roxygen2", + "knitr", + "markdown", + "rmarkdown", + "microbenchmark", + "bit64 (>= 4.0.0)", + "ff (>= 4.0.0)" + ], + "Description": "Provided are classes for boolean and skewed boolean vectors, fast boolean methods, fast unique and non-unique integer sorting, fast set operations on sorted and unsorted sets of integers, and foundations for ff (range index, compression, chunked processing).", + "License": "GPL-2 | GPL-3", + "LazyLoad": "yes", + "ByteCompile": "yes", + "Encoding": "UTF-8", + "URL": "https://github.com/r-lib/bit", + "VignetteBuilder": "knitr, rmarkdown", + "RoxygenNote": "7.3.2", + "Config/testthat/edition": "3", + "NeedsCompilation": "yes", + "Author": "Michael Chirico [aut, cre], Jens Oehlschlägel [aut], Brian Ripley [ctb]", + "Maintainer": "Michael Chirico ", + "Repository": "CRAN" + }, + "bit64": { + "Package": "bit64", + "Version": "4.6.0-1", + "Source": "Repository", + "Title": "A S3 Class for Vectors of 64bit Integers", + "Authors@R": "c( person(\"Michael\", \"Chirico\", email = \"michaelchirico4@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Jens\", \"Oehlschlägel\", role = \"aut\"), person(\"Leonardo\", \"Silvestri\", role = \"ctb\"), person(\"Ofek\", \"Shilon\", role = \"ctb\") )", + "Depends": [ + "R (>= 3.4.0)", + "bit (>= 4.0.0)" + ], + "Description": "Package 'bit64' provides serializable S3 atomic 64bit (signed) integers. These are useful for handling database keys and exact counting in +-2^63. WARNING: do not use them as replacement for 32bit integers, integer64 are not supported for subscripting by R-core and they have different semantics when combined with double, e.g. integer64 + double => integer64. Class integer64 can be used in vectors, matrices, arrays and data.frames. Methods are available for coercion from and to logicals, integers, doubles, characters and factors as well as many elementwise and summary functions. 
Many fast algorithmic operations such as 'match' and 'order' support inter- active data exploration and manipulation and optionally leverage caching.", + "License": "GPL-2 | GPL-3", + "LazyLoad": "yes", + "ByteCompile": "yes", + "URL": "https://github.com/r-lib/bit64", + "Encoding": "UTF-8", + "Imports": [ + "graphics", + "methods", + "stats", + "utils" + ], + "Suggests": [ + "testthat (>= 3.0.3)", + "withr" + ], + "Config/testthat/edition": "3", + "Config/needs/development": "testthat", + "RoxygenNote": "7.3.2", + "NeedsCompilation": "yes", + "Author": "Michael Chirico [aut, cre], Jens Oehlschlägel [aut], Leonardo Silvestri [ctb], Ofek Shilon [ctb]", + "Maintainer": "Michael Chirico ", + "Repository": "CRAN" + }, + "cli": { + "Package": "cli", + "Version": "3.6.5", + "Source": "Repository", + "Title": "Helpers for Developing Command Line Interfaces", + "Authors@R": "c( person(\"Gábor\", \"Csárdi\", , \"gabor@posit.co\", role = c(\"aut\", \"cre\")), person(\"Hadley\", \"Wickham\", role = \"ctb\"), person(\"Kirill\", \"Müller\", role = \"ctb\"), person(\"Salim\", \"Brüggemann\", , \"salim-b@pm.me\", role = \"ctb\", comment = c(ORCID = \"0000-0002-5329-5987\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "A suite of tools to build attractive command line interfaces ('CLIs'), from semantic elements: headings, lists, alerts, paragraphs, etc. Supports custom themes via a 'CSS'-like language. It also contains a number of lower level 'CLI' elements: rules, boxes, trees, and 'Unicode' symbols with 'ASCII' alternatives. It support ANSI colors and text styles as well.", + "License": "MIT + file LICENSE", + "URL": "https://cli.r-lib.org, https://github.com/r-lib/cli", + "BugReports": "https://github.com/r-lib/cli/issues", + "Depends": [ + "R (>= 3.4)" + ], + "Imports": [ + "utils" + ], + "Suggests": [ + "callr", + "covr", + "crayon", + "digest", + "glue (>= 1.6.0)", + "grDevices", + "htmltools", + "htmlwidgets", + "knitr", + "methods", + "processx", + "ps (>= 1.3.4.9000)", + "rlang (>= 1.0.2.9003)", + "rmarkdown", + "rprojroot", + "rstudioapi", + "testthat (>= 3.2.0)", + "tibble", + "whoami", + "withr" + ], + "Config/Needs/website": "r-lib/asciicast, bench, brio, cpp11, decor, desc, fansi, prettyunits, sessioninfo, tidyverse/tidytemplate, usethis, vctrs", + "Config/testthat/edition": "3", + "Encoding": "UTF-8", + "RoxygenNote": "7.3.2", + "NeedsCompilation": "yes", + "Author": "Gábor Csárdi [aut, cre], Hadley Wickham [ctb], Kirill Müller [ctb], Salim Brüggemann [ctb] (), Posit Software, PBC [cph, fnd]", + "Maintainer": "Gábor Csárdi ", + "Repository": "CRAN" + }, + "clipr": { + "Package": "clipr", + "Version": "0.8.0", + "Source": "Repository", + "Type": "Package", + "Title": "Read and Write from the System Clipboard", + "Authors@R": "c( person(\"Matthew\", \"Lincoln\", , \"matthew.d.lincoln@gmail.com\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0002-4387-3384\")), person(\"Louis\", \"Maddox\", role = \"ctb\"), person(\"Steve\", \"Simpson\", role = \"ctb\"), person(\"Jennifer\", \"Bryan\", role = \"ctb\") )", + "Description": "Simple utility functions to read from and write to the Windows, OS X, and X11 clipboards.", + "License": "GPL-3", + "URL": "https://github.com/mdlincoln/clipr, http://matthewlincoln.net/clipr/", + "BugReports": "https://github.com/mdlincoln/clipr/issues", + "Imports": [ + "utils" + ], + "Suggests": [ + "covr", + "knitr", + "rmarkdown", + "rstudioapi (>= 0.5)", + "testthat (>= 2.0.0)" + ], + "VignetteBuilder": "knitr", + 
"Encoding": "UTF-8", + "Language": "en-US", + "RoxygenNote": "7.1.2", + "SystemRequirements": "xclip (https://github.com/astrand/xclip) or xsel (http://www.vergenet.net/~conrad/software/xsel/) for accessing the X11 clipboard, or wl-clipboard (https://github.com/bugaevc/wl-clipboard) for systems using Wayland.", + "NeedsCompilation": "no", + "Author": "Matthew Lincoln [aut, cre] (), Louis Maddox [ctb], Steve Simpson [ctb], Jennifer Bryan [ctb]", + "Maintainer": "Matthew Lincoln ", + "Repository": "CRAN" + }, + "cpp11": { + "Package": "cpp11", + "Version": "0.5.2", + "Source": "Repository", + "Title": "A C++11 Interface for R's C Interface", + "Authors@R": "c( person(\"Davis\", \"Vaughan\", email = \"davis@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0003-4777-038X\")), person(\"Jim\",\"Hester\", role = \"aut\", comment = c(ORCID = \"0000-0002-2739-7082\")), person(\"Romain\", \"François\", role = \"aut\", comment = c(ORCID = \"0000-0002-2444-4226\")), person(\"Benjamin\", \"Kietzman\", role = \"ctb\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "Provides a header only, C++11 interface to R's C interface. Compared to other approaches 'cpp11' strives to be safe against long jumps from the C API as well as C++ exceptions, conform to normal R function semantics and supports interaction with 'ALTREP' vectors.", + "License": "MIT + file LICENSE", + "URL": "https://cpp11.r-lib.org, https://github.com/r-lib/cpp11", + "BugReports": "https://github.com/r-lib/cpp11/issues", + "Depends": [ + "R (>= 4.0.0)" + ], + "Suggests": [ + "bench", + "brio", + "callr", + "cli", + "covr", + "decor", + "desc", + "ggplot2", + "glue", + "knitr", + "lobstr", + "mockery", + "progress", + "rmarkdown", + "scales", + "Rcpp", + "testthat (>= 3.2.0)", + "tibble", + "utils", + "vctrs", + "withr" + ], + "VignetteBuilder": "knitr", + "Config/Needs/website": "tidyverse/tidytemplate", + "Config/testthat/edition": "3", + "Config/Needs/cpp11/cpp_register": "brio, cli, decor, desc, glue, tibble, vctrs", + "Encoding": "UTF-8", + "RoxygenNote": "7.3.2", + "NeedsCompilation": "no", + "Author": "Davis Vaughan [aut, cre] (), Jim Hester [aut] (), Romain François [aut] (), Benjamin Kietzman [ctb], Posit Software, PBC [cph, fnd]", + "Maintainer": "Davis Vaughan ", + "Repository": "CRAN" + }, + "crayon": { + "Package": "crayon", + "Version": "1.5.3", + "Source": "Repository", + "Title": "Colored Terminal Output", + "Authors@R": "c( person(\"Gábor\", \"Csárdi\", , \"csardi.gabor@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Brodie\", \"Gaslam\", , \"brodie.gaslam@yahoo.com\", role = \"ctb\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "The crayon package is now superseded. Please use the 'cli' package for new projects. Colored terminal output on terminals that support 'ANSI' color and highlight codes. It also works in 'Emacs' 'ESS'. 'ANSI' color support is automatically detected. Colors and highlighting can be combined and nested. New styles can also be created easily. 
This package was inspired by the 'chalk' 'JavaScript' project.", + "License": "MIT + file LICENSE", + "URL": "https://r-lib.github.io/crayon/, https://github.com/r-lib/crayon", + "BugReports": "https://github.com/r-lib/crayon/issues", + "Imports": [ + "grDevices", + "methods", + "utils" + ], + "Suggests": [ + "mockery", + "rstudioapi", + "testthat", + "withr" + ], + "Config/Needs/website": "tidyverse/tidytemplate", + "Encoding": "UTF-8", + "RoxygenNote": "7.3.1", + "Collate": "'aaa-rstudio-detect.R' 'aaaa-rematch2.R' 'aab-num-ansi-colors.R' 'aac-num-ansi-colors.R' 'ansi-256.R' 'ansi-palette.R' 'combine.R' 'string.R' 'utils.R' 'crayon-package.R' 'disposable.R' 'enc-utils.R' 'has_ansi.R' 'has_color.R' 'link.R' 'styles.R' 'machinery.R' 'parts.R' 'print.R' 'style-var.R' 'show.R' 'string_operations.R'", + "NeedsCompilation": "no", + "Author": "Gábor Csárdi [aut, cre], Brodie Gaslam [ctb], Posit Software, PBC [cph, fnd]", + "Maintainer": "Gábor Csárdi ", + "Repository": "CRAN" + }, + "curl": { + "Package": "curl", + "Version": "6.4.0", + "Source": "Repository", + "Type": "Package", + "Title": "A Modern and Flexible Web Client for R", + "Authors@R": "c( person(\"Jeroen\", \"Ooms\", role = c(\"aut\", \"cre\"), email = \"jeroenooms@gmail.com\", comment = c(ORCID = \"0000-0002-4035-0289\")), person(\"Hadley\", \"Wickham\", role = \"ctb\"), person(\"Posit Software, PBC\", role = \"cph\"))", + "Description": "Bindings to 'libcurl' for performing fully configurable HTTP/FTP requests where responses can be processed in memory, on disk, or streaming via the callback or connection interfaces. Some knowledge of 'libcurl' is recommended; for a more-user-friendly web client see the 'httr2' package which builds on this package with http specific tools and logic.", + "License": "MIT + file LICENSE", + "SystemRequirements": "libcurl (>= 7.73): libcurl-devel (rpm) or libcurl4-openssl-dev (deb)", + "URL": "https://jeroen.r-universe.dev/curl", + "BugReports": "https://github.com/jeroen/curl/issues", + "Suggests": [ + "spelling", + "testthat (>= 1.0.0)", + "knitr", + "jsonlite", + "later", + "rmarkdown", + "httpuv (>= 1.4.4)", + "webutils" + ], + "VignetteBuilder": "knitr", + "Depends": [ + "R (>= 3.0.0)" + ], + "RoxygenNote": "7.3.2.9000", + "Encoding": "UTF-8", + "Language": "en-US", + "NeedsCompilation": "yes", + "Author": "Jeroen Ooms [aut, cre] (ORCID: ), Hadley Wickham [ctb], Posit Software, PBC [cph]", + "Maintainer": "Jeroen Ooms ", + "Repository": "CRAN" + }, + "data.table": { + "Package": "data.table", + "Version": "1.17.8", + "Source": "Repository", + "Title": "Extension of `data.frame`", + "Depends": [ + "R (>= 3.3.0)" + ], + "Imports": [ + "methods" + ], + "Suggests": [ + "bit64 (>= 4.0.0)", + "bit (>= 4.0.4)", + "R.utils", + "xts", + "zoo (>= 1.8-1)", + "yaml", + "knitr", + "markdown" + ], + "Description": "Fast aggregation of large data (e.g. 100GB in RAM), fast ordered joins, fast add/modify/delete of columns by group using no copies at all, list columns, friendly and fast character-separated-value read/write. 
Offers a natural and flexible syntax, for faster development.", + "License": "MPL-2.0 | file LICENSE", + "URL": "https://r-datatable.com, https://Rdatatable.gitlab.io/data.table, https://github.com/Rdatatable/data.table", + "BugReports": "https://github.com/Rdatatable/data.table/issues", + "VignetteBuilder": "knitr", + "Encoding": "UTF-8", + "ByteCompile": "TRUE", + "Authors@R": "c( person(\"Tyson\",\"Barrett\", role=c(\"aut\",\"cre\"), email=\"t.barrett88@gmail.com\", comment = c(ORCID=\"0000-0002-2137-1391\")), person(\"Matt\",\"Dowle\", role=\"aut\", email=\"mattjdowle@gmail.com\"), person(\"Arun\",\"Srinivasan\", role=\"aut\", email=\"asrini@pm.me\"), person(\"Jan\",\"Gorecki\", role=\"aut\"), person(\"Michael\",\"Chirico\", role=\"aut\", comment = c(ORCID=\"0000-0003-0787-087X\")), person(\"Toby\",\"Hocking\", role=\"aut\", comment = c(ORCID=\"0000-0002-3146-0865\")), person(\"Benjamin\",\"Schwendinger\",role=\"aut\", comment = c(ORCID=\"0000-0003-3315-8114\")), person(\"Ivan\", \"Krylov\", role=\"aut\", email=\"ikrylov@disroot.org\", comment = c(ORCID=\"0000-0002-0172-3812\")), person(\"Pasha\",\"Stetsenko\", role=\"ctb\"), person(\"Tom\",\"Short\", role=\"ctb\"), person(\"Steve\",\"Lianoglou\", role=\"ctb\"), person(\"Eduard\",\"Antonyan\", role=\"ctb\"), person(\"Markus\",\"Bonsch\", role=\"ctb\"), person(\"Hugh\",\"Parsonage\", role=\"ctb\"), person(\"Scott\",\"Ritchie\", role=\"ctb\"), person(\"Kun\",\"Ren\", role=\"ctb\"), person(\"Xianying\",\"Tan\", role=\"ctb\"), person(\"Rick\",\"Saporta\", role=\"ctb\"), person(\"Otto\",\"Seiskari\", role=\"ctb\"), person(\"Xianghui\",\"Dong\", role=\"ctb\"), person(\"Michel\",\"Lang\", role=\"ctb\"), person(\"Watal\",\"Iwasaki\", role=\"ctb\"), person(\"Seth\",\"Wenchel\", role=\"ctb\"), person(\"Karl\",\"Broman\", role=\"ctb\"), person(\"Tobias\",\"Schmidt\", role=\"ctb\"), person(\"David\",\"Arenburg\", role=\"ctb\"), person(\"Ethan\",\"Smith\", role=\"ctb\"), person(\"Francois\",\"Cocquemas\", role=\"ctb\"), person(\"Matthieu\",\"Gomez\", role=\"ctb\"), person(\"Philippe\",\"Chataignon\", role=\"ctb\"), person(\"Nello\",\"Blaser\", role=\"ctb\"), person(\"Dmitry\",\"Selivanov\", role=\"ctb\"), person(\"Andrey\",\"Riabushenko\", role=\"ctb\"), person(\"Cheng\",\"Lee\", role=\"ctb\"), person(\"Declan\",\"Groves\", role=\"ctb\"), person(\"Daniel\",\"Possenriede\", role=\"ctb\"), person(\"Felipe\",\"Parages\", role=\"ctb\"), person(\"Denes\",\"Toth\", role=\"ctb\"), person(\"Mus\",\"Yaramaz-David\", role=\"ctb\"), person(\"Ayappan\",\"Perumal\", role=\"ctb\"), person(\"James\",\"Sams\", role=\"ctb\"), person(\"Martin\",\"Morgan\", role=\"ctb\"), person(\"Michael\",\"Quinn\", role=\"ctb\"), person(\"@javrucebo\",\"\", role=\"ctb\"), person(\"@marc-outins\",\"\", role=\"ctb\"), person(\"Roy\",\"Storey\", role=\"ctb\"), person(\"Manish\",\"Saraswat\", role=\"ctb\"), person(\"Morgan\",\"Jacob\", role=\"ctb\"), person(\"Michael\",\"Schubmehl\", role=\"ctb\"), person(\"Davis\",\"Vaughan\", role=\"ctb\"), person(\"Leonardo\",\"Silvestri\", role=\"ctb\"), person(\"Jim\",\"Hester\", role=\"ctb\"), person(\"Anthony\",\"Damico\", role=\"ctb\"), person(\"Sebastian\",\"Freundt\", role=\"ctb\"), person(\"David\",\"Simons\", role=\"ctb\"), person(\"Elliott\",\"Sales de Andrade\", role=\"ctb\"), person(\"Cole\",\"Miller\", role=\"ctb\"), person(\"Jens Peder\",\"Meldgaard\", role=\"ctb\"), person(\"Vaclav\",\"Tlapak\", role=\"ctb\"), person(\"Kevin\",\"Ushey\", role=\"ctb\"), person(\"Dirk\",\"Eddelbuettel\", role=\"ctb\"), person(\"Tony\",\"Fischetti\", 
role=\"ctb\"), person(\"Ofek\",\"Shilon\", role=\"ctb\"), person(\"Vadim\",\"Khotilovich\", role=\"ctb\"), person(\"Hadley\",\"Wickham\", role=\"ctb\"), person(\"Bennet\",\"Becker\", role=\"ctb\"), person(\"Kyle\",\"Haynes\", role=\"ctb\"), person(\"Boniface Christian\",\"Kamgang\", role=\"ctb\"), person(\"Olivier\",\"Delmarcell\", role=\"ctb\"), person(\"Josh\",\"O'Brien\", role=\"ctb\"), person(\"Dereck\",\"de Mezquita\", role=\"ctb\"), person(\"Michael\",\"Czekanski\", role=\"ctb\"), person(\"Dmitry\", \"Shemetov\", role=\"ctb\"), person(\"Nitish\", \"Jha\", role=\"ctb\"), person(\"Joshua\", \"Wu\", role=\"ctb\"), person(\"Iago\", \"Giné-Vázquez\", role=\"ctb\"), person(\"Anirban\", \"Chetia\", role=\"ctb\"), person(\"Doris\", \"Amoakohene\", role=\"ctb\"), person(\"Angel\", \"Feliz\", role=\"ctb\"), person(\"Michael\",\"Young\", role=\"ctb\"), person(\"Mark\", \"Seeto\", role=\"ctb\"), person(\"Philippe\", \"Grosjean\", role=\"ctb\"), person(\"Vincent\", \"Runge\", role=\"ctb\"), person(\"Christian\", \"Wia\", role=\"ctb\"), person(\"Elise\", \"Maigné\", role=\"ctb\"), person(\"Vincent\", \"Rocher\", role=\"ctb\"), person(\"Vijay\", \"Lulla\", role=\"ctb\"), person(\"Aljaž\", \"Sluga\", role=\"ctb\"), person(\"Bill\", \"Evans\", role=\"ctb\") )", + "NeedsCompilation": "yes", + "Author": "Tyson Barrett [aut, cre] (ORCID: ), Matt Dowle [aut], Arun Srinivasan [aut], Jan Gorecki [aut], Michael Chirico [aut] (ORCID: ), Toby Hocking [aut] (ORCID: ), Benjamin Schwendinger [aut] (ORCID: ), Ivan Krylov [aut] (ORCID: ), Pasha Stetsenko [ctb], Tom Short [ctb], Steve Lianoglou [ctb], Eduard Antonyan [ctb], Markus Bonsch [ctb], Hugh Parsonage [ctb], Scott Ritchie [ctb], Kun Ren [ctb], Xianying Tan [ctb], Rick Saporta [ctb], Otto Seiskari [ctb], Xianghui Dong [ctb], Michel Lang [ctb], Watal Iwasaki [ctb], Seth Wenchel [ctb], Karl Broman [ctb], Tobias Schmidt [ctb], David Arenburg [ctb], Ethan Smith [ctb], Francois Cocquemas [ctb], Matthieu Gomez [ctb], Philippe Chataignon [ctb], Nello Blaser [ctb], Dmitry Selivanov [ctb], Andrey Riabushenko [ctb], Cheng Lee [ctb], Declan Groves [ctb], Daniel Possenriede [ctb], Felipe Parages [ctb], Denes Toth [ctb], Mus Yaramaz-David [ctb], Ayappan Perumal [ctb], James Sams [ctb], Martin Morgan [ctb], Michael Quinn [ctb], @javrucebo [ctb], @marc-outins [ctb], Roy Storey [ctb], Manish Saraswat [ctb], Morgan Jacob [ctb], Michael Schubmehl [ctb], Davis Vaughan [ctb], Leonardo Silvestri [ctb], Jim Hester [ctb], Anthony Damico [ctb], Sebastian Freundt [ctb], David Simons [ctb], Elliott Sales de Andrade [ctb], Cole Miller [ctb], Jens Peder Meldgaard [ctb], Vaclav Tlapak [ctb], Kevin Ushey [ctb], Dirk Eddelbuettel [ctb], Tony Fischetti [ctb], Ofek Shilon [ctb], Vadim Khotilovich [ctb], Hadley Wickham [ctb], Bennet Becker [ctb], Kyle Haynes [ctb], Boniface Christian Kamgang [ctb], Olivier Delmarcell [ctb], Josh O'Brien [ctb], Dereck de Mezquita [ctb], Michael Czekanski [ctb], Dmitry Shemetov [ctb], Nitish Jha [ctb], Joshua Wu [ctb], Iago Giné-Vázquez [ctb], Anirban Chetia [ctb], Doris Amoakohene [ctb], Angel Feliz [ctb], Michael Young [ctb], Mark Seeto [ctb], Philippe Grosjean [ctb], Vincent Runge [ctb], Christian Wia [ctb], Elise Maigné [ctb], Vincent Rocher [ctb], Vijay Lulla [ctb], Aljaž Sluga [ctb], Bill Evans [ctb]", + "Maintainer": "Tyson Barrett ", + "Repository": "CRAN" + }, + "dplyr": { + "Package": "dplyr", + "Version": "1.1.4", + "Source": "Repository", + "Type": "Package", + "Title": "A Grammar of Data Manipulation", + "Authors@R": "c( person(\"Hadley\", 
\"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Romain\", \"François\", role = \"aut\", comment = c(ORCID = \"0000-0002-2444-4226\")), person(\"Lionel\", \"Henry\", role = \"aut\"), person(\"Kirill\", \"Müller\", role = \"aut\", comment = c(ORCID = \"0000-0002-1416-3412\")), person(\"Davis\", \"Vaughan\", , \"davis@posit.co\", role = \"aut\", comment = c(ORCID = \"0000-0003-4777-038X\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "A fast, consistent tool for working with data frame like objects, both in memory and out of memory.", + "License": "MIT + file LICENSE", + "URL": "https://dplyr.tidyverse.org, https://github.com/tidyverse/dplyr", + "BugReports": "https://github.com/tidyverse/dplyr/issues", + "Depends": [ + "R (>= 3.5.0)" + ], + "Imports": [ + "cli (>= 3.4.0)", + "generics", + "glue (>= 1.3.2)", + "lifecycle (>= 1.0.3)", + "magrittr (>= 1.5)", + "methods", + "pillar (>= 1.9.0)", + "R6", + "rlang (>= 1.1.0)", + "tibble (>= 3.2.0)", + "tidyselect (>= 1.2.0)", + "utils", + "vctrs (>= 0.6.4)" + ], + "Suggests": [ + "bench", + "broom", + "callr", + "covr", + "DBI", + "dbplyr (>= 2.2.1)", + "ggplot2", + "knitr", + "Lahman", + "lobstr", + "microbenchmark", + "nycflights13", + "purrr", + "rmarkdown", + "RMySQL", + "RPostgreSQL", + "RSQLite", + "stringi (>= 1.7.6)", + "testthat (>= 3.1.5)", + "tidyr (>= 1.3.0)", + "withr" + ], + "VignetteBuilder": "knitr", + "Config/Needs/website": "tidyverse, shiny, pkgdown, tidyverse/tidytemplate", + "Config/testthat/edition": "3", + "Encoding": "UTF-8", + "LazyData": "true", + "RoxygenNote": "7.2.3", + "NeedsCompilation": "yes", + "Author": "Hadley Wickham [aut, cre] (), Romain François [aut] (), Lionel Henry [aut], Kirill Müller [aut] (), Davis Vaughan [aut] (), Posit Software, PBC [cph, fnd]", + "Maintainer": "Hadley Wickham ", + "Repository": "CRAN" + }, + "generics": { + "Package": "generics", + "Version": "0.1.4", + "Source": "Repository", + "Title": "Common S3 Generics not Provided by Base R Methods Related to Model Fitting", + "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Max\", \"Kuhn\", , \"max@posit.co\", role = \"aut\"), person(\"Davis\", \"Vaughan\", , \"davis@posit.co\", role = \"aut\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\"), comment = c(ROR = \"https://ror.org/03wc8by49\")) )", + "Description": "In order to reduce potential package dependencies and conflicts, generics provides a number of commonly used S3 generics.", + "License": "MIT + file LICENSE", + "URL": "https://generics.r-lib.org, https://github.com/r-lib/generics", + "BugReports": "https://github.com/r-lib/generics/issues", + "Depends": [ + "R (>= 3.6)" + ], + "Imports": [ + "methods" + ], + "Suggests": [ + "covr", + "pkgload", + "testthat (>= 3.0.0)", + "tibble", + "withr" + ], + "Config/Needs/website": "tidyverse/tidytemplate", + "Config/testthat/edition": "3", + "Encoding": "UTF-8", + "RoxygenNote": "7.3.2", + "NeedsCompilation": "no", + "Author": "Hadley Wickham [aut, cre] (ORCID: ), Max Kuhn [aut], Davis Vaughan [aut], Posit Software, PBC [cph, fnd] (ROR: )", + "Maintainer": "Hadley Wickham ", + "Repository": "CRAN" + }, + "glue": { + "Package": "glue", + "Version": "1.8.0", + "Source": "Repository", + "Title": "Interpreted String Literals", + "Authors@R": "c( person(\"Jim\", \"Hester\", role = \"aut\", comment = c(ORCID = 
\"0000-0002-2739-7082\")), person(\"Jennifer\", \"Bryan\", , \"jenny@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0002-6983-2759\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "An implementation of interpreted string literals, inspired by Python's Literal String Interpolation and Docstrings and Julia's Triple-Quoted String Literals .", + "License": "MIT + file LICENSE", + "URL": "https://glue.tidyverse.org/, https://github.com/tidyverse/glue", + "BugReports": "https://github.com/tidyverse/glue/issues", + "Depends": [ + "R (>= 3.6)" + ], + "Imports": [ + "methods" + ], + "Suggests": [ + "crayon", + "DBI (>= 1.2.0)", + "dplyr", + "knitr", + "magrittr", + "rlang", + "rmarkdown", + "RSQLite", + "testthat (>= 3.2.0)", + "vctrs (>= 0.3.0)", + "waldo (>= 0.5.3)", + "withr" + ], + "VignetteBuilder": "knitr", + "ByteCompile": "true", + "Config/Needs/website": "bench, forcats, ggbeeswarm, ggplot2, R.utils, rprintf, tidyr, tidyverse/tidytemplate", + "Config/testthat/edition": "3", + "Encoding": "UTF-8", + "RoxygenNote": "7.3.2", + "NeedsCompilation": "yes", + "Author": "Jim Hester [aut] (), Jennifer Bryan [aut, cre] (), Posit Software, PBC [cph, fnd]", + "Maintainer": "Jennifer Bryan ", + "Repository": "CRAN" + }, + "hms": { + "Package": "hms", + "Version": "1.1.3", + "Source": "Repository", + "Title": "Pretty Time of Day", + "Date": "2023-03-21", + "Authors@R": "c( person(\"Kirill\", \"Müller\", role = c(\"aut\", \"cre\"), email = \"kirill@cynkra.com\", comment = c(ORCID = \"0000-0002-1416-3412\")), person(\"R Consortium\", role = \"fnd\"), person(\"RStudio\", role = \"fnd\") )", + "Description": "Implements an S3 class for storing and formatting time-of-day values, based on the 'difftime' class.", + "Imports": [ + "lifecycle", + "methods", + "pkgconfig", + "rlang (>= 1.0.2)", + "vctrs (>= 0.3.8)" + ], + "Suggests": [ + "crayon", + "lubridate", + "pillar (>= 1.1.0)", + "testthat (>= 3.0.0)" + ], + "License": "MIT + file LICENSE", + "Encoding": "UTF-8", + "URL": "https://hms.tidyverse.org/, https://github.com/tidyverse/hms", + "BugReports": "https://github.com/tidyverse/hms/issues", + "RoxygenNote": "7.2.3", + "Config/testthat/edition": "3", + "Config/autostyle/scope": "line_breaks", + "Config/autostyle/strict": "false", + "Config/Needs/website": "tidyverse/tidytemplate", + "NeedsCompilation": "no", + "Author": "Kirill Müller [aut, cre] (), R Consortium [fnd], RStudio [fnd]", + "Maintainer": "Kirill Müller ", + "Repository": "CRAN" + }, + "httr": { + "Package": "httr", + "Version": "1.4.7", + "Source": "Repository", + "Title": "Tools for Working with URLs and HTTP", + "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\")), person(\"Posit, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "Useful tools for working with HTTP organised by HTTP verbs (GET(), POST(), etc). 
Configuration functions make it easy to control additional request components (authenticate(), add_headers() and so on).", + "License": "MIT + file LICENSE", + "URL": "https://httr.r-lib.org/, https://github.com/r-lib/httr", + "BugReports": "https://github.com/r-lib/httr/issues", + "Depends": [ + "R (>= 3.5)" + ], + "Imports": [ + "curl (>= 5.0.2)", + "jsonlite", + "mime", + "openssl (>= 0.8)", + "R6" + ], + "Suggests": [ + "covr", + "httpuv", + "jpeg", + "knitr", + "png", + "readr", + "rmarkdown", + "testthat (>= 0.8.0)", + "xml2" + ], + "VignetteBuilder": "knitr", + "Config/Needs/website": "tidyverse/tidytemplate", + "Encoding": "UTF-8", + "RoxygenNote": "7.2.3", + "NeedsCompilation": "no", + "Author": "Hadley Wickham [aut, cre], Posit, PBC [cph, fnd]", + "Maintainer": "Hadley Wickham ", + "Repository": "CRAN" + }, + "httr2": { + "Package": "httr2", + "Version": "1.2.1", + "Source": "Repository", + "Title": "Perform HTTP Requests and Process the Responses", + "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")), person(\"Maximilian\", \"Girlich\", role = \"ctb\") )", + "Description": "Tools for creating and modifying HTTP requests, then performing them and processing the results. 'httr2' is a modern re-imagining of 'httr' that uses a pipe-based interface and solves more of the problems that API wrapping packages face.", + "License": "MIT + file LICENSE", + "URL": "https://httr2.r-lib.org, https://github.com/r-lib/httr2", + "BugReports": "https://github.com/r-lib/httr2/issues", + "Depends": [ + "R (>= 4.1)" + ], + "Imports": [ + "cli (>= 3.0.0)", + "curl (>= 6.4.0)", + "glue", + "lifecycle", + "magrittr", + "openssl", + "R6", + "rappdirs", + "rlang (>= 1.1.0)", + "vctrs (>= 0.6.3)", + "withr" + ], + "Suggests": [ + "askpass", + "bench", + "clipr", + "covr", + "docopt", + "httpuv", + "jose", + "jsonlite", + "knitr", + "later (>= 1.4.0)", + "nanonext", + "paws.common", + "promises", + "rmarkdown", + "testthat (>= 3.1.8)", + "tibble", + "webfakes (>= 1.4.0)", + "xml2" + ], + "VignetteBuilder": "knitr", + "Config/Needs/website": "tidyverse/tidytemplate", + "Config/testthat/edition": "3", + "Config/testthat/parallel": "true", + "Config/testthat/start-first": "resp-stream, req-perform", + "Encoding": "UTF-8", + "RoxygenNote": "7.3.2", + "NeedsCompilation": "no", + "Author": "Hadley Wickham [aut, cre], Posit Software, PBC [cph, fnd], Maximilian Girlich [ctb]", + "Maintainer": "Hadley Wickham ", + "Repository": "CRAN" + }, + "janitor": { + "Package": "janitor", + "Version": "2.2.1", + "Source": "Repository", + "Title": "Simple Tools for Examining and Cleaning Dirty Data", + "Authors@R": "c(person(\"Sam\", \"Firke\", email = \"samuel.firke@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Bill\", \"Denney\", email = \"wdenney@humanpredictions.com\", role = \"ctb\"), person(\"Chris\", \"Haid\", email = \"chrishaid@gmail.com\", role = \"ctb\"), person(\"Ryan\", \"Knight\", email = \"ryangknight@gmail.com\", role = \"ctb\"), person(\"Malte\", \"Grosser\", email = \"malte.grosser@gmail.com\", role = \"ctb\"), person(\"Jonathan\", \"Zadra\", email = \"jonathan.zadra@sorensonimpact.com\", role = \"ctb\"))", + "Description": "The main janitor functions can: perfectly format data.frame column names; provide quick counts of variable combinations (i.e., frequency tables and crosstabs); and explore duplicate records. Other janitor functions nicely format the tabulation results. 
These tabulate-and-report functions approximate popular features of SPSS and Microsoft Excel. This package follows the principles of the \"tidyverse\" and works well with the pipe function %>%. janitor was built with beginning-to-intermediate R users in mind and is optimized for user-friendliness.", + "URL": "https://github.com/sfirke/janitor, https://sfirke.github.io/janitor/", + "BugReports": "https://github.com/sfirke/janitor/issues", + "Depends": [ + "R (>= 3.1.2)" + ], + "Imports": [ + "dplyr (>= 1.0.0)", + "hms", + "lifecycle", + "lubridate", + "magrittr", + "purrr", + "rlang", + "stringi", + "stringr", + "snakecase (>= 0.9.2)", + "tidyselect (>= 1.0.0)", + "tidyr (>= 0.7.0)" + ], + "License": "MIT + file LICENSE", + "RoxygenNote": "7.2.3", + "Suggests": [ + "dbplyr", + "knitr", + "rmarkdown", + "RSQLite", + "sf", + "testthat (>= 3.0.0)", + "tibble", + "tidygraph" + ], + "VignetteBuilder": "knitr", + "Encoding": "UTF-8", + "Config/testthat/edition": "3", + "NeedsCompilation": "no", + "Author": "Sam Firke [aut, cre], Bill Denney [ctb], Chris Haid [ctb], Ryan Knight [ctb], Malte Grosser [ctb], Jonathan Zadra [ctb]", + "Maintainer": "Sam Firke ", + "Repository": "CRAN" + }, + "jsonlite": { + "Package": "jsonlite", + "Version": "2.0.0", + "Source": "Repository", + "Title": "A Simple and Robust JSON Parser and Generator for R", + "License": "MIT + file LICENSE", + "Depends": [ + "methods" + ], + "Authors@R": "c( person(\"Jeroen\", \"Ooms\", role = c(\"aut\", \"cre\"), email = \"jeroenooms@gmail.com\", comment = c(ORCID = \"0000-0002-4035-0289\")), person(\"Duncan\", \"Temple Lang\", role = \"ctb\"), person(\"Lloyd\", \"Hilaiel\", role = \"cph\", comment=\"author of bundled libyajl\"))", + "URL": "https://jeroen.r-universe.dev/jsonlite https://arxiv.org/abs/1403.2805", + "BugReports": "https://github.com/jeroen/jsonlite/issues", + "Maintainer": "Jeroen Ooms ", + "VignetteBuilder": "knitr, R.rsp", + "Description": "A reasonably fast JSON parser and generator, optimized for statistical data and the web. Offers simple, flexible tools for working with JSON in R, and is particularly powerful for building pipelines and interacting with a web API. The implementation is based on the mapping described in the vignette (Ooms, 2014). In addition to converting JSON data from/to R objects, 'jsonlite' contains functions to stream, validate, and prettify JSON data. 
The unit tests included with the package verify that all edge cases are encoded and decoded consistently for use with dynamic data in systems and applications.", + "Suggests": [ + "httr", + "vctrs", + "testthat", + "knitr", + "rmarkdown", + "R.rsp", + "sf" + ], + "RoxygenNote": "7.3.2", + "Encoding": "UTF-8", + "NeedsCompilation": "yes", + "Author": "Jeroen Ooms [aut, cre] (), Duncan Temple Lang [ctb], Lloyd Hilaiel [cph] (author of bundled libyajl)", + "Repository": "CRAN" + }, + "lifecycle": { + "Package": "lifecycle", + "Version": "1.0.4", + "Source": "Repository", + "Title": "Manage the Life Cycle of your Package Functions", + "Authors@R": "c( person(\"Lionel\", \"Henry\", , \"lionel@posit.co\", role = c(\"aut\", \"cre\")), person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = \"aut\", comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "Manage the life cycle of your exported functions with shared conventions, documentation badges, and user-friendly deprecation warnings.", + "License": "MIT + file LICENSE", + "URL": "https://lifecycle.r-lib.org/, https://github.com/r-lib/lifecycle", + "BugReports": "https://github.com/r-lib/lifecycle/issues", + "Depends": [ + "R (>= 3.6)" + ], + "Imports": [ + "cli (>= 3.4.0)", + "glue", + "rlang (>= 1.1.0)" + ], + "Suggests": [ + "covr", + "crayon", + "knitr", + "lintr", + "rmarkdown", + "testthat (>= 3.0.1)", + "tibble", + "tidyverse", + "tools", + "vctrs", + "withr" + ], + "VignetteBuilder": "knitr", + "Config/Needs/website": "tidyverse/tidytemplate, usethis", + "Config/testthat/edition": "3", + "Encoding": "UTF-8", + "RoxygenNote": "7.2.1", + "NeedsCompilation": "no", + "Author": "Lionel Henry [aut, cre], Hadley Wickham [aut] (), Posit Software, PBC [cph, fnd]", + "Maintainer": "Lionel Henry ", + "Repository": "CRAN" + }, + "lubridate": { + "Package": "lubridate", + "Version": "1.9.4", + "Source": "Repository", + "Type": "Package", + "Title": "Make Dealing with Dates a Little Easier", + "Authors@R": "c( person(\"Vitalie\", \"Spinu\", , \"spinuvit@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Garrett\", \"Grolemund\", role = \"aut\"), person(\"Hadley\", \"Wickham\", role = \"aut\"), person(\"Davis\", \"Vaughan\", role = \"ctb\"), person(\"Ian\", \"Lyttle\", role = \"ctb\"), person(\"Imanuel\", \"Costigan\", role = \"ctb\"), person(\"Jason\", \"Law\", role = \"ctb\"), person(\"Doug\", \"Mitarotonda\", role = \"ctb\"), person(\"Joseph\", \"Larmarange\", role = \"ctb\"), person(\"Jonathan\", \"Boiser\", role = \"ctb\"), person(\"Chel Hee\", \"Lee\", role = \"ctb\") )", + "Maintainer": "Vitalie Spinu ", + "Description": "Functions to work with date-times and time-spans: fast and user friendly parsing of date-time data, extraction and updating of components of a date-time (years, months, days, hours, minutes, and seconds), algebraic manipulation on date-time and time-span objects. 
The 'lubridate' package has a consistent and memorable syntax that makes working with dates easy and fun.", + "License": "GPL (>= 2)", + "URL": "https://lubridate.tidyverse.org, https://github.com/tidyverse/lubridate", + "BugReports": "https://github.com/tidyverse/lubridate/issues", + "Depends": [ + "methods", + "R (>= 3.2)" + ], + "Imports": [ + "generics", + "timechange (>= 0.3.0)" + ], + "Suggests": [ + "covr", + "knitr", + "rmarkdown", + "testthat (>= 2.1.0)", + "vctrs (>= 0.6.5)" + ], + "Enhances": [ + "chron", + "data.table", + "timeDate", + "tis", + "zoo" + ], + "VignetteBuilder": "knitr", + "Config/Needs/website": "tidyverse/tidytemplate", + "Config/testthat/edition": "3", + "Encoding": "UTF-8", + "LazyData": "true", + "RoxygenNote": "7.2.3", + "SystemRequirements": "C++11, A system with zoneinfo data (e.g. /usr/share/zoneinfo). On Windows the zoneinfo included with R is used.", + "Collate": "'Dates.r' 'POSIXt.r' 'util.r' 'parse.r' 'timespans.r' 'intervals.r' 'difftimes.r' 'durations.r' 'periods.r' 'accessors-date.R' 'accessors-day.r' 'accessors-dst.r' 'accessors-hour.r' 'accessors-minute.r' 'accessors-month.r' 'accessors-quarter.r' 'accessors-second.r' 'accessors-tz.r' 'accessors-week.r' 'accessors-year.r' 'am-pm.r' 'time-zones.r' 'numeric.r' 'coercion.r' 'constants.r' 'cyclic_encoding.r' 'data.r' 'decimal-dates.r' 'deprecated.r' 'format_ISO8601.r' 'guess.r' 'hidden.r' 'instants.r' 'leap-years.r' 'ops-addition.r' 'ops-compare.r' 'ops-division.r' 'ops-integer-division.r' 'ops-m+.r' 'ops-modulo.r' 'ops-multiplication.r' 'ops-subtraction.r' 'package.r' 'pretty.r' 'round.r' 'stamp.r' 'tzdir.R' 'update.r' 'vctrs.R' 'zzz.R'", + "NeedsCompilation": "yes", + "Author": "Vitalie Spinu [aut, cre], Garrett Grolemund [aut], Hadley Wickham [aut], Davis Vaughan [ctb], Ian Lyttle [ctb], Imanuel Costigan [ctb], Jason Law [ctb], Doug Mitarotonda [ctb], Joseph Larmarange [ctb], Jonathan Boiser [ctb], Chel Hee Lee [ctb]", + "Repository": "CRAN" + }, + "magrittr": { + "Package": "magrittr", + "Version": "2.0.3", + "Source": "Repository", + "Type": "Package", + "Title": "A Forward-Pipe Operator for R", + "Authors@R": "c( person(\"Stefan Milton\", \"Bache\", , \"stefan@stefanbache.dk\", role = c(\"aut\", \"cph\"), comment = \"Original author and creator of magrittr\"), person(\"Hadley\", \"Wickham\", , \"hadley@rstudio.com\", role = \"aut\"), person(\"Lionel\", \"Henry\", , \"lionel@rstudio.com\", role = \"cre\"), person(\"RStudio\", role = c(\"cph\", \"fnd\")) )", + "Description": "Provides a mechanism for chaining commands with a new forward-pipe operator, %>%. This operator will forward a value, or the result of an expression, into the next function call/expression. There is flexible support for the type of right-hand side expressions. For more information, see package vignette. 
To quote Rene Magritte, \"Ceci n'est pas un pipe.\"", + "License": "MIT + file LICENSE", + "URL": "https://magrittr.tidyverse.org, https://github.com/tidyverse/magrittr", + "BugReports": "https://github.com/tidyverse/magrittr/issues", + "Depends": [ + "R (>= 3.4.0)" + ], + "Suggests": [ + "covr", + "knitr", + "rlang", + "rmarkdown", + "testthat" + ], + "VignetteBuilder": "knitr", + "ByteCompile": "Yes", + "Config/Needs/website": "tidyverse/tidytemplate", + "Encoding": "UTF-8", + "RoxygenNote": "7.1.2", + "NeedsCompilation": "yes", + "Author": "Stefan Milton Bache [aut, cph] (Original author and creator of magrittr), Hadley Wickham [aut], Lionel Henry [cre], RStudio [cph, fnd]", + "Maintainer": "Lionel Henry ", + "Repository": "CRAN" + }, + "mime": { + "Package": "mime", + "Version": "0.13", + "Source": "Repository", + "Type": "Package", + "Title": "Map Filenames to MIME Types", + "Authors@R": "c( person(\"Yihui\", \"Xie\", role = c(\"aut\", \"cre\"), email = \"xie@yihui.name\", comment = c(ORCID = \"0000-0003-0645-5666\", URL = \"https://yihui.org\")), person(\"Jeffrey\", \"Horner\", role = \"ctb\"), person(\"Beilei\", \"Bian\", role = \"ctb\") )", + "Description": "Guesses the MIME type from a filename extension using the data derived from /etc/mime.types in UNIX-type systems.", + "Imports": [ + "tools" + ], + "License": "GPL", + "URL": "https://github.com/yihui/mime", + "BugReports": "https://github.com/yihui/mime/issues", + "RoxygenNote": "7.3.2", + "Encoding": "UTF-8", + "NeedsCompilation": "yes", + "Author": "Yihui Xie [aut, cre] (, https://yihui.org), Jeffrey Horner [ctb], Beilei Bian [ctb]", + "Maintainer": "Yihui Xie ", + "Repository": "CRAN" + }, + "openssl": { + "Package": "openssl", + "Version": "2.3.3", + "Source": "Repository", + "Type": "Package", + "Title": "Toolkit for Encryption, Signatures and Certificates Based on OpenSSL", + "Authors@R": "c(person(\"Jeroen\", \"Ooms\", role = c(\"aut\", \"cre\"), email = \"jeroenooms@gmail.com\", comment = c(ORCID = \"0000-0002-4035-0289\")), person(\"Oliver\", \"Keyes\", role = \"ctb\"))", + "Description": "Bindings to OpenSSL libssl and libcrypto, plus custom SSH key parsers. Supports RSA, DSA and EC curves P-256, P-384, P-521, and curve25519. Cryptographic signatures can either be created and verified manually or via x509 certificates. AES can be used in cbc, ctr or gcm mode for symmetric encryption; RSA for asymmetric (public key) encryption or EC for Diffie Hellman. High-level envelope functions combine RSA and AES for encrypting arbitrary sized data. 
Other utilities include key generators, hash functions (md5, sha1, sha256, etc), base64 encoder, a secure random number generator, and 'bignum' math methods for manually performing crypto calculations on large multibyte integers.", + "License": "MIT + file LICENSE", + "URL": "https://jeroen.r-universe.dev/openssl", + "BugReports": "https://github.com/jeroen/openssl/issues", + "SystemRequirements": "OpenSSL >= 1.0.2", + "VignetteBuilder": "knitr", + "Imports": [ + "askpass" + ], + "Suggests": [ + "curl", + "testthat (>= 2.1.0)", + "digest", + "knitr", + "rmarkdown", + "jsonlite", + "jose", + "sodium" + ], + "RoxygenNote": "7.3.2", + "Encoding": "UTF-8", + "NeedsCompilation": "yes", + "Author": "Jeroen Ooms [aut, cre] (ORCID: ), Oliver Keyes [ctb]", + "Maintainer": "Jeroen Ooms ", + "Repository": "CRAN" + }, + "pillar": { + "Package": "pillar", + "Version": "1.11.0", + "Source": "Repository", + "Title": "Coloured Formatting for Columns", + "Authors@R": "c(person(given = \"Kirill\", family = \"M\\u00fcller\", role = c(\"aut\", \"cre\"), email = \"kirill@cynkra.com\", comment = c(ORCID = \"0000-0002-1416-3412\")), person(given = \"Hadley\", family = \"Wickham\", role = \"aut\"), person(given = \"RStudio\", role = \"cph\"))", + "Description": "Provides 'pillar' and 'colonnade' generics designed for formatting columns of data using the full range of colours provided by modern terminals.", + "License": "MIT + file LICENSE", + "URL": "https://pillar.r-lib.org/, https://github.com/r-lib/pillar", + "BugReports": "https://github.com/r-lib/pillar/issues", + "Imports": [ + "cli (>= 2.3.0)", + "glue", + "lifecycle", + "rlang (>= 1.0.2)", + "utf8 (>= 1.1.0)", + "utils", + "vctrs (>= 0.5.0)" + ], + "Suggests": [ + "bit64", + "DBI", + "debugme", + "DiagrammeR", + "dplyr", + "formattable", + "ggplot2", + "knitr", + "lubridate", + "nanotime", + "nycflights13", + "palmerpenguins", + "rmarkdown", + "scales", + "stringi", + "survival", + "testthat (>= 3.1.1)", + "tibble", + "units (>= 0.7.2)", + "vdiffr", + "withr" + ], + "VignetteBuilder": "knitr", + "Encoding": "UTF-8", + "RoxygenNote": "7.3.2.9000", + "Config/testthat/edition": "3", + "Config/testthat/parallel": "true", + "Config/testthat/start-first": "format_multi_fuzz, format_multi_fuzz_2, format_multi, ctl_colonnade, ctl_colonnade_1, ctl_colonnade_2", + "Config/autostyle/scope": "line_breaks", + "Config/autostyle/strict": "true", + "Config/gha/extra-packages": "units=?ignore-before-r=4.3.0", + "Config/Needs/website": "tidyverse/tidytemplate", + "NeedsCompilation": "no", + "Author": "Kirill Müller [aut, cre] (ORCID: ), Hadley Wickham [aut], RStudio [cph]", + "Maintainer": "Kirill Müller ", + "Repository": "CRAN" + }, + "pkgconfig": { + "Package": "pkgconfig", + "Version": "2.0.3", + "Source": "Repository", + "Title": "Private Configuration for 'R' Packages", + "Author": "Gábor Csárdi", + "Maintainer": "Gábor Csárdi ", + "Description": "Set configuration options on a per-package basis. 
Options set by a given package only apply to that package, other packages are unaffected.", + "License": "MIT + file LICENSE", + "LazyData": "true", + "Imports": [ + "utils" + ], + "Suggests": [ + "covr", + "testthat", + "disposables (>= 1.0.3)" + ], + "URL": "https://github.com/r-lib/pkgconfig#readme", + "BugReports": "https://github.com/r-lib/pkgconfig/issues", + "Encoding": "UTF-8", + "NeedsCompilation": "no", + "Repository": "CRAN" + }, + "prettyunits": { + "Package": "prettyunits", + "Version": "1.2.0", + "Source": "Repository", + "Title": "Pretty, Human Readable Formatting of Quantities", + "Authors@R": "c( person(\"Gabor\", \"Csardi\", email=\"csardi.gabor@gmail.com\", role=c(\"aut\", \"cre\")), person(\"Bill\", \"Denney\", email=\"wdenney@humanpredictions.com\", role=c(\"ctb\"), comment=c(ORCID=\"0000-0002-5759-428X\")), person(\"Christophe\", \"Regouby\", email=\"christophe.regouby@free.fr\", role=c(\"ctb\")) )", + "Description": "Pretty, human readable formatting of quantities. Time intervals: '1337000' -> '15d 11h 23m 20s'. Vague time intervals: '2674000' -> 'about a month ago'. Bytes: '1337' -> '1.34 kB'. Rounding: '99' with 3 significant digits -> '99.0' p-values: '0.00001' -> '<0.0001'. Colors: '#FF0000' -> 'red'. Quantities: '1239437' -> '1.24 M'.", + "License": "MIT + file LICENSE", + "URL": "https://github.com/r-lib/prettyunits", + "BugReports": "https://github.com/r-lib/prettyunits/issues", + "Depends": [ + "R(>= 2.10)" + ], + "Suggests": [ + "codetools", + "covr", + "testthat" + ], + "RoxygenNote": "7.2.3", + "Encoding": "UTF-8", + "NeedsCompilation": "no", + "Author": "Gabor Csardi [aut, cre], Bill Denney [ctb] (), Christophe Regouby [ctb]", + "Maintainer": "Gabor Csardi ", + "Repository": "CRAN" + }, + "progress": { + "Package": "progress", + "Version": "1.2.3", + "Source": "Repository", + "Title": "Terminal Progress Bars", + "Authors@R": "c( person(\"Gábor\", \"Csárdi\", , \"csardi.gabor@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Rich\", \"FitzJohn\", role = \"aut\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "Configurable Progress bars, they may include percentage, elapsed time, and/or the estimated completion time. They work in terminals, in 'Emacs' 'ESS', 'RStudio', 'Windows' 'Rgui' and the 'macOS' 'R.app'. 
The package also provides a 'C++' 'API', that works with or without 'Rcpp'.", + "License": "MIT + file LICENSE", + "URL": "https://github.com/r-lib/progress#readme, http://r-lib.github.io/progress/", + "BugReports": "https://github.com/r-lib/progress/issues", + "Depends": [ + "R (>= 3.6)" + ], + "Imports": [ + "crayon", + "hms", + "prettyunits", + "R6" + ], + "Suggests": [ + "Rcpp", + "testthat (>= 3.0.0)", + "withr" + ], + "Config/Needs/website": "tidyverse/tidytemplate", + "Config/testthat/edition": "3", + "Encoding": "UTF-8", + "RoxygenNote": "7.2.3", + "NeedsCompilation": "no", + "Author": "Gábor Csárdi [aut, cre], Rich FitzJohn [aut], Posit Software, PBC [cph, fnd]", + "Maintainer": "Gábor Csárdi ", + "Repository": "CRAN" + }, + "purrr": { + "Package": "purrr", + "Version": "1.1.0", + "Source": "Repository", + "Title": "Functional Programming Tools", + "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Lionel\", \"Henry\", , \"lionel@posit.co\", role = \"aut\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\"), comment = c(ROR = \"https://ror.org/03wc8by49\")) )", + "Description": "A complete and consistent functional programming toolkit for R.", + "License": "MIT + file LICENSE", + "URL": "https://purrr.tidyverse.org/, https://github.com/tidyverse/purrr", + "BugReports": "https://github.com/tidyverse/purrr/issues", + "Depends": [ + "R (>= 4.1)" + ], + "Imports": [ + "cli (>= 3.6.1)", + "lifecycle (>= 1.0.3)", + "magrittr (>= 1.5.0)", + "rlang (>= 1.1.1)", + "vctrs (>= 0.6.3)" + ], + "Suggests": [ + "carrier (>= 0.2.0)", + "covr", + "dplyr (>= 0.7.8)", + "httr", + "knitr", + "lubridate", + "mirai (>= 2.4.0)", + "rmarkdown", + "testthat (>= 3.0.0)", + "tibble", + "tidyselect" + ], + "LinkingTo": [ + "cli" + ], + "VignetteBuilder": "knitr", + "Biarch": "true", + "Config/build/compilation-database": "true", + "Config/Needs/website": "tidyverse/tidytemplate, tidyr", + "Config/testthat/edition": "3", + "Config/testthat/parallel": "TRUE", + "Encoding": "UTF-8", + "RoxygenNote": "7.3.2", + "NeedsCompilation": "yes", + "Author": "Hadley Wickham [aut, cre] (ORCID: ), Lionel Henry [aut], Posit Software, PBC [cph, fnd] (ROR: )", + "Maintainer": "Hadley Wickham ", + "Repository": "CRAN" + }, + "rappdirs": { + "Package": "rappdirs", + "Version": "0.3.3", + "Source": "Repository", + "Type": "Package", + "Title": "Application Directories: Determine Where to Save Data, Caches, and Logs", + "Authors@R": "c(person(given = \"Hadley\", family = \"Wickham\", role = c(\"trl\", \"cre\", \"cph\"), email = \"hadley@rstudio.com\"), person(given = \"RStudio\", role = \"cph\"), person(given = \"Sridhar\", family = \"Ratnakumar\", role = \"aut\"), person(given = \"Trent\", family = \"Mick\", role = \"aut\"), person(given = \"ActiveState\", role = \"cph\", comment = \"R/appdir.r, R/cache.r, R/data.r, R/log.r translated from appdirs\"), person(given = \"Eddy\", family = \"Petrisor\", role = \"ctb\"), person(given = \"Trevor\", family = \"Davis\", role = c(\"trl\", \"aut\")), person(given = \"Gabor\", family = \"Csardi\", role = \"ctb\"), person(given = \"Gregory\", family = \"Jefferis\", role = \"ctb\"))", + "Description": "An easy way to determine which directories on the users computer you should use to save data, caches and logs. 
A port of Python's 'Appdirs' () to R.", + "License": "MIT + file LICENSE", + "URL": "https://rappdirs.r-lib.org, https://github.com/r-lib/rappdirs", + "BugReports": "https://github.com/r-lib/rappdirs/issues", + "Depends": [ + "R (>= 3.2)" + ], + "Suggests": [ + "roxygen2", + "testthat (>= 3.0.0)", + "covr", + "withr" + ], + "Copyright": "Original python appdirs module copyright (c) 2010 ActiveState Software Inc. R port copyright Hadley Wickham, RStudio. See file LICENSE for details.", + "Encoding": "UTF-8", + "RoxygenNote": "7.1.1", + "Config/testthat/edition": "3", + "NeedsCompilation": "yes", + "Author": "Hadley Wickham [trl, cre, cph], RStudio [cph], Sridhar Ratnakumar [aut], Trent Mick [aut], ActiveState [cph] (R/appdir.r, R/cache.r, R/data.r, R/log.r translated from appdirs), Eddy Petrisor [ctb], Trevor Davis [trl, aut], Gabor Csardi [ctb], Gregory Jefferis [ctb]", + "Maintainer": "Hadley Wickham ", + "Repository": "CRAN" + }, + "readr": { + "Package": "readr", + "Version": "2.1.5", + "Source": "Repository", + "Title": "Read Rectangular Text Data", + "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = \"aut\"), person(\"Jim\", \"Hester\", role = \"aut\"), person(\"Romain\", \"Francois\", role = \"ctb\"), person(\"Jennifer\", \"Bryan\", , \"jenny@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0002-6983-2759\")), person(\"Shelby\", \"Bearrows\", role = \"ctb\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")), person(\"https://github.com/mandreyel/\", role = \"cph\", comment = \"mio library\"), person(\"Jukka\", \"Jylänki\", role = c(\"ctb\", \"cph\"), comment = \"grisu3 implementation\"), person(\"Mikkel\", \"Jørgensen\", role = c(\"ctb\", \"cph\"), comment = \"grisu3 implementation\") )", + "Description": "The goal of 'readr' is to provide a fast and friendly way to read rectangular data (like 'csv', 'tsv', and 'fwf'). 
It is designed to flexibly parse many types of data found in the wild, while still cleanly failing when data unexpectedly changes.", + "License": "MIT + file LICENSE", + "URL": "https://readr.tidyverse.org, https://github.com/tidyverse/readr", + "BugReports": "https://github.com/tidyverse/readr/issues", + "Depends": [ + "R (>= 3.6)" + ], + "Imports": [ + "cli (>= 3.2.0)", + "clipr", + "crayon", + "hms (>= 0.4.1)", + "lifecycle (>= 0.2.0)", + "methods", + "R6", + "rlang", + "tibble", + "utils", + "vroom (>= 1.6.0)" + ], + "Suggests": [ + "covr", + "curl", + "datasets", + "knitr", + "rmarkdown", + "spelling", + "stringi", + "testthat (>= 3.2.0)", + "tzdb (>= 0.1.1)", + "waldo", + "withr", + "xml2" + ], + "LinkingTo": [ + "cpp11", + "tzdb (>= 0.1.1)" + ], + "VignetteBuilder": "knitr", + "Config/Needs/website": "tidyverse, tidyverse/tidytemplate", + "Config/testthat/edition": "3", + "Config/testthat/parallel": "false", + "Encoding": "UTF-8", + "Language": "en-US", + "RoxygenNote": "7.2.3", + "NeedsCompilation": "yes", + "Author": "Hadley Wickham [aut], Jim Hester [aut], Romain Francois [ctb], Jennifer Bryan [aut, cre] (), Shelby Bearrows [ctb], Posit Software, PBC [cph, fnd], https://github.com/mandreyel/ [cph] (mio library), Jukka Jylänki [ctb, cph] (grisu3 implementation), Mikkel Jørgensen [ctb, cph] (grisu3 implementation)", + "Maintainer": "Jennifer Bryan ", + "Repository": "RSPM" + }, + "renv": { + "Package": "renv", + "Version": "1.1.4", + "Source": "Repository", + "Type": "Package", + "Title": "Project Environments", + "Authors@R": "c( person(\"Kevin\", \"Ushey\", role = c(\"aut\", \"cre\"), email = \"kevin@rstudio.com\", comment = c(ORCID = \"0000-0003-2880-7407\")), person(\"Hadley\", \"Wickham\", role = c(\"aut\"), email = \"hadley@rstudio.com\", comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "A dependency management toolkit for R. Using 'renv', you can create and manage project-local R libraries, save the state of these libraries to a 'lockfile', and later restore your library as required. 
Together, these tools can help make your projects more isolated, portable, and reproducible.", + "License": "MIT + file LICENSE", + "URL": "https://rstudio.github.io/renv/, https://github.com/rstudio/renv", + "BugReports": "https://github.com/rstudio/renv/issues", + "Imports": [ + "utils" + ], + "Suggests": [ + "BiocManager", + "cli", + "compiler", + "covr", + "cpp11", + "devtools", + "gitcreds", + "jsonlite", + "jsonvalidate", + "knitr", + "miniUI", + "modules", + "packrat", + "pak", + "R6", + "remotes", + "reticulate", + "rmarkdown", + "rstudioapi", + "shiny", + "testthat", + "uuid", + "waldo", + "yaml", + "webfakes" + ], + "Encoding": "UTF-8", + "RoxygenNote": "7.3.2", + "VignetteBuilder": "knitr", + "Config/Needs/website": "tidyverse/tidytemplate", + "Config/testthat/edition": "3", + "Config/testthat/parallel": "true", + "Config/testthat/start-first": "bioconductor,python,install,restore,snapshot,retrieve,remotes", + "NeedsCompilation": "no", + "Author": "Kevin Ushey [aut, cre] (), Hadley Wickham [aut] (), Posit Software, PBC [cph, fnd]", + "Maintainer": "Kevin Ushey ", + "Repository": "CRAN" + }, + "rlang": { + "Package": "rlang", + "Version": "1.1.6", + "Source": "Repository", + "Title": "Functions for Base Types and Core R and 'Tidyverse' Features", + "Description": "A toolbox for working with base types, core R features like the condition system, and core 'Tidyverse' features like tidy evaluation.", + "Authors@R": "c( person(\"Lionel\", \"Henry\", ,\"lionel@posit.co\", c(\"aut\", \"cre\")), person(\"Hadley\", \"Wickham\", ,\"hadley@posit.co\", \"aut\"), person(given = \"mikefc\", email = \"mikefc@coolbutuseless.com\", role = \"cph\", comment = \"Hash implementation based on Mike's xxhashlite\"), person(given = \"Yann\", family = \"Collet\", role = \"cph\", comment = \"Author of the embedded xxHash library\"), person(given = \"Posit, PBC\", role = c(\"cph\", \"fnd\")) )", + "License": "MIT + file LICENSE", + "ByteCompile": "true", + "Biarch": "true", + "Depends": [ + "R (>= 3.5.0)" + ], + "Imports": [ + "utils" + ], + "Suggests": [ + "cli (>= 3.1.0)", + "covr", + "crayon", + "desc", + "fs", + "glue", + "knitr", + "magrittr", + "methods", + "pillar", + "pkgload", + "rmarkdown", + "stats", + "testthat (>= 3.2.0)", + "tibble", + "usethis", + "vctrs (>= 0.2.3)", + "withr" + ], + "Enhances": [ + "winch" + ], + "Encoding": "UTF-8", + "RoxygenNote": "7.3.2", + "URL": "https://rlang.r-lib.org, https://github.com/r-lib/rlang", + "BugReports": "https://github.com/r-lib/rlang/issues", + "Config/build/compilation-database": "true", + "Config/testthat/edition": "3", + "Config/Needs/website": "dplyr, tidyverse/tidytemplate", + "NeedsCompilation": "yes", + "Author": "Lionel Henry [aut, cre], Hadley Wickham [aut], mikefc [cph] (Hash implementation based on Mike's xxhashlite), Yann Collet [cph] (Author of the embedded xxHash library), Posit, PBC [cph, fnd]", + "Maintainer": "Lionel Henry ", + "Repository": "CRAN" + }, + "rvest": { + "Package": "rvest", + "Version": "1.0.4", + "Source": "Repository", + "Title": "Easily Harvest (Scrape) Web Pages", + "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "Wrappers around the 'xml2' and 'httr' packages to make it easy to download, then manipulate, HTML and XML.", + "License": "MIT + file LICENSE", + "URL": "https://rvest.tidyverse.org/, https://github.com/tidyverse/rvest", + "BugReports": 
"https://github.com/tidyverse/rvest/issues", + "Depends": [ + "R (>= 3.6)" + ], + "Imports": [ + "cli", + "glue", + "httr (>= 0.5)", + "lifecycle (>= 1.0.3)", + "magrittr", + "rlang (>= 1.1.0)", + "selectr", + "tibble", + "xml2 (>= 1.3)" + ], + "Suggests": [ + "chromote", + "covr", + "knitr", + "R6", + "readr", + "repurrrsive", + "rmarkdown", + "spelling", + "stringi (>= 0.3.1)", + "testthat (>= 3.0.2)", + "webfakes" + ], + "VignetteBuilder": "knitr", + "Config/Needs/website": "tidyverse/tidytemplate", + "Config/testthat/edition": "3", + "Config/testthat/parallel": "true", + "Encoding": "UTF-8", + "Language": "en-US", + "RoxygenNote": "7.3.1", + "NeedsCompilation": "no", + "Author": "Hadley Wickham [aut, cre], Posit Software, PBC [cph, fnd]", + "Maintainer": "Hadley Wickham ", + "Repository": "CRAN" + }, + "selectr": { + "Package": "selectr", + "Version": "0.4-2", + "Source": "Repository", + "Type": "Package", + "Title": "Translate CSS Selectors to XPath Expressions", + "Date": "2019-11-20", + "Authors@R": "c(person(\"Simon\", \"Potter\", role = c(\"aut\", \"trl\", \"cre\"), email = \"simon@sjp.co.nz\"), person(\"Simon\", \"Sapin\", role = \"aut\"), person(\"Ian\", \"Bicking\", role = \"aut\"))", + "License": "BSD_3_clause + file LICENCE", + "Depends": [ + "R (>= 3.0)" + ], + "Imports": [ + "methods", + "stringr", + "R6" + ], + "Suggests": [ + "testthat", + "XML", + "xml2" + ], + "URL": "https://sjp.co.nz/projects/selectr", + "BugReports": "https://github.com/sjp/selectr/issues", + "Description": "Translates a CSS3 selector into an equivalent XPath expression. This allows us to use CSS selectors when working with the XML package as it can only evaluate XPath expressions. Also provided are convenience functions useful for using CSS selectors on XML nodes. This package is a port of the Python package 'cssselect' ().", + "NeedsCompilation": "no", + "Author": "Simon Potter [aut, trl, cre], Simon Sapin [aut], Ian Bicking [aut]", + "Maintainer": "Simon Potter ", + "Repository": "CRAN" + }, + "snakecase": { + "Package": "snakecase", + "Version": "0.11.1", + "Source": "Repository", + "Date": "2023-08-27", + "Title": "Convert Strings into any Case", + "Description": "A consistent, flexible and easy to use tool to parse and convert strings into cases like snake or camel among others.", + "Authors@R": "c( person(\"Malte\", \"Grosser\", , \"malte.grosser@gmail.com\", role = c(\"aut\", \"cre\")))", + "Maintainer": "Malte Grosser ", + "Depends": [ + "R (>= 3.2)" + ], + "Imports": [ + "stringr", + "stringi" + ], + "Suggests": [ + "testthat", + "covr", + "tibble", + "purrrlyr", + "knitr", + "rmarkdown", + "magrittr" + ], + "URL": "https://github.com/Tazinho/snakecase", + "BugReports": "https://github.com/Tazinho/snakecase/issues", + "Encoding": "UTF-8", + "License": "GPL-3", + "RoxygenNote": "6.1.1", + "VignetteBuilder": "knitr", + "NeedsCompilation": "no", + "Author": "Malte Grosser [aut, cre]", + "Repository": "CRAN" + }, + "stringi": { + "Package": "stringi", + "Version": "1.8.7", + "Source": "Repository", + "Date": "2025-03-27", + "Title": "Fast and Portable Character String Processing Facilities", + "Description": "A collection of character string/text/natural language processing tools for pattern searching (e.g., with 'Java'-like regular expressions or the 'Unicode' collation algorithm), random string generation, case mapping, string transliteration, concatenation, sorting, padding, wrapping, Unicode normalisation, date-time formatting and parsing, and many more. 
They are fast, consistent, convenient, and - thanks to 'ICU' (International Components for Unicode) - portable across all locales and platforms. Documentation about 'stringi' is provided via its website at and the paper by Gagolewski (2022, ).", + "URL": "https://stringi.gagolewski.com/, https://github.com/gagolews/stringi, https://icu.unicode.org/", + "BugReports": "https://github.com/gagolews/stringi/issues", + "SystemRequirements": "ICU4C (>= 61, optional)", + "Type": "Package", + "Depends": [ + "R (>= 3.4)" + ], + "Imports": [ + "tools", + "utils", + "stats" + ], + "Biarch": "TRUE", + "License": "file LICENSE", + "Authors@R": "c(person(given = \"Marek\", family = \"Gagolewski\", role = c(\"aut\", \"cre\", \"cph\"), email = \"marek@gagolewski.com\", comment = c(ORCID = \"0000-0003-0637-6028\")), person(given = \"Bartek\", family = \"Tartanus\", role = \"ctb\"), person(\"Unicode, Inc. and others\", role=\"ctb\", comment = \"ICU4C source code, Unicode Character Database\") )", + "RoxygenNote": "7.3.2", + "Encoding": "UTF-8", + "NeedsCompilation": "yes", + "Author": "Marek Gagolewski [aut, cre, cph] (), Bartek Tartanus [ctb], Unicode, Inc. and others [ctb] (ICU4C source code, Unicode Character Database)", + "Maintainer": "Marek Gagolewski ", + "License_is_FOSS": "yes", + "Repository": "CRAN" + }, + "stringr": { + "Package": "stringr", + "Version": "1.5.1", + "Source": "Repository", + "Title": "Simple, Consistent Wrappers for Common String Operations", + "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\", \"cph\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "A consistent, simple and easy to use set of wrappers around the fantastic 'stringi' package. All function and argument names (and positions) are consistent, all functions deal with \"NA\"'s and zero length vectors in the same way, and the output from one function is easy to feed into the input of another.", + "License": "MIT + file LICENSE", + "URL": "https://stringr.tidyverse.org, https://github.com/tidyverse/stringr", + "BugReports": "https://github.com/tidyverse/stringr/issues", + "Depends": [ + "R (>= 3.6)" + ], + "Imports": [ + "cli", + "glue (>= 1.6.1)", + "lifecycle (>= 1.0.3)", + "magrittr", + "rlang (>= 1.0.0)", + "stringi (>= 1.5.3)", + "vctrs (>= 0.4.0)" + ], + "Suggests": [ + "covr", + "dplyr", + "gt", + "htmltools", + "htmlwidgets", + "knitr", + "rmarkdown", + "testthat (>= 3.0.0)", + "tibble" + ], + "VignetteBuilder": "knitr", + "Config/Needs/website": "tidyverse/tidytemplate", + "Config/testthat/edition": "3", + "Encoding": "UTF-8", + "LazyData": "true", + "RoxygenNote": "7.2.3", + "NeedsCompilation": "no", + "Author": "Hadley Wickham [aut, cre, cph], Posit Software, PBC [cph, fnd]", + "Maintainer": "Hadley Wickham ", + "Repository": "CRAN" + }, + "sys": { + "Package": "sys", + "Version": "3.4.3", + "Source": "Repository", + "Type": "Package", + "Title": "Powerful and Reliable Tools for Running System Commands in R", + "Authors@R": "c(person(\"Jeroen\", \"Ooms\", role = c(\"aut\", \"cre\"), email = \"jeroenooms@gmail.com\", comment = c(ORCID = \"0000-0002-4035-0289\")), person(\"Gábor\", \"Csárdi\", , \"csardi.gabor@gmail.com\", role = \"ctb\"))", + "Description": "Drop-in replacements for the base system2() function with fine control and consistent behavior across platforms. Supports clean interruption, timeout, background tasks, and streaming STDIN / STDOUT / STDERR over binary or text connections. 
Arguments on Windows automatically get encoded and quoted to work on different locales.", + "License": "MIT + file LICENSE", + "URL": "https://jeroen.r-universe.dev/sys", + "BugReports": "https://github.com/jeroen/sys/issues", + "Encoding": "UTF-8", + "RoxygenNote": "7.1.1", + "Suggests": [ + "unix (>= 1.4)", + "spelling", + "testthat" + ], + "Language": "en-US", + "NeedsCompilation": "yes", + "Author": "Jeroen Ooms [aut, cre] (), Gábor Csárdi [ctb]", + "Maintainer": "Jeroen Ooms ", + "Repository": "CRAN" + }, + "tibble": { + "Package": "tibble", + "Version": "3.3.0", + "Source": "Repository", + "Title": "Simple Data Frames", + "Authors@R": "c(person(given = \"Kirill\", family = \"M\\u00fcller\", role = c(\"aut\", \"cre\"), email = \"kirill@cynkra.com\", comment = c(ORCID = \"0000-0002-1416-3412\")), person(given = \"Hadley\", family = \"Wickham\", role = \"aut\", email = \"hadley@rstudio.com\"), person(given = \"Romain\", family = \"Francois\", role = \"ctb\", email = \"romain@r-enthusiasts.com\"), person(given = \"Jennifer\", family = \"Bryan\", role = \"ctb\", email = \"jenny@rstudio.com\"), person(given = \"RStudio\", role = c(\"cph\", \"fnd\")))", + "Description": "Provides a 'tbl_df' class (the 'tibble') with stricter checking and better formatting than the traditional data frame.", + "License": "MIT + file LICENSE", + "URL": "https://tibble.tidyverse.org/, https://github.com/tidyverse/tibble", + "BugReports": "https://github.com/tidyverse/tibble/issues", + "Depends": [ + "R (>= 3.4.0)" + ], + "Imports": [ + "cli", + "lifecycle (>= 1.0.0)", + "magrittr", + "methods", + "pillar (>= 1.8.1)", + "pkgconfig", + "rlang (>= 1.0.2)", + "utils", + "vctrs (>= 0.5.0)" + ], + "Suggests": [ + "bench", + "bit64", + "blob", + "brio", + "callr", + "DiagrammeR", + "dplyr", + "evaluate", + "formattable", + "ggplot2", + "here", + "hms", + "htmltools", + "knitr", + "lubridate", + "nycflights13", + "pkgload", + "purrr", + "rmarkdown", + "stringi", + "testthat (>= 3.0.2)", + "tidyr", + "withr" + ], + "VignetteBuilder": "knitr", + "Encoding": "UTF-8", + "RoxygenNote": "7.3.2.9000", + "Config/testthat/edition": "3", + "Config/testthat/parallel": "true", + "Config/testthat/start-first": "vignette-formats, as_tibble, add, invariants", + "Config/autostyle/scope": "line_breaks", + "Config/autostyle/strict": "true", + "Config/autostyle/rmd": "false", + "Config/Needs/website": "tidyverse/tidytemplate", + "NeedsCompilation": "yes", + "Author": "Kirill Müller [aut, cre] (ORCID: ), Hadley Wickham [aut], Romain Francois [ctb], Jennifer Bryan [ctb], RStudio [cph, fnd]", + "Maintainer": "Kirill Müller ", + "Repository": "CRAN" + }, + "tidyr": { + "Package": "tidyr", + "Version": "1.3.1", + "Source": "Repository", + "Title": "Tidy Messy Data", + "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = c(\"aut\", \"cre\")), person(\"Davis\", \"Vaughan\", , \"davis@posit.co\", role = \"aut\"), person(\"Maximilian\", \"Girlich\", role = \"aut\"), person(\"Kevin\", \"Ushey\", , \"kevin@posit.co\", role = \"ctb\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "Tools to help to create tidy data, where each column is a variable, each row is an observation, and each cell contains a single value. 'tidyr' contains tools for changing the shape (pivoting) and hierarchy (nesting and 'unnesting') of a dataset, turning deeply nested lists into rectangular data frames ('rectangling'), and extracting values out of string columns. 
It also includes tools for working with missing values (both implicit and explicit).", + "License": "MIT + file LICENSE", + "URL": "https://tidyr.tidyverse.org, https://github.com/tidyverse/tidyr", + "BugReports": "https://github.com/tidyverse/tidyr/issues", + "Depends": [ + "R (>= 3.6)" + ], + "Imports": [ + "cli (>= 3.4.1)", + "dplyr (>= 1.0.10)", + "glue", + "lifecycle (>= 1.0.3)", + "magrittr", + "purrr (>= 1.0.1)", + "rlang (>= 1.1.1)", + "stringr (>= 1.5.0)", + "tibble (>= 2.1.1)", + "tidyselect (>= 1.2.0)", + "utils", + "vctrs (>= 0.5.2)" + ], + "Suggests": [ + "covr", + "data.table", + "knitr", + "readr", + "repurrrsive (>= 1.1.0)", + "rmarkdown", + "testthat (>= 3.0.0)" + ], + "LinkingTo": [ + "cpp11 (>= 0.4.0)" + ], + "VignetteBuilder": "knitr", + "Config/Needs/website": "tidyverse/tidytemplate", + "Config/testthat/edition": "3", + "Encoding": "UTF-8", + "LazyData": "true", + "RoxygenNote": "7.3.0", + "NeedsCompilation": "yes", + "Author": "Hadley Wickham [aut, cre], Davis Vaughan [aut], Maximilian Girlich [aut], Kevin Ushey [ctb], Posit Software, PBC [cph, fnd]", + "Maintainer": "Hadley Wickham ", + "Repository": "CRAN" + }, + "tidyselect": { + "Package": "tidyselect", + "Version": "1.2.1", + "Source": "Repository", + "Title": "Select from a Set of Strings", + "Authors@R": "c( person(\"Lionel\", \"Henry\", , \"lionel@posit.co\", role = c(\"aut\", \"cre\")), person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = \"aut\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "A backend for the selecting functions of the 'tidyverse'. It makes it easy to implement select-like functions in your own packages in a way that is consistent with other 'tidyverse' interfaces for selection.", + "License": "MIT + file LICENSE", + "URL": "https://tidyselect.r-lib.org, https://github.com/r-lib/tidyselect", + "BugReports": "https://github.com/r-lib/tidyselect/issues", + "Depends": [ + "R (>= 3.4)" + ], + "Imports": [ + "cli (>= 3.3.0)", + "glue (>= 1.3.0)", + "lifecycle (>= 1.0.3)", + "rlang (>= 1.0.4)", + "vctrs (>= 0.5.2)", + "withr" + ], + "Suggests": [ + "covr", + "crayon", + "dplyr", + "knitr", + "magrittr", + "rmarkdown", + "stringr", + "testthat (>= 3.1.1)", + "tibble (>= 2.1.3)" + ], + "VignetteBuilder": "knitr", + "ByteCompile": "true", + "Config/testthat/edition": "3", + "Config/Needs/website": "tidyverse/tidytemplate", + "Encoding": "UTF-8", + "RoxygenNote": "7.3.0.9000", + "NeedsCompilation": "yes", + "Author": "Lionel Henry [aut, cre], Hadley Wickham [aut], Posit Software, PBC [cph, fnd]", + "Maintainer": "Lionel Henry ", + "Repository": "CRAN" + }, + "tidytable": { + "Package": "tidytable", + "Version": "0.11.2", + "Source": "Repository", + "Title": "Tidy Interface to 'data.table'", + "Authors@R": "c( person(\"Mark\", \"Fairbanks\", role = c(\"aut\", \"cre\"), email = \"mark.t.fairbanks@gmail.com\"), person(\"Abdessabour\", \"Moutik\", role = \"ctb\"), person(\"Matt\", \"Carlson\", role = \"ctb\"), person(\"Ivan\", \"Leung\", role = \"ctb\"), person(\"Ross\", \"Kennedy\", role = \"ctb\"), person(\"Robert\", \"On\", role = \"ctb\"), person(\"Alexander\", \"Sevostianov\", role = \"ctb\"), person(\"Koen\", \"ter Berg\", role = \"ctb\") )", + "Description": "A tidy interface to 'data.table', giving users the speed of 'data.table' while using tidyverse-like syntax.", + "License": "MIT + file LICENSE", + "Encoding": "UTF-8", + "Imports": [ + "data.table (>= 1.16.0)", + "glue (>= 1.4.0)", + "lifecycle (>= 1.0.3)", + "magrittr (>= 2.0.3)", + "pillar (>= 
1.8.0)", + "rlang (>= 1.1.0)", + "tidyselect (>= 1.2.0)", + "vctrs (>= 0.6.0)" + ], + "RoxygenNote": "7.3.2", + "Config/testthat/edition": "3", + "URL": "https://markfairbanks.github.io/tidytable/, https://github.com/markfairbanks/tidytable", + "BugReports": "https://github.com/markfairbanks/tidytable/issues", + "Suggests": [ + "testthat (>= 2.1.0)", + "bit64", + "knitr", + "rmarkdown", + "crayon" + ], + "NeedsCompilation": "no", + "Author": "Mark Fairbanks [aut, cre], Abdessabour Moutik [ctb], Matt Carlson [ctb], Ivan Leung [ctb], Ross Kennedy [ctb], Robert On [ctb], Alexander Sevostianov [ctb], Koen ter Berg [ctb]", + "Maintainer": "Mark Fairbanks ", + "Repository": "CRAN" + }, + "timechange": { + "Package": "timechange", + "Version": "0.3.0", + "Source": "Repository", + "Title": "Efficient Manipulation of Date-Times", + "Authors@R": "c(person(\"Vitalie\", \"Spinu\", email = \"spinuvit@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Google Inc.\", role = c(\"ctb\", \"cph\")))", + "Description": "Efficient routines for manipulation of date-time objects while accounting for time-zones and daylight saving times. The package includes utilities for updating of date-time components (year, month, day etc.), modification of time-zones, rounding of date-times, period addition and subtraction etc. Parts of the 'CCTZ' source code, released under the Apache 2.0 License, are included in this package. See for more details.", + "Depends": [ + "R (>= 3.3)" + ], + "License": "GPL (>= 3)", + "Encoding": "UTF-8", + "LinkingTo": [ + "cpp11 (>= 0.2.7)" + ], + "Suggests": [ + "testthat (>= 0.7.1.99)", + "knitr" + ], + "SystemRequirements": "A system with zoneinfo data (e.g. /usr/share/zoneinfo) as well as a recent-enough C++11 compiler (such as g++-4.8 or later). On Windows the zoneinfo included with R is used.", + "BugReports": "https://github.com/vspinu/timechange/issues", + "URL": "https://github.com/vspinu/timechange/", + "RoxygenNote": "7.2.1", + "NeedsCompilation": "yes", + "Author": "Vitalie Spinu [aut, cre], Google Inc. [ctb, cph]", + "Maintainer": "Vitalie Spinu ", + "Repository": "CRAN" + }, + "tzdb": { + "Package": "tzdb", + "Version": "0.5.0", + "Source": "Repository", + "Title": "Time Zone Database Information", + "Authors@R": "c( person(\"Davis\", \"Vaughan\", , \"davis@posit.co\", role = c(\"aut\", \"cre\")), person(\"Howard\", \"Hinnant\", role = \"cph\", comment = \"Author of the included date library\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "Provides an up-to-date copy of the Internet Assigned Numbers Authority (IANA) Time Zone Database. It is updated periodically to reflect changes made by political bodies to time zone boundaries, UTC offsets, and daylight saving time rules. Additionally, this package provides a C++ interface for working with the 'date' library. 'date' provides comprehensive support for working with dates and date-times, which this package exposes to make it easier for other R packages to utilize. 
Headers are provided for calendar specific calculations, along with a limited interface for time zone manipulations.", + "License": "MIT + file LICENSE", + "URL": "https://tzdb.r-lib.org, https://github.com/r-lib/tzdb", + "BugReports": "https://github.com/r-lib/tzdb/issues", + "Depends": [ + "R (>= 4.0.0)" + ], + "Suggests": [ + "covr", + "testthat (>= 3.0.0)" + ], + "LinkingTo": [ + "cpp11 (>= 0.5.2)" + ], + "Biarch": "yes", + "Config/Needs/website": "tidyverse/tidytemplate", + "Config/testthat/edition": "3", + "Encoding": "UTF-8", + "RoxygenNote": "7.3.2", + "NeedsCompilation": "yes", + "Author": "Davis Vaughan [aut, cre], Howard Hinnant [cph] (Author of the included date library), Posit Software, PBC [cph, fnd]", + "Maintainer": "Davis Vaughan ", + "Repository": "CRAN" + }, + "utf8": { + "Package": "utf8", + "Version": "1.2.6", + "Source": "Repository", + "Title": "Unicode Text Processing", + "Authors@R": "c(person(given = c(\"Patrick\", \"O.\"), family = \"Perry\", role = c(\"aut\", \"cph\")), person(given = \"Kirill\", family = \"M\\u00fcller\", role = \"cre\", email = \"kirill@cynkra.com\", comment = c(ORCID = \"0000-0002-1416-3412\")), person(given = \"Unicode, Inc.\", role = c(\"cph\", \"dtc\"), comment = \"Unicode Character Database\"))", + "Description": "Process and print 'UTF-8' encoded international text (Unicode). Input, validate, normalize, encode, format, and display.", + "License": "Apache License (== 2.0) | file LICENSE", + "URL": "https://krlmlr.github.io/utf8/, https://github.com/krlmlr/utf8", + "BugReports": "https://github.com/krlmlr/utf8/issues", + "Depends": [ + "R (>= 2.10)" + ], + "Suggests": [ + "cli", + "covr", + "knitr", + "rlang", + "rmarkdown", + "testthat (>= 3.0.0)", + "withr" + ], + "VignetteBuilder": "knitr, rmarkdown", + "Config/testthat/edition": "3", + "Encoding": "UTF-8", + "RoxygenNote": "7.3.2.9000", + "NeedsCompilation": "yes", + "Author": "Patrick O. Perry [aut, cph], Kirill Müller [cre] (ORCID: ), Unicode, Inc. 
[cph, dtc] (Unicode Character Database)", + "Maintainer": "Kirill Müller ", + "Repository": "CRAN" + }, + "vctrs": { + "Package": "vctrs", + "Version": "0.6.5", + "Source": "Repository", + "Title": "Vector Helpers", + "Authors@R": "c( person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = \"aut\"), person(\"Lionel\", \"Henry\", , \"lionel@posit.co\", role = \"aut\"), person(\"Davis\", \"Vaughan\", , \"davis@posit.co\", role = c(\"aut\", \"cre\")), person(\"data.table team\", role = \"cph\", comment = \"Radix sort based on data.table's forder() and their contribution to R's order()\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "Defines new notions of prototype and size that are used to provide tools for consistent and well-founded type-coercion and size-recycling, and are in turn connected to ideas of type- and size-stability useful for analysing function interfaces.", + "License": "MIT + file LICENSE", + "URL": "https://vctrs.r-lib.org/, https://github.com/r-lib/vctrs", + "BugReports": "https://github.com/r-lib/vctrs/issues", + "Depends": [ + "R (>= 3.5.0)" + ], + "Imports": [ + "cli (>= 3.4.0)", + "glue", + "lifecycle (>= 1.0.3)", + "rlang (>= 1.1.0)" + ], + "Suggests": [ + "bit64", + "covr", + "crayon", + "dplyr (>= 0.8.5)", + "generics", + "knitr", + "pillar (>= 1.4.4)", + "pkgdown (>= 2.0.1)", + "rmarkdown", + "testthat (>= 3.0.0)", + "tibble (>= 3.1.3)", + "waldo (>= 0.2.0)", + "withr", + "xml2", + "zeallot" + ], + "VignetteBuilder": "knitr", + "Config/Needs/website": "tidyverse/tidytemplate", + "Config/testthat/edition": "3", + "Encoding": "UTF-8", + "Language": "en-GB", + "RoxygenNote": "7.2.3", + "NeedsCompilation": "yes", + "Author": "Hadley Wickham [aut], Lionel Henry [aut], Davis Vaughan [aut, cre], data.table team [cph] (Radix sort based on data.table's forder() and their contribution to R's order()), Posit Software, PBC [cph, fnd]", + "Maintainer": "Davis Vaughan ", + "Repository": "CRAN" + }, + "vroom": { + "Package": "vroom", + "Version": "1.6.5", + "Source": "Repository", + "Title": "Read and Write Rectangular Text Data Quickly", + "Authors@R": "c( person(\"Jim\", \"Hester\", role = \"aut\", comment = c(ORCID = \"0000-0002-2739-7082\")), person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = \"aut\", comment = c(ORCID = \"0000-0003-4757-117X\")), person(\"Jennifer\", \"Bryan\", , \"jenny@posit.co\", role = c(\"aut\", \"cre\"), comment = c(ORCID = \"0000-0002-6983-2759\")), person(\"Shelby\", \"Bearrows\", role = \"ctb\"), person(\"https://github.com/mandreyel/\", role = \"cph\", comment = \"mio library\"), person(\"Jukka\", \"Jylänki\", role = \"cph\", comment = \"grisu3 implementation\"), person(\"Mikkel\", \"Jørgensen\", role = \"cph\", comment = \"grisu3 implementation\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "The goal of 'vroom' is to read and write data (like 'csv', 'tsv' and 'fwf') quickly. When reading it uses a quick initial indexing step, then reads the values lazily , so only the data you actually use needs to be read. 
The writer formats the data in parallel and writes to disk asynchronously from formatting.", + "License": "MIT + file LICENSE", + "URL": "https://vroom.r-lib.org, https://github.com/tidyverse/vroom", + "BugReports": "https://github.com/tidyverse/vroom/issues", + "Depends": [ + "R (>= 3.6)" + ], + "Imports": [ + "bit64", + "cli (>= 3.2.0)", + "crayon", + "glue", + "hms", + "lifecycle (>= 1.0.3)", + "methods", + "rlang (>= 0.4.2)", + "stats", + "tibble (>= 2.0.0)", + "tidyselect", + "tzdb (>= 0.1.1)", + "vctrs (>= 0.2.0)", + "withr" + ], + "Suggests": [ + "archive", + "bench (>= 1.1.0)", + "covr", + "curl", + "dplyr", + "forcats", + "fs", + "ggplot2", + "knitr", + "patchwork", + "prettyunits", + "purrr", + "rmarkdown", + "rstudioapi", + "scales", + "spelling", + "testthat (>= 2.1.0)", + "tidyr", + "utils", + "waldo", + "xml2" + ], + "LinkingTo": [ + "cpp11 (>= 0.2.0)", + "progress (>= 1.2.1)", + "tzdb (>= 0.1.1)" + ], + "VignetteBuilder": "knitr", + "Config/Needs/website": "nycflights13, tidyverse/tidytemplate", + "Config/testthat/edition": "3", + "Config/testthat/parallel": "false", + "Copyright": "file COPYRIGHTS", + "Encoding": "UTF-8", + "Language": "en-US", + "RoxygenNote": "7.2.3.9000", + "NeedsCompilation": "yes", + "Author": "Jim Hester [aut] (), Hadley Wickham [aut] (), Jennifer Bryan [aut, cre] (), Shelby Bearrows [ctb], https://github.com/mandreyel/ [cph] (mio library), Jukka Jylänki [cph] (grisu3 implementation), Mikkel Jørgensen [cph] (grisu3 implementation), Posit Software, PBC [cph, fnd]", + "Maintainer": "Jennifer Bryan ", + "Repository": "RSPM" + }, + "withr": { + "Package": "withr", + "Version": "3.0.2", + "Source": "Repository", + "Title": "Run Code 'With' Temporarily Modified Global State", + "Authors@R": "c( person(\"Jim\", \"Hester\", role = \"aut\"), person(\"Lionel\", \"Henry\", , \"lionel@posit.co\", role = c(\"aut\", \"cre\")), person(\"Kirill\", \"Müller\", , \"krlmlr+r@mailbox.org\", role = \"aut\"), person(\"Kevin\", \"Ushey\", , \"kevinushey@gmail.com\", role = \"aut\"), person(\"Hadley\", \"Wickham\", , \"hadley@posit.co\", role = \"aut\"), person(\"Winston\", \"Chang\", role = \"aut\"), person(\"Jennifer\", \"Bryan\", role = \"ctb\"), person(\"Richard\", \"Cotton\", role = \"ctb\"), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")) )", + "Description": "A set of functions to run code 'with' safely and temporarily modified global state. 
Many of these functions were originally a part of the 'devtools' package, this provides a simple package with limited dependencies to provide access to these functions.", + "License": "MIT + file LICENSE", + "URL": "https://withr.r-lib.org, https://github.com/r-lib/withr#readme", + "BugReports": "https://github.com/r-lib/withr/issues", + "Depends": [ + "R (>= 3.6.0)" + ], + "Imports": [ + "graphics", + "grDevices" + ], + "Suggests": [ + "callr", + "DBI", + "knitr", + "methods", + "rlang", + "rmarkdown (>= 2.12)", + "RSQLite", + "testthat (>= 3.0.0)" + ], + "VignetteBuilder": "knitr", + "Config/Needs/website": "tidyverse/tidytemplate", + "Config/testthat/edition": "3", + "Encoding": "UTF-8", + "RoxygenNote": "7.3.2", + "Collate": "'aaa.R' 'collate.R' 'connection.R' 'db.R' 'defer-exit.R' 'standalone-defer.R' 'defer.R' 'devices.R' 'local_.R' 'with_.R' 'dir.R' 'env.R' 'file.R' 'language.R' 'libpaths.R' 'locale.R' 'makevars.R' 'namespace.R' 'options.R' 'par.R' 'path.R' 'rng.R' 'seed.R' 'wrap.R' 'sink.R' 'tempfile.R' 'timezone.R' 'torture.R' 'utils.R' 'with.R'", + "NeedsCompilation": "no", + "Author": "Jim Hester [aut], Lionel Henry [aut, cre], Kirill Müller [aut], Kevin Ushey [aut], Hadley Wickham [aut], Winston Chang [aut], Jennifer Bryan [ctb], Richard Cotton [ctb], Posit Software, PBC [cph, fnd]", + "Maintainer": "Lionel Henry ", + "Repository": "CRAN" + }, + "xml2": { + "Package": "xml2", + "Version": "1.3.8", + "Source": "Repository", + "Title": "Parse XML", + "Authors@R": "c( person(\"Hadley\", \"Wickham\", role = \"aut\"), person(\"Jim\", \"Hester\", role = \"aut\"), person(\"Jeroen\", \"Ooms\", email = \"jeroenooms@gmail.com\", role = c(\"aut\", \"cre\")), person(\"Posit Software, PBC\", role = c(\"cph\", \"fnd\")), person(\"R Foundation\", role = \"ctb\", comment = \"Copy of R-project homepage cached as example\") )", + "Description": "Bindings to 'libxml2' for working with XML data using a simple, consistent interface based on 'XPath' expressions. 
Also supports XML schema validation; for 'XSLT' transformations see the 'xslt' package.", + "License": "MIT + file LICENSE", + "URL": "https://xml2.r-lib.org, https://r-lib.r-universe.dev/xml2", + "BugReports": "https://github.com/r-lib/xml2/issues", + "Depends": [ + "R (>= 3.6.0)" + ], + "Imports": [ + "cli", + "methods", + "rlang (>= 1.1.0)" + ], + "Suggests": [ + "covr", + "curl", + "httr", + "knitr", + "magrittr", + "mockery", + "rmarkdown", + "testthat (>= 3.2.0)", + "xslt" + ], + "VignetteBuilder": "knitr", + "Config/Needs/website": "tidyverse/tidytemplate", + "Encoding": "UTF-8", + "RoxygenNote": "7.2.3", + "SystemRequirements": "libxml2: libxml2-dev (deb), libxml2-devel (rpm)", + "Collate": "'S4.R' 'as_list.R' 'xml_parse.R' 'as_xml_document.R' 'classes.R' 'format.R' 'import-standalone-obj-type.R' 'import-standalone-purrr.R' 'import-standalone-types-check.R' 'init.R' 'nodeset_apply.R' 'paths.R' 'utils.R' 'xml2-package.R' 'xml_attr.R' 'xml_children.R' 'xml_document.R' 'xml_find.R' 'xml_missing.R' 'xml_modify.R' 'xml_name.R' 'xml_namespaces.R' 'xml_node.R' 'xml_nodeset.R' 'xml_path.R' 'xml_schema.R' 'xml_serialize.R' 'xml_structure.R' 'xml_text.R' 'xml_type.R' 'xml_url.R' 'xml_write.R' 'zzz.R'", + "Config/testthat/edition": "3", + "NeedsCompilation": "yes", + "Author": "Hadley Wickham [aut], Jim Hester [aut], Jeroen Ooms [aut, cre], Posit Software, PBC [cph, fnd], R Foundation [ctb] (Copy of R-project homepage cached as example)", + "Maintainer": "Jeroen Ooms ", + "Repository": "CRAN" + } + } +} diff --git a/tests/testthat/test-crosswalk_data.R b/tests/testthat/test-crosswalk_data.R new file mode 100644 index 0000000..b6ea1fd --- /dev/null +++ b/tests/testthat/test-crosswalk_data.R @@ -0,0 +1,614 @@ +# Tests for crosswalk_data() - data transformation function + +# ============================================================================== +# Basic functionality tests +# ============================================================================== + +test_that("crosswalk_data returns expected structure", { + skip_if_offline() + + # Create mock data + mock_data <- tibble::tibble( + tract_geoid = c("01001020100", "01001020200", "01001020300"), + count_population = c(1000, 2000, 1500), + mean_income = c(50000, 60000, 45000)) + + # Get a crosswalk first + crosswalk <- get_crosswalk( + source_geography = "tract", + target_geography = "zcta", + weight = "population") + + result <- crosswalk_data( + data = mock_data, + crosswalk = crosswalk, + geoid_column = "tract_geoid", + count_columns = c("count_population"), + non_count_columns = c("mean_income")) + + expect_s3_class(result, "tbl_df") + expect_true("geoid" %in% colnames(result)) + expect_true("count_population" %in% colnames(result)) + expect_true("mean_income" %in% colnames(result)) +}) + +test_that("crosswalk_data attaches metadata", { + skip_if_offline() + + mock_data <- tibble::tibble( + tract_geoid = c("01001020100"), + count_population = c(1000)) + + crosswalk <- get_crosswalk( + source_geography = "tract", + target_geography = "zcta", + weight = "population") + + result <- crosswalk_data( + data = mock_data, + crosswalk = crosswalk, + geoid_column = "tract_geoid", + count_columns = c("count_population")) + + metadata <- attr(result, "crosswalk_metadata") + expect_type(metadata, "list") +}) + +# ============================================================================== +# Count variable interpolation tests +# ============================================================================== + +test_that("crosswalk_data 
correctly interpolates count variables", { + # Create a controlled test case + mock_data <- tibble::tibble( + source_geoid = c("A", "B"), + count_value = c(100, 200)) + + # Create a mock crosswalk + mock_crosswalk <- tibble::tibble( + source_geoid = c("A", "A", "B"), + target_geoid = c("X", "Y", "X"), + target_geography_name = c("test", "test", "test"), + allocation_factor_source_to_target = c(0.6, 0.4, 1.0)) + + result <- crosswalk_data( + data = mock_data, + crosswalk = mock_crosswalk, + geoid_column = "source_geoid", + count_columns = "count_value") + + # A -> X: 100 * 0.6 = 60 + # B -> X: 200 * 1.0 = 200 + # X total: 260 + x_result <- result |> dplyr::filter(geoid == "X") |> dplyr::pull(count_value) + expect_equal(x_result, 260) + + # A -> Y: 100 * 0.4 = 40 + y_result <- result |> dplyr::filter(geoid == "Y") |> dplyr::pull(count_value) + expect_equal(y_result, 40) +}) + +# ============================================================================== +# Non-count variable interpolation tests +# ============================================================================== + +test_that("crosswalk_data correctly interpolates non-count variables", { + # Create a controlled test case with weighted means + mock_data <- tibble::tibble( + source_geoid = c("A", "B"), + mean_income = c(50000, 80000)) + + # Create a mock crosswalk + mock_crosswalk <- tibble::tibble( + source_geoid = c("A", "B"), + target_geoid = c("X", "X"), + target_geography_name = c("test", "test"), + allocation_factor_source_to_target = c(0.3, 0.7)) + + result <- crosswalk_data( + data = mock_data, + crosswalk = mock_crosswalk, + geoid_column = "source_geoid", + non_count_columns = "mean_income") + + # For non-count variables, we use weighted mean + # Weighted mean = (50000 * 0.3 + 80000 * 0.7) / (0.3 + 0.7) = 71000 + expected_x <- stats::weighted.mean(c(50000, 80000), c(0.3, 0.7)) + + x_result <- result |> dplyr::filter(geoid == "X") |> dplyr::pull(mean_income) + expect_equal(x_result, expected_x) +}) + +# ============================================================================== +# Auto-detection of column types tests +# ============================================================================== + +test_that("crosswalk_data auto-detects count_ prefixed columns", { + mock_data <- tibble::tibble( + geoid = c("A"), + count_population = c(1000), + count_housing = c(500), + other_column = c(100)) + + mock_crosswalk <- tibble::tibble( + source_geoid = c("A"), + target_geoid = c("X"), + target_geography_name = c("test"), + allocation_factor_source_to_target = c(1.0)) + + result <- crosswalk_data( + data = mock_data, + crosswalk = mock_crosswalk, + geoid_column = "geoid") + + # Auto-detected count columns should be in result + expect_true("count_population" %in% colnames(result)) + expect_true("count_housing" %in% colnames(result)) +}) + +test_that("crosswalk_data auto-detects mean_ prefixed columns", { + mock_data <- tibble::tibble( + geoid = c("A"), + count_population = c(1000), + mean_income = c(50000)) + + mock_crosswalk <- tibble::tibble( + source_geoid = c("A"), + target_geoid = c("X"), + target_geography_name = c("test"), + allocation_factor_source_to_target = c(1.0)) + + result <- crosswalk_data( + data = mock_data, + crosswalk = mock_crosswalk, + geoid_column = "geoid") + + expect_true("mean_income" %in% colnames(result)) +}) + +test_that("crosswalk_data auto-detects median_ prefixed columns", { + mock_data <- tibble::tibble( + geoid = c("A"), + count_population = c(1000), + median_age = c(35)) + + mock_crosswalk 
<- tibble::tibble( + source_geoid = c("A"), + target_geoid = c("X"), + target_geography_name = c("test"), + allocation_factor_source_to_target = c(1.0)) + + result <- crosswalk_data( + data = mock_data, + crosswalk = mock_crosswalk, + geoid_column = "geoid") + + expect_true("median_age" %in% colnames(result)) +}) + +test_that("crosswalk_data auto-detects percent_ prefixed columns", { + mock_data <- tibble::tibble( + geoid = c("A"), + count_population = c(1000), + percent_employed = c(0.65)) + + mock_crosswalk <- tibble::tibble( + source_geoid = c("A"), + target_geoid = c("X"), + target_geography_name = c("test"), + allocation_factor_source_to_target = c(1.0)) + + result <- crosswalk_data( + data = mock_data, + crosswalk = mock_crosswalk, + geoid_column = "geoid") + + expect_true("percent_employed" %in% colnames(result)) +}) + +test_that("crosswalk_data auto-detects ratio_ prefixed columns", { + mock_data <- tibble::tibble( + geoid = c("A"), + count_population = c(1000), + ratio_income_to_poverty = c(2.5)) + + mock_crosswalk <- tibble::tibble( + source_geoid = c("A"), + target_geoid = c("X"), + target_geography_name = c("test"), + allocation_factor_source_to_target = c(1.0)) + + result <- crosswalk_data( + data = mock_data, + crosswalk = mock_crosswalk, + geoid_column = "geoid") + + expect_true("ratio_income_to_poverty" %in% colnames(result)) +}) + +# ============================================================================== +# Error handling tests +# ============================================================================== + +test_that("crosswalk_data errors when geoid_column not found", { + mock_data <- tibble::tibble( + tract_geoid = c("01001020100"), + count_population = c(1000)) + + mock_crosswalk <- tibble::tibble( + source_geoid = c("01001020100"), + target_geoid = c("X"), + allocation_factor_source_to_target = c(1.0)) + + expect_error( + crosswalk_data( + data = mock_data, + crosswalk = mock_crosswalk, + geoid_column = "wrong_column", + count_columns = c("count_population")), + regexp = "not found") +}) + +test_that("crosswalk_data errors when no columns to crosswalk", { + mock_data <- tibble::tibble( + geoid = c("A"), + some_column = c(100)) + + mock_crosswalk <- tibble::tibble( + source_geoid = c("A"), + target_geoid = c("X"), + allocation_factor_source_to_target = c(1.0)) + + expect_error( + crosswalk_data( + data = mock_data, + crosswalk = mock_crosswalk, + geoid_column = "geoid"), + regexp = "No columns to crosswalk") +}) + +test_that("crosswalk_data errors when specified columns not found", { + mock_data <- tibble::tibble( + geoid = c("A"), + count_population = c(1000)) + + mock_crosswalk <- tibble::tibble( + source_geoid = c("A"), + target_geoid = c("X"), + allocation_factor_source_to_target = c(1.0)) + + expect_error( + crosswalk_data( + data = mock_data, + crosswalk = mock_crosswalk, + geoid_column = "geoid", + count_columns = c("nonexistent_column")), + regexp = "not found") +}) + +test_that("crosswalk_data errors on invalid crosswalk input", { + mock_data <- tibble::tibble( + geoid = c("A"), + count_population = c(1000)) + + # Completely invalid input should error + expect_error( + crosswalk_data( + data = mock_data, + crosswalk = list(other_field = "something"), + geoid_column = "geoid", + count_columns = c("count_population")), + regexp = "Invalid crosswalk input") + + # String input should error + expect_error( + crosswalk_data( + data = mock_data, + crosswalk = "not_a_crosswalk", + geoid_column = "geoid", + count_columns = c("count_population")), + regexp = 
"Invalid crosswalk input") +}) + +test_that("crosswalk_data errors when crosswalk missing required columns", { + mock_data <- tibble::tibble( + geoid = c("A"), + count_population = c(1000)) + + mock_crosswalk <- tibble::tibble( + source_geoid = c("A"), + target_geoid = c("X")) + + expect_error( + crosswalk_data( + data = mock_data, + crosswalk = mock_crosswalk, + geoid_column = "geoid", + count_columns = c("count_population")), + regexp = "missing required columns") +}) + +# ============================================================================== +# Empty crosswalk handling tests +# ============================================================================== + +test_that("crosswalk_data warns for empty crosswalk", { + mock_data <- tibble::tibble( + geoid = c("A"), + count_population = c(1000)) + + empty_crosswalk <- tibble::tibble( + source_geoid = character(), + target_geoid = character(), + allocation_factor_source_to_target = numeric()) + + expect_warning( + result <- crosswalk_data( + data = mock_data, + crosswalk = empty_crosswalk, + geoid_column = "geoid", + count_columns = c("count_population")), + regexp = "empty") + + # Should return empty tibble + expect_equal(nrow(result), 0) +}) + +# ============================================================================== +# Inter-temporal crosswalk tests +# ============================================================================== + +test_that("crosswalk_data works with NHGIS inter-temporal crosswalk", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + mock_data <- tibble::tibble( + tract_geoid = c("01001020100", "01001020200"), + count_population = c(1000, 2000)) + + crosswalk <- get_crosswalk( + source_geography = "tract", + target_geography = "tract", + source_year = 2010, + target_year = 2020) + + result <- crosswalk_data( + data = mock_data, + crosswalk = crosswalk, + geoid_column = "tract_geoid", + count_columns = c("count_population")) + + expect_s3_class(result, "tbl_df") + expect_true("geoid" %in% colnames(result)) + expect_true("count_population" %in% colnames(result)) +}) + +# ============================================================================== +# 2020-2022 crosswalk tests +# ============================================================================== + +test_that("crosswalk_data works with 2020-2022 CT crosswalk", { + skip_if_offline() + + # Use CT tract GEOIDs + mock_data <- tibble::tibble( + tract_geoid = c("09001010101", "09001010102"), + count_population = c(1000, 2000)) + + crosswalk <- get_crosswalk( + source_geography = "tract", + target_geography = "tract", + source_year = 2020, + target_year = 2022) + + result <- crosswalk_data( + data = mock_data, + crosswalk = crosswalk, + geoid_column = "tract_geoid", + count_columns = c("count_population")) + + expect_s3_class(result, "tbl_df") + expect_true("geoid" %in% colnames(result)) + + # Since CT 2020-2022 is identity mapping for tracts, values should be preserved + expect_equal(sum(result$count_population), 3000) +}) + +# ============================================================================== +# Multi-step crosswalk tests +# ============================================================================== + +test_that("crosswalk_data automatically applies multi-step crosswalk from get_crosswalk", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + mock_data <- tibble::tibble( + tract_geoid = c("01001020100", "01001020200"), + count_population = c(1000, 
2000)) + + # Get multi-step crosswalk (returns list with multiple crosswalks) + crosswalk <- get_crosswalk( + source_geography = "tract", + target_geography = "zcta", + source_year = 2010, + target_year = 2020, + weight = "population") + + expect_type(crosswalk, "list") + expect_true("crosswalks" %in% names(crosswalk)) + expect_equal(length(crosswalk$crosswalks), 2) + + # crosswalk_data automatically applies all steps + result <- crosswalk_data( + data = mock_data, + crosswalk = crosswalk, + geoid_column = "tract_geoid", + count_columns = c("count_population")) + + expect_s3_class(result, "tbl_df") + expect_true("geoid" %in% colnames(result)) + expect_true("count_population" %in% colnames(result)) + + # Total population should approximately be preserved + expect_gt(sum(result$count_population, na.rm = TRUE), 0) +}) + +test_that("crosswalk_data can apply individual crosswalk steps manually", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + mock_data <- tibble::tibble( + tract_geoid = c("01001020100", "01001020200"), + count_population = c(1000, 2000)) + + # Get multi-step crosswalk (returns list) + chain <- get_crosswalk( + source_geography = "tract", + target_geography = "zcta", + source_year = 2010, + target_year = 2020, + weight = "population") + + # Apply step 1 manually (passing individual tibble) + step1_result <- crosswalk_data( + data = mock_data, + crosswalk = chain$crosswalks$step_1, + geoid_column = "tract_geoid", + count_columns = c("count_population")) + + expect_s3_class(step1_result, "tbl_df") + expect_true("geoid" %in% colnames(step1_result)) + + # Apply step 2 manually + step2_result <- crosswalk_data( + data = step1_result, + crosswalk = chain$crosswalks$step_2, + geoid_column = "geoid", + count_columns = c("count_population")) + + expect_s3_class(step2_result, "tbl_df") + expect_true("geoid" %in% colnames(step2_result)) + expect_true("count_population" %in% colnames(step2_result)) + + # Total population should approximately be preserved + expect_gt(sum(step2_result$count_population, na.rm = TRUE), 0) +}) + +# ============================================================================== +# return_intermediate parameter tests +# ============================================================================== + +test_that("crosswalk_data return_intermediate=TRUE returns intermediate results for multi-step", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + mock_data <- tibble::tibble( + tract_geoid = c("01001020100", "01001020200"), + count_population = c(1000, 2000)) + + crosswalk <- get_crosswalk( + source_geography = "tract", + target_geography = "zcta", + source_year = 2010, + target_year = 2020, + weight = "population") + + result <- crosswalk_data( + data = mock_data, + crosswalk = crosswalk, + geoid_column = "tract_geoid", + count_columns = c("count_population"), + return_intermediate = TRUE) + + # Should return a list with final and intermediate + expect_type(result, "list") + expect_true("final" %in% names(result)) + expect_true("intermediate" %in% names(result)) + + # Final should be a tibble + expect_s3_class(result$final, "tbl_df") + expect_true("geoid" %in% colnames(result$final)) + + # Intermediate should have results from each step + expect_type(result$intermediate, "list") + expect_true("step_1" %in% names(result$intermediate)) + expect_true("step_2" %in% names(result$intermediate)) + expect_s3_class(result$intermediate$step_1, "tbl_df") + 
expect_s3_class(result$intermediate$step_2, "tbl_df") +}) + +test_that("crosswalk_data return_intermediate=FALSE returns tibble for multi-step", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + mock_data <- tibble::tibble( + tract_geoid = c("01001020100", "01001020200"), + count_population = c(1000, 2000)) + + crosswalk <- get_crosswalk( + source_geography = "tract", + target_geography = "zcta", + source_year = 2010, + target_year = 2020, + weight = "population") + + result <- crosswalk_data( + data = mock_data, + crosswalk = crosswalk, + geoid_column = "tract_geoid", + count_columns = c("count_population"), + return_intermediate = FALSE) + + # Should return just a tibble, not a list + expect_s3_class(result, "tbl_df") + expect_true("geoid" %in% colnames(result)) +}) + +test_that("crosswalk_data return_intermediate=TRUE returns tibble for single-step", { + skip_if_offline() + + mock_data <- tibble::tibble( + tract_geoid = c("01001020100", "01001020200"), + count_population = c(1000, 2000)) + + # Single-step crosswalk + crosswalk <- get_crosswalk( + source_geography = "tract", + target_geography = "zcta", + weight = "population") + + result <- crosswalk_data( + data = mock_data, + crosswalk = crosswalk, + geoid_column = "tract_geoid", + count_columns = c("count_population"), + return_intermediate = TRUE) + + # For single-step, return_intermediate=TRUE returns tibble (no intermediate to return) + expect_s3_class(result, "tbl_df") +}) + +# ============================================================================== +# Character GEOID handling tests +# ============================================================================== + +test_that("crosswalk_data handles numeric GEOIDs", { + mock_data <- tibble::tibble( + geoid = c(1, 2), # Numeric + count_population = c(1000, 2000)) + + mock_crosswalk <- tibble::tibble( + source_geoid = c("1", "2"), + target_geoid = c("X", "X"), + target_geography_name = c("test", "test"), + allocation_factor_source_to_target = c(0.5, 0.5)) + + # Should work - function converts geoid to character + result <- crosswalk_data( + data = mock_data, + crosswalk = mock_crosswalk, + geoid_column = "geoid", + count_columns = c("count_population")) + + expect_s3_class(result, "tbl_df") +}) diff --git a/tests/testthat/test-get_crosswalk.R b/tests/testthat/test-get_crosswalk.R new file mode 100644 index 0000000..34c4e4d --- /dev/null +++ b/tests/testthat/test-get_crosswalk.R @@ -0,0 +1,322 @@ +# Tests for get_crosswalk() - the main user-facing function + +# ============================================================================== +# Basic functionality tests +# ============================================================================== + +test_that("get_crosswalk returns list with expected structure for single-step", { + skip_if_offline() + + result <- get_crosswalk( + source_geography = "tract", + target_geography = "tract", + source_year = 2020, + target_year = 2022) + + # Always returns list structure + + expect_type(result, "list") + expect_true("crosswalks" %in% names(result)) + expect_true("plan" %in% names(result)) + expect_true("message" %in% names(result)) + + # Single-step has one crosswalk + expect_equal(length(result$crosswalks), 1) + expect_true("step_1" %in% names(result$crosswalks)) + + # The crosswalk tibble has expected columns + crosswalk_tibble <- result$crosswalks$step_1 + expect_s3_class(crosswalk_tibble, "tbl_df") + expected_cols <- c("source_geoid", "target_geoid", 
"allocation_factor_source_to_target") + expect_true(all(expected_cols %in% colnames(crosswalk_tibble))) +}) + +test_that("get_crosswalk attaches comprehensive metadata attribute to crosswalk tibble", { + skip_if_offline() + + result <- get_crosswalk( + source_geography = "tract", + target_geography = "tract", + source_year = 2020, + target_year = 2022) + + # Metadata is attached to individual crosswalk tibbles + metadata <- attr(result$crosswalks$step_1, "crosswalk_metadata") + + expect_type(metadata, "list") + expect_true("call_parameters" %in% names(metadata)) + expect_true("data_source" %in% names(metadata)) + expect_true("data_source_full_name" %in% names(metadata)) + expect_true("source_geography" %in% names(metadata)) + expect_true("target_geography" %in% names(metadata)) + expect_true("source_year" %in% names(metadata)) + expect_true("target_year" %in% names(metadata)) + expect_true("crosswalk_package_version" %in% names(metadata)) + + expect_type(metadata$call_parameters, "list") + expect_equal(metadata$call_parameters$source_geography, "tract") + expect_equal(metadata$call_parameters$target_geography, "tract") +}) + +# ============================================================================== +# Routing tests +# ============================================================================== + +test_that("get_crosswalk routes 2020-2022 to CTData", { + skip_if_offline() + + result <- get_crosswalk( + source_geography = "tract", + target_geography = "tract", + source_year = 2020, + target_year = 2022) + + metadata <- attr(result$crosswalks$step_1, "crosswalk_metadata") + expect_equal(metadata$data_source, "ctdata_nhgis_combined") +}) + +test_that("get_crosswalk routes to NHGIS for inter-temporal requests", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result <- get_crosswalk( + source_geography = "tract", + target_geography = "tract", + source_year = 2010, + target_year = 2020) + + metadata <- attr(result$crosswalks$step_1, "crosswalk_metadata") + expect_equal(metadata$data_source, "nhgis") +}) + +test_that("get_crosswalk routes to Geocorr for same-year requests", { + skip_if_offline() + + result <- get_crosswalk( + source_geography = "tract", + target_geography = "zcta", + weight = "population") + + metadata <- attr(result$crosswalks$step_1, "crosswalk_metadata") + expect_equal(metadata$data_source, "geocorr") +}) + +# ============================================================================== +# Nested geography tests +# ============================================================================== + +test_that("get_crosswalk warns for nested geographies", { + expect_warning( + result <- get_crosswalk( + source_geography = "tract", + target_geography = "county"), + regexp = "nested within") + + # Returns list structure with empty crosswalk + expect_type(result, "list") + expect_true("crosswalks" %in% names(result)) + expect_equal(nrow(result$crosswalks$step_1), 0) +}) + +test_that("get_crosswalk warns for block to tract nested geography", { + expect_warning( + result <- get_crosswalk( + source_geography = "block", + target_geography = "tract"), + regexp = "nested within") + + # Returns list structure with empty crosswalk + expect_type(result, "list") + expect_equal(nrow(result$crosswalks$step_1), 0) +}) + +# ============================================================================== +# Multi-step crosswalk tests +# ============================================================================== + +test_that("get_crosswalk 
returns list for multi-step requests (geography + year change)", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result <- get_crosswalk( + source_geography = "tract", + target_geography = "zcta", + source_year = 2010, + target_year = 2020, + weight = "population") + + # Multi-step returns a list, not a tibble + expect_type(result, "list") + expect_true("crosswalks" %in% names(result)) + expect_true("plan" %in% names(result)) + expect_true("message" %in% names(result)) + + # Check that individual crosswalks are tibbles + expect_s3_class(result$crosswalks$step_1, "tbl_df") + expect_s3_class(result$crosswalks$step_2, "tbl_df") +}) + +test_that("get_crosswalk multi-step crosswalks have valid allocation factors", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result <- get_crosswalk( + source_geography = "tract", + target_geography = "zcta", + source_year = 2010, + target_year = 2020, + weight = "population") + + step1 <- result$crosswalks$step_1 + step2 <- result$crosswalks$step_2 + + expect_true(all(step1$allocation_factor_source_to_target >= 0)) + expect_true(all(step1$allocation_factor_source_to_target <= 1)) + expect_true(all(step2$allocation_factor_source_to_target >= 0)) + expect_true(all(step2$allocation_factor_source_to_target <= 1)) +}) + +test_that("get_crosswalk multi-step plan is correct", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result <- get_crosswalk( + source_geography = "tract", + target_geography = "zcta", + source_year = 2010, + target_year = 2020, + weight = "population") + + expect_true(result$plan$is_multi_step) + expect_s3_class(result$plan$steps, "tbl_df") + expect_equal(nrow(result$plan$steps), 2) +}) + +# ============================================================================== +# 2020 to 2022 specific tests +# ============================================================================== + +test_that("get_crosswalk 2020-2022 metadata contains correct info", { + skip_if_offline() + + result <- get_crosswalk( + source_geography = "tract", + target_geography = "tract", + source_year = 2020, + target_year = 2022) + + metadata <- attr(result$crosswalks$step_1, "crosswalk_metadata") + + expect_equal(metadata$source_year, "2020") + expect_equal(metadata$target_year, "2022") + expect_equal(metadata$data_source, "ctdata_nhgis_combined") + expect_true(length(metadata$notes) > 0) +}) + +test_that("get_crosswalk 2020-2022 returns nationally comprehensive data", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result <- get_crosswalk( + source_geography = "tract", + target_geography = "tract", + source_year = 2020, + target_year = 2022) + + crosswalk_tibble <- result$crosswalks$step_1 + + # Should have multiple states (nationally comprehensive) + state_fips <- unique(crosswalk_tibble$state_fips) + expect_gt(length(state_fips), 1) + + # Connecticut should be included + expect_true("09" %in% state_fips) +}) + +test_that("get_crosswalk 2020-2022 errors on unsupported geography", { + expect_error( + get_crosswalk( + source_geography = "zcta", + target_geography = "zcta", + source_year = 2020, + target_year = 2022), + regexp = "not supported") +}) + +# ============================================================================== +# get_crosswalk_2020_2022() internal function tests +# ============================================================================== + 
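+# Note: get_crosswalk_2020_2022() is an internal (non-exported) helper, so the
+# tests below reach it via the `crosswalk:::` operator rather than the public
+# get_crosswalk() interface.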
+test_that("get_crosswalk_2020_2022 returns CT crosswalk with attributes", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result <- crosswalk:::get_crosswalk_2020_2022(geography = "tract") + + expect_s3_class(result, "tbl_df") + + # Should be nationally comprehensive + state_fips <- unique(result$state_fips) + expect_gt(length(state_fips), 1) +}) + +test_that("get_crosswalk_2020_2022 errors on invalid geography", { + expect_error( + crosswalk:::get_crosswalk_2020_2022(geography = "puma"), + regexp = "not supported") +}) + +# ============================================================================== +# Consistent list return type tests +# ============================================================================== + +test_that("get_crosswalk returns list for single-step spatial", { + skip_if_offline() + + result <- get_crosswalk( + source_geography = "tract", + target_geography = "zcta", + weight = "population") + + # Always returns list structure + expect_type(result, "list") + expect_true("crosswalks" %in% names(result)) + expect_equal(length(result$crosswalks), 1) + expect_s3_class(result$crosswalks$step_1, "tbl_df") +}) + +test_that("get_crosswalk returns list for single-step temporal", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result <- get_crosswalk( + source_geography = "tract", + target_geography = "tract", + source_year = 2010, + target_year = 2020) + + # Always returns list structure + expect_type(result, "list") + expect_true("crosswalks" %in% names(result)) + expect_equal(length(result$crosswalks), 1) + expect_s3_class(result$crosswalks$step_1, "tbl_df") +}) + +test_that("get_crosswalk returns list for multi-step", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result <- get_crosswalk( + source_geography = "tract", + target_geography = "zcta", + source_year = 2010, + target_year = 2020, + weight = "population") + + # Always returns list structure + expect_type(result, "list") + expect_true("crosswalks" %in% names(result)) + expect_equal(length(result$crosswalks), 2) + expect_s3_class(result$crosswalks$step_1, "tbl_df") + expect_s3_class(result$crosswalks$step_2, "tbl_df") +}) diff --git a/tests/testthat/test-get_crosswalk_chain.R b/tests/testthat/test-get_crosswalk_chain.R new file mode 100644 index 0000000..6746bb5 --- /dev/null +++ b/tests/testthat/test-get_crosswalk_chain.R @@ -0,0 +1,238 @@ +# Tests for get_crosswalk_chain() - exported function for multi-step crosswalks + +# ============================================================================== +# get_crosswalk_chain() basic structure tests +# ============================================================================== + +test_that("get_crosswalk_chain returns expected structure", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result <- get_crosswalk_chain( + source_geography = "tract", + target_geography = "zcta", + source_year = 2010, + target_year = 2020, + weight = "population") + + expect_type(result, "list") + expect_true("plan" %in% names(result)) + expect_true("crosswalks" %in% names(result)) + expect_true("message" %in% names(result)) +}) + +test_that("get_crosswalk_chain returns plan object", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result <- get_crosswalk_chain( + source_geography = "tract", + target_geography = "zcta", + source_year = 2010, + 
target_year = 2020, + weight = "population") + + plan <- result$plan + + expect_type(plan, "list") + expect_true(plan$is_multi_step) + expect_s3_class(plan$steps, "tbl_df") + expect_equal(nrow(plan$steps), 2) +}) + +# ============================================================================== +# Multi-step crosswalk chain tests +# ============================================================================== + +test_that("get_crosswalk_chain fetches both steps for multi-step request", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result <- get_crosswalk_chain( + source_geography = "tract", + target_geography = "zcta", + source_year = 2010, + target_year = 2020, + weight = "population") + + expect_true("step_1" %in% names(result$crosswalks)) + expect_true("step_2" %in% names(result$crosswalks)) + + # Step 1 should be NHGIS (temporal) + step1_meta <- attr(result$crosswalks$step_1, "crosswalk_metadata") + expect_equal(step1_meta$data_source, "nhgis") + + # Step 2 should be Geocorr (spatial) + step2_meta <- attr(result$crosswalks$step_2, "crosswalk_metadata") + expect_equal(step2_meta$data_source, "geocorr") +}) + +test_that("get_crosswalk_chain crosswalks have correct structure", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result <- get_crosswalk_chain( + source_geography = "tract", + target_geography = "zcta", + source_year = 2010, + target_year = 2020, + weight = "population") + + step1 <- result$crosswalks$step_1 + step2 <- result$crosswalks$step_2 + + # Both should be tibbles with required columns + expect_s3_class(step1, "tbl_df") + expect_s3_class(step2, "tbl_df") + + expect_true("source_geoid" %in% colnames(step1)) + expect_true("target_geoid" %in% colnames(step1)) + expect_true("allocation_factor_source_to_target" %in% colnames(step1)) + + expect_true("source_geoid" %in% colnames(step2)) + expect_true("target_geoid" %in% colnames(step2)) + expect_true("allocation_factor_source_to_target" %in% colnames(step2)) +}) + +test_that("get_crosswalk_chain crosswalks have valid allocation factors", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result <- get_crosswalk_chain( + source_geography = "tract", + target_geography = "zcta", + source_year = 2010, + target_year = 2020, + weight = "population") + + step1 <- result$crosswalks$step_1 + step2 <- result$crosswalks$step_2 + + # Allocation factors should be between 0 and 1 + expect_true(all(step1$allocation_factor_source_to_target >= 0)) + expect_true(all(step1$allocation_factor_source_to_target <= 1)) + expect_true(all(step2$allocation_factor_source_to_target >= 0)) + expect_true(all(step2$allocation_factor_source_to_target <= 1)) +}) + +# ============================================================================== +# Single-step chain tests +# ============================================================================== + +test_that("get_crosswalk_chain handles single-step spatial request", { + skip_if_offline() + + result <- get_crosswalk_chain( + source_geography = "tract", + target_geography = "zcta", + weight = "population") + + expect_false(result$plan$is_multi_step) + expect_equal(length(result$crosswalks), 1) + expect_true("step_1" %in% names(result$crosswalks)) +}) + +test_that("get_crosswalk_chain handles single-step temporal request", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result <- get_crosswalk_chain( + source_geography 
= "tract", + target_geography = "tract", + source_year = 2010, + target_year = 2020) + + expect_false(result$plan$is_multi_step) + expect_equal(length(result$crosswalks), 1) +}) + +# ============================================================================== +# Error handling tests +# ============================================================================== + +test_that("get_crosswalk_chain errors for unsupported multi-step", { + # ZCTA as source for multi-step isn't supported by NHGIS + expect_error( + get_crosswalk_chain( + source_geography = "zcta", + target_geography = "puma", + source_year = 2010, + target_year = 2020), + regexp = "NHGIS does not support") +}) + +# ============================================================================== +# Integration with get_crosswalk() tests +# ============================================================================== + +test_that("get_crosswalk returns list for multi-step", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result <- get_crosswalk( + source_geography = "tract", + target_geography = "zcta", + source_year = 2010, + target_year = 2020, + weight = "population") + + # Multi-step should return a list + expect_type(result, "list") + expect_true("crosswalks" %in% names(result)) + expect_true("plan" %in% names(result)) + expect_true("message" %in% names(result)) + + # Should have two steps + expect_true("step_1" %in% names(result$crosswalks)) + expect_true("step_2" %in% names(result$crosswalks)) +}) + +test_that("get_crosswalk returns list for single-step (consistent structure)", { + skip_if_offline() + + result <- get_crosswalk( + source_geography = "tract", + target_geography = "zcta", + weight = "population") + + # Single-step now also returns list (consistent structure) + expect_type(result, "list") + expect_true("crosswalks" %in% names(result)) + expect_true("plan" %in% names(result)) + expect_true("message" %in% names(result)) + + # Should have one step + expect_equal(length(result$crosswalks), 1) + expect_true("step_1" %in% names(result$crosswalks)) + + # The crosswalk tibble has expected columns + crosswalk_tibble <- result$crosswalks$step_1 + expect_s3_class(crosswalk_tibble, "tbl_df") + expect_true("source_geoid" %in% colnames(crosswalk_tibble)) + expect_true("target_geoid" %in% colnames(crosswalk_tibble)) + expect_true("allocation_factor_source_to_target" %in% colnames(crosswalk_tibble)) +}) + +# ============================================================================== +# Sequential application tests +# ============================================================================== + +test_that("get_crosswalk_chain crosswalks can be applied sequentially", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result <- get_crosswalk_chain( + source_geography = "tract", + target_geography = "zcta", + source_year = 2010, + target_year = 2020, + weight = "population") + + # Step 1 target_geoid should match Step 2 source_geoid + step1_targets <- unique(result$crosswalks$step_1$target_geoid) + step2_sources <- unique(result$crosswalks$step_2$source_geoid) + + # There should be overlap - step1 targets should be usable as step2 sources + overlap <- intersect(step1_targets, step2_sources) + expect_gt(length(overlap), 0) +}) diff --git a/tests/testthat/test-get_ctdata_crosswalk.R b/tests/testthat/test-get_ctdata_crosswalk.R new file mode 100644 index 0000000..67b1e82 --- /dev/null +++ b/tests/testthat/test-get_ctdata_crosswalk.R @@ 
-0,0 +1,281 @@ +# Tests for get_ctdata_crosswalk() - CT Data Collaborative crosswalks + +# ============================================================================== +# Basic structure tests +# ============================================================================== + +test_that("get_ctdata_crosswalk returns correct structure for tracts", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result <- get_ctdata_crosswalk(geography = "tract") + + expect_s3_class(result, "tbl_df") + + expected_cols <- c( + "source_geoid", "target_geoid", + "source_geography_name", "target_geography_name", + "source_year", "target_year", + "allocation_factor_source_to_target", + "weighting_factor", "state_fips") + + expect_true(all(expected_cols %in% colnames(result))) +}) + +test_that("get_ctdata_crosswalk tract data has correct values", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result <- get_ctdata_crosswalk(geography = "tract") + + ## CT tracts have different source and target geoids (879 CT tracts) + ## Other states have identical source and target geoids (identity mapping) + ct_tracts <- result |> + dplyr::filter(source_geoid != target_geoid) + expect_equal(nrow(ct_tracts), 879) + expect_true(all(ct_tracts$state_fips == "09")) + + non_ct_tracts <- result |> + dplyr::filter(source_geoid == target_geoid) + expect_true(all(non_ct_tracts$state_fips != "09")) + + expect_equal(unique(result$source_year), "2020") + expect_equal(unique(result$target_year), "2022") + expect_equal(length(unique(result$state_fips)), 52) + expect_equal(unique(result$weighting_factor), "identity") + expect_true(all(result$allocation_factor_source_to_target == 1)) + + expect_equal(unique(result$source_geography_name), "tract") + expect_equal(unique(result$target_geography_name), "tract") +}) + +test_that("get_ctdata_crosswalk returns nationally comprehensive data", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result <- get_ctdata_crosswalk(geography = "tract") + + # Should have 52 states/territories (50 states + DC + PR) + expect_equal(length(unique(result$state_fips)), 52) + + # Should have many more than 879 tracts (national coverage) + expect_gt(nrow(result), 70000) + + # CT should be included + expect_true("09" %in% result$state_fips) +}) + +# ============================================================================== +# Geography handling tests +# ============================================================================== + +test_that("get_ctdata_crosswalk handles block geography", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result <- get_ctdata_crosswalk(geography = "block") + + expect_s3_class(result, "tbl_df") + expect_equal(unique(result$source_geography_name), "block") + expect_equal(unique(result$target_geography_name), "block") + + expect_true(all(stringr::str_length(result$source_geoid) == 15)) + expect_true(all(stringr::str_length(result$target_geoid) == 15)) +}) + +test_that("get_ctdata_crosswalk handles block_group geography", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result <- get_ctdata_crosswalk(geography = "block_group") + + expect_s3_class(result, "tbl_df") + expect_equal(unique(result$source_geography_name), "block_group") + expect_equal(unique(result$target_geography_name), "block_group") + + 
expect_true(all(stringr::str_length(result$source_geoid) == 12)) + expect_true(all(stringr::str_length(result$target_geoid) == 12)) +}) + +test_that("get_ctdata_crosswalk handles county geography", { + skip_if_offline() + skip_if_not_installed("tidycensus") + skip_if(Sys.getenv("CENSUS_API_KEY") == "", "CENSUS_API_KEY not set") + + result <- get_ctdata_crosswalk(geography = "county") + + expect_s3_class(result, "tbl_df") + expect_equal(unique(result$source_geography_name), "county") + + # Filter to CT for county-specific checks + ct_result <- result |> dplyr::filter(state_fips == "09") + + n_source_counties <- length(unique(ct_result$source_geoid)) + n_target_regions <- length(unique(ct_result$target_geoid)) + expect_equal(n_source_counties, 8) + expect_equal(n_target_regions, 9) + + # CT uses population weighting, other states use identity + expect_equal(unique(ct_result$weighting_factor), "population") + + # CT allocation factors should sum to 1 for each source county + ct_allocation_sums <- ct_result |> + dplyr::summarize( + total = sum(allocation_factor_source_to_target), + .by = "source_geoid") + expect_true(all(abs(ct_allocation_sums$total - 1) < 0.001)) + + # Non-CT counties should have identity mapping + non_ct_result <- result |> dplyr::filter(state_fips != "09") + expect_equal(unique(non_ct_result$weighting_factor), "identity") +}) + +# ============================================================================== +# Error handling tests +# ============================================================================== + +test_that("get_ctdata_crosswalk errors on unsupported geography", { + expect_error( + get_ctdata_crosswalk(geography = "zcta"), + regexp = "not supported") + + expect_error( + get_ctdata_crosswalk(geography = "place"), + regexp = "not supported") +}) + +# ============================================================================== +# Geography spelling variations tests +# ============================================================================== + +test_that("get_ctdata_crosswalk accepts various geography spellings", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result1 <- get_ctdata_crosswalk(geography = "tract") + result2 <- get_ctdata_crosswalk(geography = "tracts") + result3 <- get_ctdata_crosswalk(geography = "tr") + + expect_equal(nrow(result1), nrow(result2)) + expect_equal(nrow(result1), nrow(result3)) +}) + +test_that("get_ctdata_crosswalk accepts block group spelling variations", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result1 <- get_ctdata_crosswalk(geography = "block_group") + result2 <- get_ctdata_crosswalk(geography = "block group") + result3 <- get_ctdata_crosswalk(geography = "bg") + + expect_equal(nrow(result1), nrow(result2)) + expect_equal(nrow(result1), nrow(result3)) +}) + +# ============================================================================== +# Caching tests +# ============================================================================== + +test_that("get_ctdata_crosswalk caching works", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + cache_dir <- tempfile("crosswalk_cache_") + dir.create(cache_dir) + on.exit(unlink(cache_dir, recursive = TRUE)) + + result1 <- get_ctdata_crosswalk(geography = "tract", cache = cache_dir) + + # Correct filename for national crosswalk + cached_file <- file.path(cache_dir, "crosswalk_national_2020_to_2022_tract.csv") + 
expect_true(file.exists(cached_file)) + + result2 <- get_ctdata_crosswalk(geography = "tract", cache = cache_dir) + + # Compare data only, excluding metadata attributes that differ between fresh and cached + # (retrieved_at differs, read_from_cache differs) + expect_equal( + result1 |> tibble::as_tibble(), + result2 |> tibble::as_tibble(), + ignore_attr = TRUE) + + # Verify cached read has correct read_from_cache flag + meta1 <- attr(result1, "crosswalk_metadata") + meta2 <- attr(result2, "crosswalk_metadata") + expect_false(meta1$read_from_cache) + expect_true(meta2$read_from_cache) +}) + +# ============================================================================== +# Metadata tests +# ============================================================================== + +test_that("get_ctdata_crosswalk attaches metadata", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result <- get_ctdata_crosswalk(geography = "tract") + + metadata <- attr(result, "crosswalk_metadata") + + expect_type(metadata, "list") + expect_equal(metadata$data_source, "ctdata_nhgis_combined") + expect_true(stringr::str_detect(metadata$ctdata_download_url, "https://raw.githubusercontent.com/CT-Data-Collaborative")) + expect_equal(metadata$source_year, "2020") + expect_equal(metadata$target_year, "2022") + expect_equal(metadata$state_coverage, "National (all 50 states, DC, and Puerto Rico)") +}) + +# ============================================================================== +# Data integrity tests +# ============================================================================== + +test_that("CT tract GEOIDs have correct format changes", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result <- get_ctdata_crosswalk(geography = "tract") + + # Filter to CT records only for this test + + ct_result <- result |> dplyr::filter(state_fips == "09") + + expect_true(all(stringr::str_starts(ct_result$source_geoid, "09"))) + expect_true(all(stringr::str_starts(ct_result$target_geoid, "09"))) + + expect_true(all(stringr::str_length(ct_result$source_geoid) == 11)) + expect_true(all(stringr::str_length(ct_result$target_geoid) == 11)) + + # CT county FIPS codes changed between 2020 and 2022 + source_counties <- stringr::str_sub(ct_result$source_geoid, 3, 5) + target_counties <- stringr::str_sub(ct_result$target_geoid, 3, 5) + expect_false(all(source_counties == target_counties)) +}) + +test_that("CT county crosswalk maps 8 old counties to 9 planning regions", { + skip_if_offline() + skip_if_not_installed("tidycensus") + skip_if(Sys.getenv("CENSUS_API_KEY") == "", "CENSUS_API_KEY not set") + + result <- get_ctdata_crosswalk(geography = "county") + + # Filter to CT only for this test + ct_result <- result |> dplyr::filter(state_fips == "09") + + n_source_counties <- length(unique(ct_result$source_geoid)) + n_target_regions <- length(unique(ct_result$target_geoid)) + + expect_equal(n_source_counties, 8) + expect_equal(n_target_regions, 9) + + # Should have more rows than source counties due to many-to-many mapping + expect_gt(nrow(ct_result), 8) + + expect_true(all(ct_result$allocation_factor_source_to_target > 0)) + expect_true(all(ct_result$allocation_factor_source_to_target <= 1)) + + # Non-CT counties should have identity mapping (allocation_factor = 1) + non_ct_result <- result |> dplyr::filter(state_fips != "09") + expect_true(all(non_ct_result$allocation_factor_source_to_target == 1)) + expect_true(all(non_ct_result$source_geoid == 
non_ct_result$target_geoid)) +}) diff --git a/tests/testthat/test-get_geocorr_crosswalk.R b/tests/testthat/test-get_geocorr_crosswalk.R new file mode 100644 index 0000000..197e2bc --- /dev/null +++ b/tests/testthat/test-get_geocorr_crosswalk.R @@ -0,0 +1,327 @@ +# Tests for get_geocorr_crosswalk() - Geocorr 2022 crosswalks + +# ============================================================================== +# Basic structure tests +# ============================================================================== + +test_that("get_geocorr_crosswalk returns correct structure for tract to zcta", { + skip_if_offline() + + result <- crosswalk:::get_geocorr_crosswalk( + source_geography = "tract", + target_geography = "zcta", + weight = "population") + + expect_s3_class(result, "tbl_df") + + expected_cols <- c( + "source_geoid", "target_geoid", + "source_geography_name", "target_geography_name", + "allocation_factor_source_to_target", + "allocation_factor_target_to_source", + "weighting_factor") + + expect_true(all(expected_cols %in% colnames(result))) +}) + +test_that("get_geocorr_crosswalk returns data for all states", { + skip_if_offline() + + result <- crosswalk:::get_geocorr_crosswalk( + source_geography = "county", + target_geography = "puma22", + weight = "population") + + state_fips <- unique(result$state_fips) + + # Should have all 50 states + DC + PR = 52 + expect_gte(length(state_fips), 50) + expect_true("01" %in% state_fips) # Alabama + expect_true("06" %in% state_fips) # California + expect_true("36" %in% state_fips) # New York + expect_true("48" %in% state_fips) # Texas +}) + +# ============================================================================== +# Weighting variable tests +# ============================================================================== + +test_that("get_geocorr_crosswalk accepts population weight", { + skip_if_offline() + + result <- crosswalk:::get_geocorr_crosswalk( + source_geography = "county", + target_geography = "zcta", + weight = "population") + + expect_equal(unique(result$weighting_factor), "population") + expect_true("population_2020" %in% colnames(result)) +}) + +test_that("get_geocorr_crosswalk accepts housing weight", { + skip_if_offline() + + result <- crosswalk:::get_geocorr_crosswalk( + source_geography = "county", + target_geography = "zcta", + weight = "housing") + + expect_equal(unique(result$weighting_factor), "housing") + expect_true("housing_2020" %in% colnames(result)) +}) + +test_that("get_geocorr_crosswalk accepts land weight", { + skip_if_offline() + + result <- crosswalk:::get_geocorr_crosswalk( + source_geography = "county", + target_geography = "zcta", + weight = "land") + + expect_equal(unique(result$weighting_factor), "land") + expect_true("land_area_sqmi" %in% colnames(result)) +}) + +# ============================================================================== +# Geography type tests +# ============================================================================== + +test_that("get_geocorr_crosswalk handles tract geography", { + skip_if_offline() + + result <- crosswalk:::get_geocorr_crosswalk( + source_geography = "tract", + target_geography = "puma22", + weight = "population") + + expect_s3_class(result, "tbl_df") + expect_equal(unique(result$source_geography), "tract") + expect_equal(unique(result$target_geography), "puma22") + + # Tract GEOIDs should be 11 characters + expect_true(all(stringr::str_length(result$source_geoid) == 11)) +}) + +test_that("get_geocorr_crosswalk handles blockgroup geography", { + 
skip_if_offline() + + result <- crosswalk:::get_geocorr_crosswalk( + source_geography = "blockgroup", + target_geography = "zcta", + weight = "population") + + expect_s3_class(result, "tbl_df") + expect_equal(unique(result$source_geography), "blockgroup") + + # Block group GEOIDs should be 12 characters + expect_true(all(stringr::str_length(result$source_geoid) == 12)) +}) + +test_that("get_geocorr_crosswalk handles place geography", { + skip_if_offline() + + result <- crosswalk:::get_geocorr_crosswalk( + source_geography = "place", + target_geography = "county", + weight = "population") + + expect_s3_class(result, "tbl_df") + expect_equal(unique(result$source_geography), "place") + expect_equal(unique(result$target_geography), "county") +}) + +test_that("get_geocorr_crosswalk handles puma22 geography", { + skip_if_offline() + + result <- crosswalk:::get_geocorr_crosswalk( + source_geography = "puma22", + target_geography = "county", + weight = "population") + + expect_s3_class(result, "tbl_df") + expect_equal(unique(result$source_geography), "puma22") +}) + +test_that("get_geocorr_crosswalk handles congressional district geography", { + skip_if_offline() + + result <- crosswalk:::get_geocorr_crosswalk( + source_geography = "cd119", + target_geography = "county", + weight = "population") + + expect_s3_class(result, "tbl_df") + expect_equal(unique(result$source_geography), "cd119") +}) + +# ============================================================================== +# Allocation factor tests +# ============================================================================== + +test_that("get_geocorr_crosswalk allocation factors are valid", { + skip_if_offline() + + result <- crosswalk:::get_geocorr_crosswalk( + source_geography = "tract", + target_geography = "zcta", + weight = "population") + + # Allocation factors should be between 0 and 1 + expect_true(all(result$allocation_factor_source_to_target >= 0)) + expect_true(all(result$allocation_factor_source_to_target <= 1)) + + # Reverse allocation factors should also be between 0 and 1 + expect_true(all(result$allocation_factor_target_to_source >= 0)) + expect_true(all(result$allocation_factor_target_to_source <= 1)) +}) + +test_that("get_geocorr_crosswalk allocation factors sum to 1 per source", { + skip_if_offline() + + result <- crosswalk:::get_geocorr_crosswalk( + source_geography = "tract", + target_geography = "zcta", + weight = "population") + + # Check that allocation factors sum to approximately 1 for each source + allocation_sums <- result |> + dplyr::summarize( + total = sum(allocation_factor_source_to_target, na.rm = TRUE), + .by = "source_geoid") + + # Allow small floating point tolerance + expect_true(all(abs(allocation_sums$total - 1) < 0.01)) +}) + +# ============================================================================== +# Caching tests +# ============================================================================== + +test_that("get_geocorr_crosswalk caching works", { + skip_if_offline() + + cache_dir <- tempfile("crosswalk_cache_") + dir.create(cache_dir) + on.exit(unlink(cache_dir, recursive = TRUE)) + + # First call should fetch and cache + result1 <- crosswalk:::get_geocorr_crosswalk( + source_geography = "county", + target_geography = "zcta", + weight = "population", + cache = cache_dir) + + # Check cache file exists + cached_file <- file.path( + cache_dir, + "crosswalk_geocorr_2022_to_2022_county_to_zcta_weightedby_population.csv") + expect_true(file.exists(cached_file)) + + # Second call should read from 
cache + result2 <- crosswalk:::get_geocorr_crosswalk( + source_geography = "county", + target_geography = "zcta", + weight = "population", + cache = cache_dir) + + expect_equal(nrow(result1), nrow(result2)) +}) + +# ============================================================================== +# Metadata tests +# ============================================================================== + +test_that("get_geocorr_crosswalk attaches metadata", { + skip_if_offline() + + result <- crosswalk:::get_geocorr_crosswalk( + source_geography = "tract", + target_geography = "zcta", + weight = "population") + + metadata <- attr(result, "crosswalk_metadata") + + expect_type(metadata, "list") + expect_equal(metadata$data_source, "geocorr") + expect_equal(metadata$data_source_full_name, "Geocorr 2022 (Missouri Census Data Center)") + expect_equal(metadata$source_geography, "tract") + expect_equal(metadata$target_geography, "zcta") + expect_equal(metadata$weighting_variable, "population") + expect_equal(metadata$reference_year, "2022") + expect_true("api_endpoint" %in% names(metadata)) + expect_true("documentation_url" %in% names(metadata)) +}) + +test_that("get_geocorr_crosswalk cached metadata is marked as cached", { + skip_if_offline() + + cache_dir <- tempfile("crosswalk_cache_") + dir.create(cache_dir) + on.exit(unlink(cache_dir, recursive = TRUE)) + + # Fetch and cache + result1 <- crosswalk:::get_geocorr_crosswalk( + source_geography = "county", + target_geography = "puma22", + weight = "population", + cache = cache_dir) + + # Read from cache + result2 <- crosswalk:::get_geocorr_crosswalk( + source_geography = "county", + target_geography = "puma22", + weight = "population", + cache = cache_dir) + + metadata2 <- attr(result2, "crosswalk_metadata") + expect_true(metadata2$read_from_cache) +}) + +# ============================================================================== +# Block geography tests (special chunking) +# ============================================================================== + +test_that("get_geocorr_crosswalk handles block geography with chunking", { + skip_if_offline() + skip("Block-level crosswalks are slow; run manually if needed") + + # Block-level crosswalks require chunking (max 13 states per request) + result <- crosswalk:::get_geocorr_crosswalk( + source_geography = "block", + target_geography = "tract", + weight = "population") + + expect_s3_class(result, "tbl_df") + expect_equal(unique(result$source_geography), "block") + + # Block GEOIDs should be 15 characters + expect_true(all(stringr::str_length(result$source_geoid) == 15)) +}) + +# ============================================================================== +# Integration with get_crosswalk() tests +# ============================================================================== + +test_that("get_crosswalk routes same-year requests to Geocorr", { + skip_if_offline() + + result <- get_crosswalk( + source_geography = "tract", + target_geography = "zcta", + weight = "population") + + metadata <- attr(result$crosswalks$step_1, "crosswalk_metadata") + expect_equal(metadata$data_source, "geocorr") +}) + +test_that("get_crosswalk routes to Geocorr when no years specified", { + skip_if_offline() + + result <- get_crosswalk( + source_geography = "county", + target_geography = "puma22", + weight = "population") + + metadata <- attr(result$crosswalks$step_1, "crosswalk_metadata") + expect_equal(metadata$data_source, "geocorr") +}) diff --git a/tests/testthat/test-get_nhgis_crosswalk.R 
b/tests/testthat/test-get_nhgis_crosswalk.R new file mode 100644 index 0000000..cadcd91 --- /dev/null +++ b/tests/testthat/test-get_nhgis_crosswalk.R @@ -0,0 +1,482 @@ +# Tests for get_nhgis_crosswalk() and list_nhgis_crosswalks() - NHGIS crosswalks + +# ============================================================================== +# list_nhgis_crosswalks() tests +# ============================================================================== + +test_that("list_nhgis_crosswalks returns expected structure", { + crosswalks <- list_nhgis_crosswalks() + + expect_s3_class(crosswalks, "tbl_df") + expect_true("source_year" %in% colnames(crosswalks)) + expect_true("source_geography" %in% colnames(crosswalks)) + expect_true("target_year" %in% colnames(crosswalks)) + expect_true("target_geography" %in% colnames(crosswalks)) + expect_true("crosswalk_path" %in% colnames(crosswalks)) +}) + +test_that("list_nhgis_crosswalks includes non-census target years", { + crosswalks <- list_nhgis_crosswalks() + + target_years <- unique(crosswalks$target_year) + + expect_true("2011" %in% target_years) + expect_true("2012" %in% target_years) + expect_true("2014" %in% target_years) + expect_true("2015" %in% target_years) + expect_true("2022" %in% target_years) +}) + +test_that("list_nhgis_crosswalks includes non-census SOURCE years", { + crosswalks <- list_nhgis_crosswalks() + + source_years <- unique(crosswalks$source_year) + + # Non-census years should be valid as source years + expect_true("2011" %in% source_years) + expect_true("2012" %in% source_years) + expect_true("2014" %in% source_years) + expect_true("2015" %in% source_years) + expect_true("2022" %in% source_years) +}) + +test_that("list_nhgis_crosswalks non-census years only have bg/tr/co targets", { + crosswalks <- list_nhgis_crosswalks() + + noncensus_years <- c("2011", "2012", "2014", "2015", "2022") + + noncensus_crosswalks <- crosswalks |> + dplyr::filter(target_year %in% noncensus_years) + + target_geogs <- unique(noncensus_crosswalks$target_geography) + + expect_true(all(target_geogs %in% c("block_group", "tract", "county"))) + expect_false("place" %in% target_geogs) + expect_false("zcta" %in% target_geogs) + expect_false("puma" %in% target_geogs) +}) + +test_that("list_nhgis_crosswalks non-census source years only have bg/tr sources", { + crosswalks <- list_nhgis_crosswalks() + + noncensus_years <- c("2011", "2012", "2014", "2015", "2022") + + noncensus_source_crosswalks <- crosswalks |> + dplyr::filter(source_year %in% noncensus_years) + + source_geogs <- unique(noncensus_source_crosswalks$source_geography) + + # Non-census source years only support bg and tr (not block) + expect_true(all(source_geogs %in% c("block_group", "tract"))) + expect_false("block" %in% source_geogs) +}) + +test_that("list_nhgis_crosswalks includes 2010 to 2022 crosswalks", { + crosswalks <- list_nhgis_crosswalks() + + crosswalks_2010_to_2022 <- crosswalks |> + dplyr::filter(source_year == "2010", target_year == "2022") + + expect_gt(nrow(crosswalks_2010_to_2022), 0) + + expect_true("block_group" %in% crosswalks_2010_to_2022$target_geography) + expect_true("tract" %in% crosswalks_2010_to_2022$target_geography) + expect_true("county" %in% crosswalks_2010_to_2022$target_geography) +}) + +test_that("list_nhgis_crosswalks includes bidirectional crosswalks", { + crosswalks <- list_nhgis_crosswalks() + + # 2014 to 2020 should exist + expect_gt(nrow(crosswalks |> + dplyr::filter(source_year == "2014", target_year == "2020")), 0) + + # 2022 to 2010 should exist + 
expect_gt(nrow(crosswalks |> + dplyr::filter(source_year == "2022", target_year == "2010")), 0) + + # 2011 to 2022 should exist (both non-census, different decades) + expect_gt(nrow(crosswalks |> + dplyr::filter(source_year == "2011", target_year == "2022")), 0) +}) + +test_that("list_nhgis_crosswalks does not include block group parts", { + crosswalks <- list_nhgis_crosswalks() + + expect_false(any(stringr::str_detect(crosswalks$source_geography, "part"))) + expect_false(any(stringr::str_detect(crosswalks$target_geography, "part"))) +}) + +# ============================================================================== +# standardize_geography() tests +# ============================================================================== + +test_that("standardize_geography handles various block spellings", { + expect_equal(crosswalk:::standardize_geography("block", "source"), "blk") + expect_equal(crosswalk:::standardize_geography("blocks", "source"), "blk") + expect_equal(crosswalk:::standardize_geography("blk", "source"), "blk") + expect_equal(crosswalk:::standardize_geography("census block", "source"), "blk") +}) + +test_that("standardize_geography handles various block group spellings", { + expect_equal(crosswalk:::standardize_geography("block_group", "source"), "bg") + expect_equal(crosswalk:::standardize_geography("block group", "source"), "bg") + expect_equal(crosswalk:::standardize_geography("blockgroup", "source"), "bg") + expect_equal(crosswalk:::standardize_geography("bg", "source"), "bg") +}) + +test_that("standardize_geography handles various tract spellings", { + expect_equal(crosswalk:::standardize_geography("tract", "source"), "tr") + expect_equal(crosswalk:::standardize_geography("tracts", "source"), "tr") + expect_equal(crosswalk:::standardize_geography("tr", "source"), "tr") + expect_equal(crosswalk:::standardize_geography("census tract", "source"), "tr") +}) + +test_that("standardize_geography validates source geographies", { + # Valid source geographies + expect_no_error(crosswalk:::standardize_geography("block", "source")) + expect_no_error(crosswalk:::standardize_geography("block_group", "source")) + expect_no_error(crosswalk:::standardize_geography("tract", "source")) + + # Invalid source geography (county is target-only for NHGIS) + expect_error(crosswalk:::standardize_geography("county", "source")) + expect_error(crosswalk:::standardize_geography("zcta", "source")) +}) + +test_that("standardize_geography validates target geographies", { + # Valid target geographies + expect_no_error(crosswalk:::standardize_geography("block", "target")) + expect_no_error(crosswalk:::standardize_geography("block_group", "target")) + expect_no_error(crosswalk:::standardize_geography("tract", "target")) + expect_no_error(crosswalk:::standardize_geography("county", "target")) + expect_no_error(crosswalk:::standardize_geography("zcta", "target")) + expect_no_error(crosswalk:::standardize_geography("puma", "target")) +}) + +# ============================================================================== +# get_nhgis_crosswalk() validation tests +# ============================================================================== + +test_that("get_nhgis_crosswalk validates non-census year geography restrictions", { + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + expect_error( + get_nhgis_crosswalk( + source_year = 2010, + source_geography = "block", + target_year = 2022, + target_geography = "zcta"), + regexp = "Non-census year crosswalks.*only available") + + 
expect_error( + get_nhgis_crosswalk( + source_year = 2010, + source_geography = "block", + target_year = 2011, + target_geography = "place"), + regexp = "NHGIS only provides cross-decade") +}) + +test_that("get_nhgis_crosswalk validates non-census SOURCE year geography restrictions", { + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + # Non-census source years only support bg, tr, co - not block + expect_error( + get_nhgis_crosswalk( + source_year = 2014, + source_geography = "block", + target_year = 2020, + target_geography = "tract"), + regexp = "Non-census year crosswalks.*only available") +}) + +test_that("get_nhgis_crosswalk rejects within-decade crosswalks", { + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + # 2010 to 2014 is within-decade (both 2010s) + expect_error( + get_nhgis_crosswalk( + source_year = 2010, + source_geography = "tract", + target_year = 2014, + target_geography = "tract"), + regexp = "cross-decade") + + # 2020 to 2022 is within-decade (both 2020s) + expect_error( + get_nhgis_crosswalk( + source_year = 2020, + source_geography = "tract", + target_year = 2022, + target_geography = "tract"), + regexp = "cross-decade") + + # 2011 to 2015 is within-decade (both 2010s) + expect_error( + get_nhgis_crosswalk( + source_year = 2011, + source_geography = "tract", + target_year = 2015, + target_geography = "tract"), + regexp = "cross-decade") +}) + +test_that("get_nhgis_crosswalk validates county target restrictions for 2011/2012 sources", { + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + # 2011 source year cannot target county + expect_error( + get_nhgis_crosswalk( + source_year = 2011, + source_geography = "block_group", + target_year = 2020, + target_geography = "county"), + regexp = "County crosswalks are not available from source years 2011 or 2012") + + # 2012 source year cannot target county + expect_error( + get_nhgis_crosswalk( + source_year = 2012, + source_geography = "tract", + target_year = 2020, + target_geography = "county"), + regexp = "County crosswalks are not available from source years 2011 or 2012") +}) + +test_that("get_nhgis_crosswalk validates 1990/2000 to county restrictions", { + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + # 1990 to county only supports 2010, 2014, 2015 targets (not 2011 or 2012) + expect_error( + get_nhgis_crosswalk( + source_year = 1990, + source_geography = "block", + target_year = 2011, + target_geography = "county"), + regexp = "years 2010, 2014, and 2015") + + expect_error( + get_nhgis_crosswalk( + source_year = 2000, + source_geography = "block", + target_year = 2012, + target_geography = "county"), + regexp = "years 2010, 2014, and 2015") +}) + +test_that("get_nhgis_crosswalk requires API key", { + # Temporarily unset API key + original_key <- Sys.getenv("IPUMS_API_KEY") + Sys.setenv(IPUMS_API_KEY = "") + on.exit(Sys.setenv(IPUMS_API_KEY = original_key)) + + expect_error( + get_nhgis_crosswalk( + source_year = 2010, + source_geography = "tract", + target_year = 2020, + target_geography = "tract"), + regexp = "API key required") +}) + +# ============================================================================== +# get_nhgis_crosswalk() successful requests +# ============================================================================== + +test_that("get_nhgis_crosswalk accepts valid non-census year requests", { + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + skip_if_offline() + + expect_no_error({ 
+ result <- get_nhgis_crosswalk( + source_year = 2010, + source_geography = "tract", + target_year = 2022, + target_geography = "tract") + }) +}) + +test_that("get_nhgis_crosswalk accepts non-census SOURCE years", { + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + skip_if_offline() + + # 2014 (non-census) to 2020 (decennial) - cross-decade + expect_no_error({ + result <- get_nhgis_crosswalk( + source_year = 2014, + source_geography = "tract", + target_year = 2020, + target_geography = "tract") + }) + + # 2022 (non-census) to 2010 (decennial) - cross-decade + expect_no_error({ + result <- get_nhgis_crosswalk( + source_year = 2022, + source_geography = "block_group", + target_year = 2010, + target_geography = "block_group") + }) +}) + +test_that("get_nhgis_crosswalk returns correct structure", { + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + skip_if_offline() + + result <- get_nhgis_crosswalk( + source_year = 2010, + source_geography = "tract", + target_year = 2020, + target_geography = "tract") + + expect_s3_class(result, "tbl_df") + + expected_cols <- c( + "source_geoid", "target_geoid", + "source_geography_name", "target_geography_name", + "source_year", "target_year", + "weighting_factor", "allocation_factor_source_to_target") + + expect_true(all(expected_cols %in% colnames(result))) +}) + +test_that("get_nhgis_crosswalk returns valid allocation factors", { + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + skip_if_offline() + + result <- get_nhgis_crosswalk( + source_year = 2010, + source_geography = "tract", + target_year = 2020, + target_geography = "tract") + + # Allocation factors should be between 0 and 1 + expect_true(all(result$allocation_factor_source_to_target >= 0)) + expect_true(all(result$allocation_factor_source_to_target <= 1)) +}) + +# ============================================================================== +# Metadata tests +# ============================================================================== + +test_that("get_nhgis_crosswalk attaches metadata", { + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + skip_if_offline() + + result <- get_nhgis_crosswalk( + source_year = 2010, + source_geography = "tract", + target_year = 2020, + target_geography = "tract") + + metadata <- attr(result, "crosswalk_metadata") + + expect_type(metadata, "list") + expect_equal(metadata$data_source, "nhgis") + expect_equal(metadata$data_source_full_name, "IPUMS NHGIS (National Historical Geographic Information System)") + expect_equal(metadata$source_year, "2010") + expect_equal(metadata$target_year, "2020") + expect_true("download_url" %in% names(metadata)) + expect_true("citation_url" %in% names(metadata)) + expect_true("documentation_url" %in% names(metadata)) +}) + +# ============================================================================== +# Caching tests +# ============================================================================== + +test_that("get_nhgis_crosswalk caching works", { + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + skip_if_offline() + + cache_dir <- tempfile("crosswalk_cache_") + dir.create(cache_dir) + on.exit(unlink(cache_dir, recursive = TRUE)) + + # First call should fetch and cache + result1 <- get_nhgis_crosswalk( + source_year = 2010, + source_geography = "tract", + target_year = 2020, + target_geography = "tract", + cache = cache_dir) + + # Check cache file exists + cached_files <- list.files(cache_dir, pattern = "nhgis") + 
expect_gt(length(cached_files), 0) + + # Second call should read from cache + result2 <- get_nhgis_crosswalk( + source_year = 2010, + source_geography = "tract", + target_year = 2020, + target_geography = "tract", + cache = cache_dir) + + expect_equal(nrow(result1), nrow(result2)) + + metadata2 <- attr(result2, "crosswalk_metadata") + expect_true(metadata2$read_from_cache) +}) + +# ============================================================================== +# Integration with get_crosswalk() tests +# ============================================================================== + +test_that("get_crosswalk routes to NHGIS for inter-temporal requests", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result <- get_crosswalk( + source_geography = "tract", + target_geography = "tract", + source_year = 2010, + target_year = 2020) + + metadata <- attr(result$crosswalks$step_1, "crosswalk_metadata") + expect_equal(metadata$data_source, "nhgis") +}) + +test_that("get_crosswalk routes to NHGIS for non-census year requests", { + skip_if_offline() + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + + result <- get_crosswalk( + source_geography = "tract", + target_geography = "tract", + source_year = 2010, + target_year = 2022) + + metadata <- attr(result$crosswalks$step_1, "crosswalk_metadata") + expect_equal(metadata$data_source, "nhgis") +}) + +# ============================================================================== +# Slow integration tests (optional) +# ============================================================================== + +test_that("get_nhgis_crosswalk works for sample of crosswalks", { + skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set") + skip_if(Sys.getenv("CROSSWALK_RUN_SLOW_TESTS") != "true", "Slow tests not enabled") + skip_if_offline() + + crosswalks <- list_nhgis_crosswalks() + + # Sample 5 random crosswalks to test + set.seed(42) + sample_crosswalks <- crosswalks |> + dplyr::slice_sample(n = 5) + + for (i in seq_len(nrow(sample_crosswalks))) { + cw <- sample_crosswalks[i, ] + + result <- get_nhgis_crosswalk( + source_year = cw$source_year, + source_geography = cw$source_geography, + target_year = cw$target_year, + target_geography = cw$target_geography) + + expect_s3_class(result, "tbl_df") + expect_gt(nrow(result), 0) + } +}) diff --git a/tests/testthat/test-nhgis-crosswalk.R b/tests/testthat/test-nhgis-crosswalk.R deleted file mode 100644 index d597ae5..0000000 --- a/tests/testthat/test-nhgis-crosswalk.R +++ /dev/null @@ -1,217 +0,0 @@ -# Test comprehensive NHGIS crosswalk coverage -# These tests verify that the package can successfully retrieve a random sample -# of crosswalks from the NHGIS API. 
- -test_that("list_nhgis_crosswalks returns expected structure", { - crosswalks <- list_nhgis_crosswalks() - - expect_s3_class(crosswalks, "tbl_df") - expect_true(all(c("source_geography", "source_year", "target_geography", - "target_year", "crosswalk_path") %in% names(crosswalks))) - expect_true(nrow(crosswalks) > 100) # Should have 140+ crosswalks -}) - -test_that("list_nhgis_crosswalks includes all expected crosswalk categories", { - crosswalks <- list_nhgis_crosswalks() - - # Block-to-block crosswalks exist - blk_to_blk <- crosswalks |> - dplyr::filter(source_geography == "block", target_geography == "block") - expect_equal(nrow(blk_to_blk), 4) - - # BG-to-BG bidirectional crosswalks exist - bg_to_bg <- crosswalks |> - dplyr::filter(source_geography == "block_group", target_geography == "block_group") - expect_equal(nrow(bg_to_bg), 20) - - # Tract-to-tract bidirectional crosswalks exist - tr_to_tr <- crosswalks |> - dplyr::filter(source_geography == "tract", target_geography == "tract") - expect_equal(nrow(tr_to_tr), 30) - - # Block to other geographies exist (cbsa, pl, puma, ua, zcta) - blk_to_other <- crosswalks |> - dplyr::filter(source_geography == "block", - target_geography %in% c("core_based_statistical_area", "place", - "puma", "urban_area", "zcta")) - expect_equal(nrow(blk_to_other), 20) -}) - -test_that("list_nhgis_crosswalks includes non-decadal source years", { - crosswalks <- list_nhgis_crosswalks() - source_years <- unique(crosswalks$source_year) - - expect_true("2011" %in% source_years) - expect_true("2012" %in% source_years) - expect_true("2014" %in% source_years) - expect_true("2015" %in% source_years) - expect_true("2022" %in% source_years) -}) - -test_that("list_nhgis_crosswalks includes non-census target years", { - crosswalks <- list_nhgis_crosswalks() - target_years <- unique(crosswalks$target_year) - - expect_true("2011" %in% target_years) - expect_true("2012" %in% target_years) - expect_true("2014" %in% target_years) - expect_true("2015" %in% target_years) - expect_true("2022" %in% target_years) -}) - -test_that("county crosswalks correctly exclude 2011/2012 source years", { - crosswalks <- list_nhgis_crosswalks() - - # 2011 and 2012 should NOT have county targets - co_from_2011 <- crosswalks |> - dplyr::filter(source_year == "2011", target_geography == "county") - expect_equal(nrow(co_from_2011), 0) - - co_from_2012 <- crosswalks |> - dplyr::filter(source_year == "2012", target_geography == "county") - expect_equal(nrow(co_from_2012), 0) - - # But 2014, 2015, 2022 SHOULD have county targets - co_from_2014 <- crosswalks |> - dplyr::filter(source_year == "2014", target_geography == "county") - expect_true(nrow(co_from_2014) > 0) - - co_from_2022 <- crosswalks |> - dplyr::filter(source_year == "2022", target_geography == "county") - expect_true(nrow(co_from_2022) > 0) -}) - -test_that("1990/2000 to county crosswalks only include 2010, 2014, 2015 targets", { - crosswalks <- list_nhgis_crosswalks() - - # From 1990 to county - co_from_1990 <- crosswalks |> - dplyr::filter(source_year == "1990", target_geography == "county") - expect_true(all(co_from_1990$target_year %in% c("2010", "2014", "2015"))) - expect_false(any(co_from_1990$target_year %in% c("2011", "2012"))) - - # From 2000 to county - co_from_2000 <- crosswalks |> - dplyr::filter(source_year == "2000", target_geography == "county") - expect_true(all(co_from_2000$target_year %in% c("2010", "2014", "2015"))) - expect_false(any(co_from_2000$target_year %in% c("2011", "2012"))) -}) - 
-test_that("get_nhgis_crosswalk rejects 2011/2012 source years to county", { - skip_if_not(Sys.getenv("IPUMS_API_KEY") != "", "IPUMS_API_KEY not set") - - expect_error( - get_nhgis_crosswalk( - source_year = 2011, - source_geography = "block group", - target_year = 2020, - target_geography = "county"), - regexp = "County crosswalks are not available from source years 2011 or 2012") - - expect_error( - get_nhgis_crosswalk( - source_year = 2012, - source_geography = "tract", - target_year = 2020, - target_geography = "county"), - regexp = "County crosswalks are not available from source years 2011 or 2012") -}) - -test_that("get_nhgis_crosswalk rejects 1990/2000 to county 2011/2012", { - skip_if_not(Sys.getenv("IPUMS_API_KEY") != "", "IPUMS_API_KEY not set") - - expect_error( - get_nhgis_crosswalk( - source_year = 1990, - source_geography = "block", - target_year = 2011, - target_geography = "county"), - regexp = "years 2010, 2014, and 2015") - - expect_error( - get_nhgis_crosswalk( - source_year = 2000, - source_geography = "block", - target_year = 2012, - target_geography = "county"), - regexp = "years 2010, 2014, and 2015") -}) - - -# Integration tests that actually query the NHGIS API -# These are slow tests that should only run when IPUMS_API_KEY is available -# and when explicitly requested via an environment variable - -test_that("random sample of 20 NHGIS crosswalks can be retrieved", { - skip_if_not(Sys.getenv("IPUMS_API_KEY") != "", "IPUMS_API_KEY not set") - skip_if_not( - Sys.getenv("CROSSWALK_RUN_SLOW_TESTS") == "true", - "Set CROSSWALK_RUN_SLOW_TESTS=true to run slow integration tests") - - # Set seed for reproducibility - set.seed(12345) - - # Get all available crosswalks and select a sample of 20 - sample_crosswalks <- list_nhgis_crosswalks() |> - dplyr::slice_sample(n = 20) - - test_results = purrr::pmap( - sample_crosswalks |> dplyr::select(-crosswalk_path), - get_nhgis_crosswalk) - - valid_queries = purrr::map( - test_results, - function(crosswalk) { - metadata = attr(crosswalk, "crosswalk_metadata") - - ## if this is null, the query failed - if (is.null(metadata$retrieved_at)) 0 else 1 - }) |> - purrr::reduce(sum) - - expect_equal(valid_queries, 20) - -}) - -test_that("specific crosswalk types work correctly", { - skip_if_not(Sys.getenv("IPUMS_API_KEY") != "", "IPUMS_API_KEY not set") - skip_if_not( - Sys.getenv("CROSSWALK_RUN_SLOW_TESTS") == "true", - "Set CROSSWALK_RUN_SLOW_TESTS=true to run slow integration tests") - - # Test block-to-block (decennial) - blk_blk <- get_nhgis_crosswalk( - source_year = 2010, - source_geography = "block", - target_year = 2020, - target_geography = "block") - expect_s3_class(blk_blk, "tbl_df") - expect_true(nrow(blk_blk) > 0) - - # Test bg-to-bg with non-census source year - bg_bg_noncensus <- get_nhgis_crosswalk( - source_year = 2014, - source_geography = "block group", - target_year = 2020, - target_geography = "block group") - expect_s3_class(bg_bg_noncensus, "tbl_df") - expect_true(nrow(bg_bg_noncensus) > 0) - - # Test tract-to-tract backwards (2020s -> 2010s) - tr_tr_backwards <- get_nhgis_crosswalk( - source_year = 2022, - source_geography = "tract", - target_year = 2014, - target_geography = "tract") - expect_s3_class(tr_tr_backwards, "tbl_df") - expect_true(nrow(tr_tr_backwards) > 0) - - # Test block to ZCTA (decennial only) - blk_zcta <- get_nhgis_crosswalk( - source_year = 2010, - source_geography = "block", - target_year = 2020, - target_geography = "zcta") - expect_s3_class(blk_zcta, "tbl_df") - expect_true(nrow(blk_zcta) > 0) 
-}) diff --git a/tests/testthat/test-noncensus-crosswalks.R b/tests/testthat/test-noncensus-crosswalks.R deleted file mode 100644 index 7c9e341..0000000 --- a/tests/testthat/test-noncensus-crosswalks.R +++ /dev/null @@ -1,490 +0,0 @@ -# Tests for non-census year crosswalk functionality - -# ============================================================================== -# list_nhgis_crosswalks() tests -# ============================================================================== - -test_that("list_nhgis_crosswalks includes non-census target years", { - crosswalks <- list_nhgis_crosswalks() - - expect_s3_class(crosswalks, "tbl_df") - expect_true("target_year" %in% colnames(crosswalks)) - - target_years <- unique(crosswalks$target_year) - - expect_true("2011" %in% target_years) - expect_true("2012" %in% target_years) - expect_true("2014" %in% target_years) - expect_true("2015" %in% target_years) - expect_true("2022" %in% target_years) -}) - -test_that("list_nhgis_crosswalks non-census years only have bg/tr/co targets", { - crosswalks <- list_nhgis_crosswalks() - - noncensus_years <- c("2011", "2012", "2014", "2015", "2022") - - noncensus_crosswalks <- crosswalks |> - dplyr::filter(target_year %in% noncensus_years) - - target_geogs <- unique(noncensus_crosswalks$target_geography) - - expect_true(all(target_geogs %in% c("block_group", "tract", "county"))) - expect_false("place" %in% target_geogs) - expect_false("zcta" %in% target_geogs) - expect_false("puma" %in% target_geogs) -}) - -test_that("list_nhgis_crosswalks includes 2010 to 2022 crosswalks", { - crosswalks <- list_nhgis_crosswalks() - - crosswalks_2010_to_2022 <- crosswalks |> - dplyr::filter(source_year == "2010", target_year == "2022") - - expect_gt(nrow(crosswalks_2010_to_2022), 0) - - expect_true("block_group" %in% crosswalks_2010_to_2022$target_geography) - expect_true("tract" %in% crosswalks_2010_to_2022$target_geography) - expect_true("county" %in% crosswalks_2010_to_2022$target_geography) -}) - -test_that("list_nhgis_crosswalks includes non-census SOURCE years", { - crosswalks <- list_nhgis_crosswalks() - - source_years <- unique(crosswalks$source_year) - - # Non-census years should be valid as source years - expect_true("2011" %in% source_years) - expect_true("2012" %in% source_years) - expect_true("2014" %in% source_years) - expect_true("2015" %in% source_years) - expect_true("2022" %in% source_years) -}) - -test_that("list_nhgis_crosswalks non-census source years only have bg/tr sources", { - crosswalks <- list_nhgis_crosswalks() - - noncensus_years <- c("2011", "2012", "2014", "2015", "2022") - - noncensus_source_crosswalks <- crosswalks |> - dplyr::filter(source_year %in% noncensus_years) - - source_geogs <- unique(noncensus_source_crosswalks$source_geography) - - # Non-census source years only support bg and tr (not block) - expect_true(all(source_geogs %in% c("block_group", "tract"))) - expect_false("block" %in% source_geogs) -}) - -test_that("list_nhgis_crosswalks includes bidirectional crosswalks", { - crosswalks <- list_nhgis_crosswalks() - - # 2014 to 2020 should exist - expect_gt(nrow(crosswalks |> - dplyr::filter(source_year == "2014", target_year == "2020")), 0) - - # 2022 to 2010 should exist - expect_gt(nrow(crosswalks |> - dplyr::filter(source_year == "2022", target_year == "2010")), 0) - - # 2011 to 2022 should exist (both non-census, different decades) - expect_gt(nrow(crosswalks |> - dplyr::filter(source_year == "2011", target_year == "2022")), 0) -}) - -# 
-# ==============================================================================
-# get_ctdata_crosswalk() tests
-# ==============================================================================
-
-test_that("get_ctdata_crosswalk returns correct structure for tracts", {
-  skip_if_offline()
-
-  result <- get_ctdata_crosswalk(geography = "tract")
-
-  expect_s3_class(result, "tbl_df")
-
-  expected_cols <- c(
-    "source_geoid", "target_geoid",
-    "source_geography_name", "target_geography_name",
-    "source_year", "target_year",
-    "allocation_factor_source_to_target",
-    "weighting_factor", "state_fips")
-
-  expect_true(all(expected_cols %in% colnames(result)))
-})
-test_that("get_ctdata_crosswalk tract data has correct values", {
-  skip_if_offline()
-
-  result <- get_ctdata_crosswalk(geography = "tract")
-
-  expect_equal(unique(result$source_year), "2020")
-  expect_equal(unique(result$target_year), "2022")
-  expect_equal(unique(result$state_fips), "09")
-  expect_equal(unique(result$weighting_factor), "identity")
-  expect_true(all(result$allocation_factor_source_to_target == 1))
-
-  expect_equal(unique(result$source_geography_name), "tract")
-  expect_equal(unique(result$target_geography_name), "tract")
-})
-
-test_that("get_ctdata_crosswalk returns 879 CT tracts",
-{
-  skip_if_offline()
-
-  result <- get_ctdata_crosswalk(geography = "tract")
-
-  expect_equal(nrow(result), 879)
-})
-
-test_that("get_ctdata_crosswalk handles block_group geography", {
-  skip_if_offline()
-
-  result <- get_ctdata_crosswalk(geography = "block_group")
-
-  expect_s3_class(result, "tbl_df")
-  expect_equal(unique(result$source_geography_name), "block_group")
-  expect_equal(unique(result$target_geography_name), "block_group")
-
-  expect_true(all(stringr::str_length(result$source_geoid) == 12))
-  expect_true(all(stringr::str_length(result$target_geoid) == 12))
-})
-
-test_that("get_ctdata_crosswalk handles county geography", {
-  skip_if_offline()
-  skip_if_not_installed("tidycensus")
-  skip_if(Sys.getenv("CENSUS_API_KEY") == "", "CENSUS_API_KEY not set")
-
-  result <- get_ctdata_crosswalk(geography = "county")
-
-  expect_s3_class(result, "tbl_df")
-  expect_equal(unique(result$source_geography_name), "county")
-
-  n_source_counties <- length(unique(result$source_geoid))
-  n_target_regions <- length(unique(result$target_geoid))
-  expect_equal(n_source_counties, 8)
-  expect_equal(n_target_regions, 9)
-
-  expect_equal(unique(result$weighting_factor), "population")
-
-  allocation_sums <- result |>
-    dplyr::summarize(
-      total = sum(allocation_factor_source_to_target),
-      .by = "source_geoid")
-  expect_true(all(abs(allocation_sums$total - 1) < 0.001))
-})
-
-test_that("get_ctdata_crosswalk errors on unsupported geography", {
-  expect_error(
-    get_ctdata_crosswalk(geography = "zcta"),
-    regexp = "not supported")
-
-  expect_error(
-    get_ctdata_crosswalk(geography = "place"),
-    regexp = "not supported")
-})
-
-
-test_that("get_ctdata_crosswalk accepts various geography spellings", {
-  skip_if_offline()
-
-  result1 <- get_ctdata_crosswalk(geography = "tract")
-  result2 <- get_ctdata_crosswalk(geography = "tracts")
-  result3 <- get_ctdata_crosswalk(geography = "tr")
-
-  expect_equal(nrow(result1), nrow(result2))
-  expect_equal(nrow(result1), nrow(result3))
-})
-
-test_that("get_ctdata_crosswalk caching works", {
-  skip_if_offline()
-
-  cache_dir <- tempfile("crosswalk_cache_")
-  dir.create(cache_dir)
-  on.exit(unlink(cache_dir, recursive = TRUE))
-
-  result1 <- get_ctdata_crosswalk(geography = "tract", cache = cache_dir)
-
-  cached_file <-
-    file.path(cache_dir, "crosswalk_ctdata_2020_to_2022_tract.csv")
-  expect_true(file.exists(cached_file))
-
-  result2 <- get_ctdata_crosswalk(geography = "tract", cache = cache_dir)
-  expect_equal(result1, result2)
-})
-
-# ==============================================================================
-# get_crosswalk() routing tests
-# ==============================================================================
-
-test_that("get_crosswalk routes 2020-2022 to CTData", {
-  skip_if_offline()
-
-  result <- get_crosswalk(
-    source_geography = "tract",
-    target_geography = "tract",
-    source_year = 2020,
-    target_year = 2022)
-
-  expect_s3_class(result, "tbl_df")
-
-  metadata <- attr(result, "crosswalk_metadata")
-  expect_equal(metadata$data_source, "ctdata")
-})
-
-test_that("get_crosswalk attaches comprehensive metadata attribute", {
-  skip_if_offline()
-
-  result <- get_crosswalk(
-    source_geography = "tract",
-    target_geography = "tract",
-    source_year = 2020,
-    target_year = 2022)
-
-  metadata <- attr(result, "crosswalk_metadata")
-
-  expect_type(metadata, "list")
-
-  # Check for key metadata fields
-  expect_true("call_parameters" %in% names(metadata))
-  expect_true("data_source" %in% names(metadata))
-  expect_true("data_source_full_name" %in% names(metadata))
-  expect_true("source_geography" %in% names(metadata))
-  expect_true("target_geography" %in% names(metadata))
-  expect_true("source_year" %in% names(metadata))
-  expect_true("target_year" %in% names(metadata))
-  expect_true("crosswalk_package_version" %in% names(metadata))
-
-  # Call parameters should be a nested list
-  expect_type(metadata$call_parameters, "list")
-  expect_equal(metadata$call_parameters$source_geography, "tract")
-  expect_equal(metadata$call_parameters$target_geography, "tract")
-  expect_equal(metadata$call_parameters$source_year, "2020")
-  expect_equal(metadata$call_parameters$target_year, "2022")
-})
-
-test_that("CTData crosswalk metadata includes download URL", {
-  skip_if_offline()
-
-  result <- get_crosswalk(
-    source_geography = "tract",
-    target_geography = "tract",
-    source_year = 2020,
-    target_year = 2022)
-
-  metadata <- attr(result, "crosswalk_metadata")
-
-  expect_equal(metadata$data_source, "ctdata")
-  expect_true(stringr::str_detect(
-    metadata$download_url,
-    "github.com/CT-Data-Collaborative"))
-  expect_true("github_repository" %in% names(metadata))
-})
-
-test_that("get_crosswalk 2020-2022 metadata contains correct info", {
-  skip_if_offline()
-
-  result <- get_crosswalk(
-    source_geography = "tract",
-    target_geography = "tract",
-    source_year = 2020,
-    target_year = 2022)
-
-  metadata <- attr(result, "crosswalk_metadata")
-
-  expect_equal(metadata$source_year, "2020")
-  expect_equal(metadata$target_year, "2022")
-  expect_equal(metadata$data_source, "ctdata")
-  expect_equal(metadata$data_source_full_name, "CT Data Collaborative")
-  expect_true(length(metadata$notes) > 0)
-})
-
-test_that("get_crosswalk 2020-2022 only returns Connecticut data", {
-  skip_if_offline()
-
-  result <- get_crosswalk(
-    source_geography = "tract",
-    target_geography = "tract",
-    source_year = 2020,
-    target_year = 2022)
-
-  state_fips <- unique(result$state_fips)
-  expect_equal(state_fips, "09")
-})
-
-test_that("get_crosswalk 2020-2022 errors on unsupported geography", {
-  expect_error(
-    get_crosswalk(
-      source_geography = "zcta",
-      target_geography = "zcta",
-      source_year = 2020,
-      target_year = 2022),
-    regexp = "not supported")
-})
-
-# ==============================================================================
-# get_crosswalk_2020_2022() tests
-# ==============================================================================
-
-test_that("get_crosswalk_2020_2022 returns CT crosswalk with attributes", {
-  skip_if_offline()
-
-  result <- crosswalk:::get_crosswalk_2020_2022(geography = "tract")
-
-  expect_s3_class(result, "tbl_df")
-
-  sources_attr <- attr(result, "crosswalk_sources")
-  expect_type(sources_attr, "list")
-  expect_equal(sources_attr$connecticut, "ctdata")
-  expect_equal(sources_attr$other_states, "identity_mapping")
-
-  note_attr <- attr(result, "identity_states_note")
-  expect_type(note_attr, "character")
-})
-
-test_that("get_crosswalk_2020_2022 errors on invalid geography", {
-  expect_error(
-    crosswalk:::get_crosswalk_2020_2022(geography = "puma"),
-    regexp = "not supported")
-})
-
-# ==============================================================================
-# get_nhgis_crosswalk() validation tests
-# ==============================================================================
-
-test_that("get_nhgis_crosswalk validates non-census year geography restrictions", {
-  skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set")
-
-  expect_error(
-    get_nhgis_crosswalk(
-      source_year = 2010,
-      source_geography = "block",
-      target_year = 2022,
-      target_geography = "zcta"),
-    regexp = "Non-census year crosswalks.*only available")
-
-  expect_error(
-    get_nhgis_crosswalk(
-      source_year = 2010,
-      source_geography = "block",
-      target_year = 2011,
-      target_geography = "place"),
-    regexp = "Non-census year crosswalks.*only available")
-})
-
-test_that("get_nhgis_crosswalk validates non-census SOURCE year geography restrictions", {
-  skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set")
-
-  # Non-census source years only support bg, tr, co - not block
-  expect_error(
-    get_nhgis_crosswalk(
-      source_year = 2014,
-      source_geography = "block",
-      target_year = 2020,
-      target_geography = "tract"),
-    regexp = "Non-census year crosswalks.*only available")
-})
-
-test_that("get_nhgis_crosswalk rejects within-decade crosswalks", {
-  skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set")
-
-  # 2010 to 2014 is within-decade (both 2010s)
-  expect_error(
-    get_nhgis_crosswalk(
-      source_year = 2010,
-      source_geography = "tract",
-      target_year = 2014,
-      target_geography = "tract"),
-    regexp = "cross-decade")
-
-  # 2020 to 2022 is within-decade (both 2020s)
-  expect_error(
-    get_nhgis_crosswalk(
-      source_year = 2020,
-      source_geography = "tract",
-      target_year = 2022,
-      target_geography = "tract"),
-    regexp = "cross-decade")
-
-  # 2011 to 2015 is within-decade (both 2010s)
-  expect_error(
-    get_nhgis_crosswalk(
-      source_year = 2011,
-      source_geography = "tract",
-      target_year = 2015,
-      target_geography = "tract"),
-    regexp = "cross-decade")
-})
-
-test_that("get_nhgis_crosswalk accepts valid non-census year requests", {
-  skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set")
-  skip_if_offline()
-
-  expect_no_error({
-    result <- get_nhgis_crosswalk(
-      source_year = 2010,
-      source_geography = "block",
-      target_year = 2022,
-      target_geography = "tract")
-  })
-})
-
-test_that("get_nhgis_crosswalk accepts non-census SOURCE years", {
-  skip_if(Sys.getenv("IPUMS_API_KEY") == "", "IPUMS_API_KEY not set")
-  skip_if_offline()
-
-  # 2014 (non-census) to 2020 (decennial) - cross-decade
-  expect_no_error({
-    result <- get_nhgis_crosswalk(
-      source_year = 2014,
-      source_geography = "tract",
-      target_year = 2020,
-      target_geography = "tract")
-  })
-
-  # 2022 (non-census) to 2010 (decennial) - cross-decade
-  expect_no_error({
-    result <- get_nhgis_crosswalk(
-      source_year = 2022,
-      source_geography = "block_group",
-      target_year = 2010,
-      target_geography = "block_group")
-  })
-})
-
-# ==============================================================================
-# Integration tests
-# ==============================================================================
-
-test_that("CT tract GEOIDs have correct format changes", {
-  skip_if_offline()
-
-  result <- get_ctdata_crosswalk(geography = "tract")
-
-  expect_true(all(stringr::str_starts(result$source_geoid, "09")))
-  expect_true(all(stringr::str_starts(result$target_geoid, "09")))
-
-  expect_true(all(stringr::str_length(result$source_geoid) == 11))
-  expect_true(all(stringr::str_length(result$target_geoid) == 11))
-
-  source_counties <- stringr::str_sub(result$source_geoid, 3, 5)
-  target_counties <- stringr::str_sub(result$target_geoid, 3, 5)
-  expect_false(all(source_counties == target_counties))
-})
-
-test_that("CT county crosswalk maps 8 old counties to 9 planning regions", {
-  skip_if_offline()
-  skip_if_not_installed("tidycensus")
-  skip_if(Sys.getenv("CENSUS_API_KEY") == "", "CENSUS_API_KEY not set")
-
-  result <- get_ctdata_crosswalk(geography = "county")
-
-  n_source_counties <- length(unique(result$source_geoid))
-  n_target_regions <- length(unique(result$target_geoid))
-
-  expect_equal(n_source_counties, 8)
-  expect_equal(n_target_regions, 9)
-
-  expect_gt(nrow(result), 8)
-
-  expect_true(all(result$allocation_factor_source_to_target > 0))
-  expect_true(all(result$allocation_factor_source_to_target <= 1))
-})
diff --git a/tests/testthat/test-plan_crosswalk_chain.R b/tests/testthat/test-plan_crosswalk_chain.R
new file mode 100644
index 0000000..bd37ae8
--- /dev/null
+++ b/tests/testthat/test-plan_crosswalk_chain.R
@@ -0,0 +1,216 @@
+# Tests for plan_crosswalk_chain() - crosswalk chain planning
+
+# ==============================================================================
+# Basic structure tests
+# ==============================================================================
+test_that("plan_crosswalk_chain returns expected structure", {
+  plan <- plan_crosswalk_chain(
+    source_geography = "tract",
+    target_geography = "zcta",
+    source_year = 2010,
+    target_year = 2020)
+
+  expect_type(plan, "list")
+  expect_true("is_multi_step" %in% names(plan))
+  expect_true("steps" %in% names(plan))
+  expect_true("intermediate_geography" %in% names(plan))
+  expect_true("intermediate_year" %in% names(plan))
+  expect_true("composition_note" %in% names(plan))
+  expect_true("error" %in% names(plan))
+})
+
+# ==============================================================================
+# Single-step detection tests
+# ==============================================================================
+
+test_that("plan_crosswalk_chain detects same geography/year as no crosswalk", {
+  plan <- plan_crosswalk_chain(
+    source_geography = "tract",
+    target_geography = "tract")
+
+  expect_false(plan$is_multi_step)
+  expect_equal(plan$steps$crosswalk_source[1], "none")
+})
+
+test_that("plan_crosswalk_chain detects same-year geography change as single-step", {
+  plan <- plan_crosswalk_chain(
+    source_geography = "tract",
+    target_geography = "zcta")
+
+  expect_false(plan$is_multi_step)
+  expect_equal(nrow(plan$steps), 1)
+  expect_equal(plan$steps$crosswalk_source[1], "geocorr")
+})
+
+test_that("plan_crosswalk_chain detects same-geography year change as single-step", {
+  plan <- plan_crosswalk_chain(
+    source_geography = "tract",
+    target_geography = "tract",
+    source_year = 2010,
+    target_year = 2020)
+
+  expect_false(plan$is_multi_step)
+  expect_equal(nrow(plan$steps), 1)
+  expect_equal(plan$steps$crosswalk_source[1], "nhgis")
+})
+
+test_that("plan_crosswalk_chain detects 2020-2022 as CTData", {
+  plan <- plan_crosswalk_chain(
+    source_geography = "tract",
+    target_geography = "tract",
+    source_year = 2020,
+    target_year = 2022)
+
+  expect_false(plan$is_multi_step)
+  expect_equal(plan$steps$crosswalk_source[1], "ctdata_2020_2022")
+})
+
+# ==============================================================================
+# Multi-step detection tests
+# ==============================================================================
+
+test_that("plan_crosswalk_chain detects geography + year change as multi-step", {
+  plan <- plan_crosswalk_chain(
+    source_geography = "tract",
+    target_geography = "zcta",
+    source_year = 2010,
+    target_year = 2020)
+
+  expect_true(plan$is_multi_step)
+  expect_equal(nrow(plan$steps), 2)
+  expect_equal(plan$intermediate_geography, "tract")
+  expect_equal(plan$intermediate_year, "2020")
+})
+
+test_that("plan_crosswalk_chain multi-step has correct step order", {
+  plan <- plan_crosswalk_chain(
+    source_geography = "tract",
+    target_geography = "zcta",
+    source_year = 2010,
+    target_year = 2020)
+
+  # Step 1: year change (NHGIS)
+  expect_equal(plan$steps$source_geography[1], "tract")
+  expect_equal(plan$steps$target_geography[1], "tract")
+  expect_equal(plan$steps$source_year[1], "2010")
+  expect_equal(plan$steps$target_year[1], "2020")
+  expect_equal(plan$steps$crosswalk_source[1], "nhgis")
+
+  # Step 2: geography change (Geocorr)
+  expect_equal(plan$steps$source_geography[2], "tract")
+  expect_equal(plan$steps$target_geography[2], "zcta")
+  expect_equal(plan$steps$source_year[2], "2020")
+  expect_equal(plan$steps$target_year[2], "2020")
+  expect_equal(plan$steps$crosswalk_source[2], "geocorr")
+})
+
+test_that("plan_crosswalk_chain multi-step works for block_group", {
+  plan <- plan_crosswalk_chain(
+    source_geography = "block_group",
+    target_geography = "puma",
+    source_year = 2010,
+    target_year = 2020)
+
+  expect_true(plan$is_multi_step)
+  expect_equal(plan$intermediate_geography, "block_group")
+})
+
+# ==============================================================================
+# Error handling tests
+# ==============================================================================
+
+test_that("plan_crosswalk_chain errors for unsupported source geography in multi-step", {
+  plan <- plan_crosswalk_chain(
+    source_geography = "zcta",
+    target_geography = "puma",
+    source_year = 2010,
+    target_year = 2020)
+
+  expect_false(is.null(plan$error))
+  expect_true(stringr::str_detect(plan$error, "NHGIS does not support"))
+})
+
+test_that("plan_crosswalk_chain errors for puma as source in multi-step", {
+  plan <- plan_crosswalk_chain(
+    source_geography = "puma",
+    target_geography = "zcta",
+    source_year = 2010,
+    target_year = 2020)
+
+  expect_false(is.null(plan$error))
+})
+
+# ==============================================================================
+# Helper function tests
+# ==============================================================================
+
+test_that("standardize_geography_for_chain handles various spellings", {
+  expect_equal(crosswalk:::standardize_geography_for_chain("tract"), "tract")
+  expect_equal(crosswalk:::standardize_geography_for_chain("tracts"), "tract")
+  expect_equal(crosswalk:::standardize_geography_for_chain("tr"), "tract")
+  expect_equal(crosswalk:::standardize_geography_for_chain("block_group"), "block_group")
+  expect_equal(crosswalk:::standardize_geography_for_chain("block group"), "block_group")
+  expect_equal(crosswalk:::standardize_geography_for_chain("bg"), "block_group")
+  expect_equal(crosswalk:::standardize_geography_for_chain("zcta"), "zcta")
+  expect_equal(crosswalk:::standardize_geography_for_chain("puma"), "puma")
+})
+
+test_that("determine_temporal_source returns correct source", {
+  expect_equal(crosswalk:::determine_temporal_source("2010", "2020"), "nhgis")
+  expect_equal(crosswalk:::determine_temporal_source("2020", "2022"), "ctdata_2020_2022")
+  expect_equal(crosswalk:::determine_temporal_source("2022", "2020"), "ctdata_2020_2022")
+  expect_equal(crosswalk:::determine_temporal_source("1990", "2010"), "nhgis")
+})
+
+test_that("format_chain_plan_message produces readable output", {
+  plan <- plan_crosswalk_chain(
+    source_geography = "tract",
+    target_geography = "zcta",
+    source_year = 2010,
+    target_year = 2020)
+
+  message <- crosswalk:::format_chain_plan_message(plan)
+
+  expect_type(message, "character")
+  expect_true(stringr::str_detect(message, "Multi-step"))
+  expect_true(stringr::str_detect(message, "Step 1"))
+  expect_true(stringr::str_detect(message, "Step 2"))
+})
+
+test_that("format_chain_plan_message handles single-step", {
+  plan <- plan_crosswalk_chain(
+    source_geography = "tract",
+    target_geography = "zcta")
+
+  message <- crosswalk:::format_chain_plan_message(plan)
+
+  expect_type(message, "character")
+  expect_true(stringr::str_detect(message, "Single-step"))
+})
+
+test_that("format_chain_plan_message handles errors", {
+  plan <- plan_crosswalk_chain(
+    source_geography = "zcta",
+    target_geography = "puma",
+    source_year = 2010,
+    target_year = 2020)
+
+  message <- crosswalk:::format_chain_plan_message(plan)
+
+  expect_true(stringr::str_detect(message, "Error"))
+})
+
+# ==============================================================================
+# Composition note tests
+# ==============================================================================
+
+test_that("plan_crosswalk_chain includes composition note for multi-step", {
+  plan <- plan_crosswalk_chain(
+    source_geography = "tract",
+    target_geography = "zcta",
+    source_year = 2010,
+    target_year = 2020)
+
+  expect_true(stringr::str_detect(plan$composition_note, "step1_allocation"))
+  expect_true(stringr::str_detect(plan$composition_note, "step2_allocation"))
+})