diff --git a/CHANGELOG.md b/CHANGELOG.md index 0aa6c0ee..f95bae31 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ # Changelog +## 4.3.0 + +- [[#373](https://github.com/IronCoreLabs/ironoxide/pull/373)] Add managed and unmanaged file encrypt/decrypt, which internally stream the bytes for constant memory usage. Note the security disclaimer on file decrypt if you intend to use these APIs. + ## 4.2.1 - [[#372](https://github.com/IronCoreLabs/ironoxide/pull/372)] Hotfix serialized public key cache so that offline initialization works. This fix does require cache regeneration. diff --git a/Cargo.toml b/Cargo.toml index f97739d0..3efc3807 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ironoxide" -version = "4.2.1" +version = "4.3.0" authors = ["IronCore Labs "] readme = "README.md" license = "AGPL-3.0-only" @@ -35,6 +35,7 @@ base64 = "0.22" base64-serde = "0.8" bytes = "1" futures = "0.3.1" +ghash = "0.5" # Incremental GHASH computation for streaming AES-GCM hex = "0.4" ironcore-search-helpers = { version = "0.2", optional = true } itertools = "0.14" @@ -68,6 +69,8 @@ criterion = "0.8" double = "0.2.4" galvanic-assert = "0.8" mut_static = "5" +proptest = "1.6" +tempfile = "3" tokio = { version = "1", features = ["macros", "rt-multi-thread"] } uuid = { version = "1.0", features = ["v4"], default-features = false } diff --git a/flake.nix b/flake.nix index b18f1d54..d08b81cc 100644 --- a/flake.nix +++ b/flake.nix @@ -7,19 +7,22 @@ flake-utils.url = "github:numtide/flake-utils"; }; - outputs = { self, nixpkgs, rust-overlay, flake-utils, ... 
}: - flake-utils.lib.eachDefaultSystem (system: - let - overlays = [ (import rust-overlay) ]; - pkgs = import nixpkgs { inherit system overlays; }; - rusttoolchain = - pkgs.rust-bin.fromRustupToolchainFile ./rust-toolchain.toml; - in rec { - # nix develop - devShell = pkgs.mkShell { - buildInputs = with pkgs; - [ rusttoolchain pkg-config openssl ]; - }; - - }); + outputs = { + self, + nixpkgs, + rust-overlay, + flake-utils, + ... + }: + flake-utils.lib.eachDefaultSystem (system: let + overlays = [(import rust-overlay)]; + pkgs = import nixpkgs {inherit system overlays;}; + rusttoolchain = + pkgs.rust-bin.fromRustupToolchainFile ./rust-toolchain.toml; + in rec { + # nix develop + devShells.default = pkgs.mkShell { + buildInputs = with pkgs; [rusttoolchain pkg-config openssl]; + }; + }); } diff --git a/src/blocking.rs b/src/blocking.rs index 65cb2dd1..7cd2b6c1 100644 --- a/src/blocking.rs +++ b/src/blocking.rs @@ -231,6 +231,63 @@ impl BlockingIronOxide { self.ironoxide .document_revoke_access_unmanaged(edeks, revoke_list) } + + /// See [ironoxide::document::file::DocumentFileOps::document_file_encrypt](trait.DocumentFileOps.html#tymethod.document_file_encrypt) + pub fn document_file_encrypt( + &self, + source_path: &str, + destination_path: &str, + opts: &DocumentEncryptOpts, + ) -> Result { + self.runtime.block_on(self.ironoxide.document_file_encrypt( + source_path, + destination_path, + opts, + )) + } + + /// See [ironoxide::document::file::DocumentFileOps::document_file_decrypt](trait.DocumentFileOps.html#tymethod.document_file_decrypt) + pub fn document_file_decrypt( + &self, + source_path: &str, + destination_path: &str, + ) -> Result { + self.runtime.block_on( + self.ironoxide + .document_file_decrypt(source_path, destination_path), + ) + } + + /// See [ironoxide::document::file::DocumentFileAdvancedOps::document_file_encrypt_unmanaged](trait.DocumentFileAdvancedOps.html#tymethod.document_file_encrypt_unmanaged) + pub fn document_file_encrypt_unmanaged( + 
&self, + source_path: &str, + destination_path: &str, + opts: &DocumentEncryptOpts, + ) -> Result { + self.runtime + .block_on(self.ironoxide.document_file_encrypt_unmanaged( + source_path, + destination_path, + opts, + )) + } + + /// See [ironoxide::document::file::DocumentFileAdvancedOps::document_file_decrypt_unmanaged](trait.DocumentFileAdvancedOps.html#tymethod.document_file_decrypt_unmanaged) + pub fn document_file_decrypt_unmanaged( + &self, + source_path: &str, + destination_path: &str, + encrypted_deks: &[u8], + ) -> Result { + self.runtime + .block_on(self.ironoxide.document_file_decrypt_unmanaged( + source_path, + destination_path, + encrypted_deks, + )) + } + /// See [ironoxide::IronOxide::export_public_key_cache](../struct.IronOxide.html#method.export_public_key_cache) pub fn export_public_key_cache(&self) -> Result> { self.ironoxide.export_public_key_cache() diff --git a/src/crypto/aes.rs b/src/crypto/aes.rs index c4db8a01..a61558a3 100644 --- a/src/crypto/aes.rs +++ b/src/crypto/aes.rs @@ -9,9 +9,11 @@ use std::{convert::TryFrom, ops::DerefMut, sync::Mutex}; //There is no way this can fail. Value is most definitely not less than one. const PBKDF2_ITERATIONS: NonZeroU32 = NonZeroU32::new(250_000).unwrap(); const PBKDF2_SALT_LEN: usize = 32; -const AES_GCM_TAG_LEN: usize = 16; -const AES_IV_LEN: usize = 12; -const AES_KEY_LEN: usize = 32; +pub(crate) const AES_GCM_TAG_LEN: usize = 16; +pub(crate) const AES_IV_LEN: usize = 12; +pub(crate) const AES_KEY_LEN: usize = 32; +/// Byte size of AES block (128, 192, and 256 bit keys all have 128 bit blocks) +pub(crate) const AES_BLOCK_SIZE: usize = 16; // 128 bit / 8 bits per byte //The encrypted user master key length will be the size of the encrypted key (32 bytes) plus the size of the GCM auth tag (16 bytes). 
const ENCRYPTED_KEY_AND_GCM_TAG_LEN: usize = AES_KEY_LEN + AES_GCM_TAG_LEN; @@ -245,6 +247,8 @@ pub fn decrypt( #[cfg(test)] mod tests { use super::*; + use crate::crypto::streaming::tests::{generate_test_key, test_rng}; + use proptest::prelude::*; use std::{convert::TryInto, sync::Arc}; #[test] @@ -275,11 +279,9 @@ mod tests { #[test] fn test_encrypt() { let plaintext = vec![1, 2, 3, 4, 5, 6, 7]; - let mut key = [0u8; 32]; - let mut rng = rand::thread_rng(); - rng.fill_bytes(&mut key); - - let res = encrypt(&Mutex::new(rng), plaintext.clone(), key).unwrap(); + let key = generate_test_key(); + let rng = test_rng(); + let res = encrypt(&rng, plaintext.clone(), key).unwrap(); assert_eq!(res.aes_iv.len(), 12); assert_eq!( res.ciphertext.len(), @@ -290,11 +292,10 @@ mod tests { #[test] fn test_decrypt() { let plaintext = vec![1, 2, 3, 4, 5, 6, 7]; - let mut key = [0u8; 32]; - let mut rng = rand::thread_rng(); - rng.fill_bytes(&mut key); + let key = generate_test_key(); + let rng = test_rng(); - let mut encrypted_result = encrypt(&Mutex::new(rng), plaintext.clone(), key).unwrap(); + let mut encrypted_result = encrypt(&rng, plaintext.clone(), key).unwrap(); let decrypted_plaintext = decrypt(&mut encrypted_result, key).unwrap(); @@ -321,11 +322,9 @@ mod tests { #[test] fn test_parallel_encrypt() { - use rand::SeedableRng; - let plaintext = vec![1, 2, 3, 4, 5, 6, 7]; let mut key = [0u8; 32]; - let rng = Mutex::new(rand_chacha::ChaChaRng::from_entropy()); + let rng = test_rng(); take_lock(&rng).deref_mut().fill_bytes(&mut key); let a_rng = Arc::new(rng); @@ -347,4 +346,51 @@ mod tests { assert_eq!(joined_count, 100); } + + // pulled from https://docs.rs/proptest/latest/src/proptest/array.rs.html#213 + fn uniform48( + strategy: S, + ) -> proptest::array::UniformArrayStrategy { + proptest::array::UniformArrayStrategy::new(strategy) + } + proptest! 
{ + #[test] + fn prop_encrypt_decrypt_roundtrip(plaintext in prop::collection::vec(any::(), 0..10000)) { + let key = generate_test_key(); + let rng = test_rng(); + + let mut encrypted = encrypt(&rng, plaintext.clone(), key).unwrap(); + let decrypted = decrypt(&mut encrypted, key).unwrap(); + + prop_assert_eq!(&plaintext[..], decrypted); + } + + #[test] + fn prop_aes_encrypted_value_roundtrip_bytes( + iv in prop::array::uniform12(any::()), + ciphertext in prop::collection::vec(any::(), AES_GCM_TAG_LEN..1000) + ) { + let value = AesEncryptedValue { aes_iv: iv, ciphertext }; + let bytes = value.bytes(); + let restored: AesEncryptedValue = bytes.as_slice().try_into().unwrap(); + + prop_assert_eq!(value.aes_iv, restored.aes_iv); + prop_assert_eq!(value.ciphertext, restored.ciphertext); + } + + #[test] + fn prop_encrypted_master_key_roundtrip_bytes( + salt in prop::array::uniform32(any::()), + iv in prop::array::uniform12(any::()), + encrypted_key in uniform48(any::()) + ) { + let key = EncryptedMasterKey::new(salt, iv, encrypted_key); + let bytes = key.bytes(); + let restored = EncryptedMasterKey::new_from_slice(&bytes).unwrap(); + + prop_assert_eq!(key.pbkdf2_salt, restored.pbkdf2_salt); + prop_assert_eq!(key.aes_iv, restored.aes_iv); + prop_assert_eq!(key.encrypted_key, restored.encrypted_key); + } + } } diff --git a/src/crypto/streaming.rs b/src/crypto/streaming.rs new file mode 100644 index 00000000..06340ff7 --- /dev/null +++ b/src/crypto/streaming.rs @@ -0,0 +1,1317 @@ +//! Streaming AES-GCM encryption/decryption using AES-CTR + incremental GHASH +//! +//! This module implements streaming AES-GCM by decomposing the algorithm into its two +//! incremental components: +//! - AES-CTR: Stream cipher for encryption/decryption (via aws-lc-rs) +//! - GHASH: Authentication tag computation (via RustCrypto/ghash crate) +//! +//! This allows processing large files with constant memory usage while producing +//! 
output compatible with standard AES-GCM (same format as `crypto::aes` module). +//! +//! https://en.wikipedia.org/wiki/Galois/Counter_Mode gives a pretty good explanation of the algorithm we're patching +//! together from its constituent parts. The NIST documents lay it out in mathematical notation, if you need further +//! reference. +//! +//! ```text +//! ┌─────────────────────────────────────────────────────────────────────────────┐ +//! │ STREAMING AES-GCM ARCHITECTURE │ +//! │ │ +//! │ Standard AES-GCM is not streamable because authentication requires all │ +//! │ ciphertext before producing/verifying the tag. We decompose GCM into: │ +//! │ │ +//! │ AES-GCM = AES-CTR (encryption) + GHASH (authentication) │ +//! │ │ +//! │ Both components can process data incrementally, enabling constant-memory │ +//! │ streaming while producing output identical to standard AES-GCM. │ +//! └─────────────────────────────────────────────────────────────────────────────┘ +//! +//! ┌─────────────────────────────────────────────────────────────────────────────┐ +//! │ KEY DERIVATION │ +//! │ │ +//! │ Given: K (256-bit key), IV (96-bit random nonce) │ +//! │ │ +//! │ ┌─────────────┐ │ +//! │ │ 0^128 │──────► AES_K ──────► H (GHASH subkey) │ +//! │ │ (128 zeros) │ │ +//! │ └─────────────┘ │ +//! │ │ +//! │ ┌─────────────────────┐ │ +//! │ │ IV │ 0^31 │ 1 │──► AES_K ──► Encrypted_J0 (for final tag XOR) │ +//! │ │ 96b │ 31b │ 1b │ (J0 = initial counter block) │ +//! │ └─────────────────────┘ │ +//! │ │ +//! │ CTR counter starts at J0 + 1 = IV || 0^31 || 2 │ +//! └─────────────────────────────────────────────────────────────────────────────┘ +//! +//! ┌─────────────────────────────────────────────────────────────────────────────┐ +//! │ ENCRYPTION FLOW │ +//! │ │ +//! │ Plaintext chunks AES-CTR Output │ +//! │ ┌──────────┐ ┌─────────────┐ │ +//! │ │ P_1 │────────►│ CTR(IV||2) │──────► C_1 ────────┬──► Ciphertext │ +//! │ └──────────┘ └─────────────┘ │ │ +//! 
│ ┌──────────┐ ┌─────────────┐ │ │ +//! │ │ P_2 │────────►│ CTR(IV||3) │──────► C_2 ─────┬──┼──► Ciphertext │ +//! │ └──────────┘ └─────────────┘ │ │ │ +//! │ ... ... │ │ │ +//! │ ┌──────────┐ ┌─────────────┐ │ │ │ +//! │ │ P_n │────────►│ CTR(IV||n+1)│──────► C_n ──┬──┼──┼──► Ciphertext │ +//! │ └──────────┘ └─────────────┘ │ │ │ │ +//! │ │ │ │ │ +//! │ GHASH Accumulation │ │ │ │ +//! │ ┌──────────────────────────┐ │ │ │ │ +//! │ │ GhashAccumulator │◄───┴──┴──┘ │ +//! │ │ (buffers partial blocks)│ │ +//! │ └────────────┬─────────────┘ │ +//! │ │ │ +//! │ ▼ │ +//! │ ┌─────────────────────────────────────────────────┐ │ +//! │ │ len_block = [AAD_len_bits || CT_len_bits] │ │ +//! │ │ [ 0^64 || ciphertext_len*8 ] │ │ +//! │ └────────────────────────┬────────────────────────┘ │ +//! │ │ │ +//! │ ▼ │ +//! │ ┌───────────────────────────────────────────────────────────────────┐ │ +//! │ │ Tag = GHASH(C_1 || C_2 || ... || C_n || len_block) ⊕ Encrypted_J0│ │ +//! │ └───────────────────────────────────────────────────────────────────┘ │ +//! └─────────────────────────────────────────────────────────────────────────────┘ +//! +//! ┌─────────────────────────────────────────────────────────────────────────────┐ +//! │ DECRYPTION FLOW │ +//! │ │ +//! │ Input stream: [IV (12 bytes)][ciphertext...][tag (16 bytes)] │ +//! │ │ +//! │ Strategy: Hold back last 16 bytes of each chunk until verify() is called │ +//! │ │ +//! │ ┌──────────┐ │ +//! │ │ Read IV │──────────────► Initialize CTR & GHASH (same as encrypt) │ +//! │ └──────────┘ │ +//! │ │ +//! │ ┌──────────────────┐ ┌──────────────────┐ │ +//! │ │ Ciphertext chunk │───►│ held_back buffer │ (always keep last 16 bytes) │ +//! │ └──────────────────┘ └────────┬─────────┘ │ +//! │ │ │ +//! │ ┌───────────────┴───────────────┐ │ +//! │ │ Process bytes before held_back│ │ +//! │ └───────────────┬───────────────┘ │ +//! │ │ │ +//! │ ┌───────────────┴───────────────┐ │ +//! │ ▼ ▼ │ +//! │ ┌─────────┐ ┌───────────┐ │ +//! 
│ │ GHASH │ │ AES-CTR │ │ +//! │ │ update │ │ decrypt │ │ +//! │ └────┬────┘ └─────┬─────┘ │ +//! │ │ │ │ +//! │ ▼ ▼ │ +//! │ hash = (hash ⊕ block) * H ┌───────────┐ │ +//! │ (fold into 128-bit hash) │ Write │ │ +//! │ │ plaintext │ │ +//! │ └───────────┘ │ +//! │ │ +//! │ On verify(): │ +//! │ ┌─────────────────────────────────────────────────────────────────────┐ │ +//! │ │ 1. held_back (16 bytes) = expected_tag │ │ +//! │ │ 2. computed_tag = GHASH(all_ciphertext || len_block) ⊕ Encrypted_J0│ │ +//! │ │ 3. constant_time_compare(computed_tag, expected_tag) │ │ +//! │ │ 4. Return remaining plaintext or authentication error │ │ +//! │ └─────────────────────────────────────────────────────────────────────┘ │ +//! └─────────────────────────────────────────────────────────────────────────────┘ +//! +//! ┌─────────────────────────────────────────────────────────────────────────────┐ +//! │ WIRE FORMAT │ +//! │ │ +//! │ ┌────────────┬─────────────────────────────────┬────────────────┐ │ +//! │ │ IV │ Ciphertext │ Tag │ │ +//! │ │ (12 bytes) │ (plaintext_len bytes) │ (16 bytes) │ │ +//! │ └────────────┴─────────────────────────────────┴────────────────┘ │ +//! │ │ +//! │ Total output size = 12 + plaintext_len + 16 = plaintext_len + 28 bytes │ +//! │ │ +//! │ Compatible with: crypto::aes module (bidirectional interop verified) │ +//! └─────────────────────────────────────────────────────────────────────────────┘ +//! ``` +//! +// In Java/Scala/nodejs we were able to find common crypto libraries that implemented this model but did not find any +// in Rust. If one is found (RustCrypto, ring, aws-lc-rs, etc), use that instead of doing it ourselves. 
+ +use aws_lc_rs::{ + cipher::{ + AES_256, DecryptionContext, EncryptingKey, EncryptionContext, StreamingDecryptingKey, + StreamingEncryptingKey, UnboundCipherKey, + }, + constant_time, +}; +use ghash::{ + GHash, + universal_hash::{KeyInit, UniversalHash}, +}; +use rand::{CryptoRng, RngCore}; +use std::{ + io::{BufReader, BufWriter, Read, Write}, + ops::DerefMut, + sync::Mutex, +}; + +use crate::{ + IronOxideErr, Result, + crypto::aes::{AES_BLOCK_SIZE, AES_GCM_TAG_LEN, AES_IV_LEN, AES_KEY_LEN}, + internal::take_lock, +}; + +/// Default block/chunk size for the file input/output (64 KB) +pub(crate) const DEFAULT_IO_BLOCK_SIZE: usize = 64 * 1024; + +/// Build a counter block from IV and counter value. +/// For 96-bit IVs: result = IV || counter (big-endian u32) +fn build_counter_block(iv: &[u8; AES_IV_LEN], counter: u32) -> ghash::Block { + let mut block = ghash::Block::default(); + block[..AES_IV_LEN].copy_from_slice(iv); + block[AES_IV_LEN..].copy_from_slice(&counter.to_be_bytes()); + block +} + +/// Appends the standard GCM length block [len(AAD) || len(ciphertext)] in bits, +/// then XORs the GHASH output with encrypted J0 to produce the final tag. 
+fn finalize_gcm_tag( + ghash_acc: GhashAccumulator, + encrypted_j0: &ghash::Block, + ciphertext_len: u64, +) -> [u8; AES_GCM_TAG_LEN] { + // Finalize the accumulator (handles any pending partial block) + let mut ghash = ghash_acc.finalize(); + + // Update GHASH with length block: [len(AAD) || len(ciphertext)] in bits, big-endian + let mut len_block = ghash::Block::default(); + // First 8 bytes: AAD length in bits (always 0 for us) + // Last 8 bytes: ciphertext length in bits + len_block[8..].copy_from_slice(&(ciphertext_len * 8).to_be_bytes()); + ghash.update(&[len_block]); + + // Compute final tag: GHASH_output XOR AES_K(J0) + let ghash_output = ghash.finalize(); + let mut tag = [0u8; AES_GCM_TAG_LEN]; + for i in 0..AES_GCM_TAG_LEN { + tag[i] = ghash_output[i] ^ encrypted_j0[i]; + } + tag +} + +/// Accumulator for GHASH that properly handles block boundaries. +/// Only processes complete 16-byte blocks during updates; padding is applied only at finalization. +/// This is needed because while streaming we get a variable number of bytes from the BufReader, depending on its +/// own internal buffering logic. Using this accumulator makes sure we're always feeding appropriately sized blocks +/// into the GHASH update, so it doesn't pad anything prematurely. +struct GhashAccumulator { + ghash: GHash, + /// Partial block pending processing (0-15 bytes) + pending: Vec, +} + +impl GhashAccumulator { + fn new(ghash: GHash) -> Self { + Self { + ghash, + pending: Vec::with_capacity(AES_BLOCK_SIZE), + } + } + + /// Update GHASH with data. Only processes complete 16-byte blocks; + /// partial data is buffered until more data arrives or finalize is called. 
+ fn update(&mut self, data: &[u8]) { + self.pending.extend_from_slice(data); + + // Process all complete 16-byte blocks + let complete_blocks = self.pending.len() / AES_BLOCK_SIZE; + let complete_len = complete_blocks * AES_BLOCK_SIZE; + + for chunk in self.pending[..complete_len].chunks_exact(AES_BLOCK_SIZE) { + let mut block = ghash::Block::default(); + block.copy_from_slice(chunk); + self.ghash.update(&[block]); + } + + // Keep only the remaining partial block + self.pending.drain(..complete_len); + } + + /// Finalize GHASH, padding any remaining partial block. + fn finalize(mut self) -> GHash { + // Process any remaining partial block with zero padding + if !self.pending.is_empty() { + let mut block = ghash::Block::default(); + block[..self.pending.len()].copy_from_slice(&self.pending); + self.ghash.update(&[block]); + } + self.ghash + } +} + +/// Initialize GHASH and pre-compute encrypted initial counter block for AES-GCM. +/// - H = GHASH key +/// - J0 = IV || 0^31 || 1 is the initial counter block +fn init_gcm_state(key: &[u8; AES_KEY_LEN], iv: &[u8; AES_IV_LEN]) -> Result<(GHash, ghash::Block)> { + // Create ECB key for single-block operations (H derivation and J0 encryption) + let ecb_cipher_key = UnboundCipherKey::new(&AES_256, key) + .map_err(|_| IronOxideErr::AesError(aws_lc_rs::error::Unspecified))?; + let ecb_key = EncryptingKey::ecb(ecb_cipher_key) + .map_err(|_| IronOxideErr::AesError(aws_lc_rs::error::Unspecified))?; + + // Derive GHASH key: H = AES_K(0^128) + let mut ghash_key = ghash::Key::default(); + ecb_key + .encrypt(&mut ghash_key) + .map_err(|_| IronOxideErr::AesError(aws_lc_rs::error::Unspecified))?; + let ghash = GHash::new(&ghash_key); + + // Pre-compute encrypted J0 for final tag: AES_K(IV || 0^31 || 1) + let mut encrypted_j0 = build_counter_block(iv, 1); + ecb_key + .encrypt(&mut encrypted_j0) + .map_err(|_| IronOxideErr::AesError(aws_lc_rs::error::Unspecified))?; + + Ok((ghash, encrypted_j0)) +} + +/// Streaming encryptor using 
AES-CTR + incremental GHASH. Produces output identical to standard AES-GCM encryption. +pub(crate) struct StreamingEncryptor { + /// The cipher/encryptor doing the actual CTR encryption. + ctr_cipher: StreamingEncryptingKey, + /// The GHASH accumulator that handles block boundaries correctly. + ghash_acc: GhashAccumulator, + /// Pre-computed AES_K(J0) for final tag computation + encrypted_j0: ghash::Block, + /// Count of the byte-length written by this encryptor. + ciphertext_len: u64, +} + +impl StreamingEncryptor { + /// Create a new streaming encryptor with the given key and IV. + pub(crate) fn new(key: &[u8; AES_KEY_LEN], iv: [u8; AES_IV_LEN]) -> Result { + let (ghash, encrypted_initial_counter_block) = init_gcm_state(key, &iv)?; + + // Create AES-CTR key starting at counter 2 (J0+1) + let ctr_iv: [u8; AES_BLOCK_SIZE] = build_counter_block(&iv, 2).into(); + let ctr_cipher_key = UnboundCipherKey::new(&AES_256, key) + .map_err(|_| IronOxideErr::AesError(aws_lc_rs::error::Unspecified))?; + let context = EncryptionContext::Iv128(ctr_iv.into()); + // `less_safe_ctr` so we can use the same encryption context as our GCM + let ctr_key = StreamingEncryptingKey::less_safe_ctr(ctr_cipher_key, context) + .map_err(|_| IronOxideErr::AesError(aws_lc_rs::error::Unspecified))?; + + Ok(Self { + ctr_cipher: ctr_key, + ghash_acc: GhashAccumulator::new(ghash), + encrypted_j0: encrypted_initial_counter_block, + ciphertext_len: 0, + }) + } + + /// Encrypt a (input-sized, not AES block) chunk of plaintext and write ciphertext to output buffer. + /// + /// The output buffer must be at least as large as the input. + /// After this call, the GHASH accumulator is updated with the ciphertext. 
+ pub(crate) fn process_chunk(&mut self, input: &[u8], output: &mut [u8]) -> Result { + if output.len() < input.len() { + return Err(IronOxideErr::FileIoError { + path: None, + operation: "encrypt".to_string(), + message: "Output buffer too small".to_string(), + }); + } + + // For CTR mode, output buffer needs extra space for potential block alignment + let min_out_size = input.len() + AES_BLOCK_SIZE - 1; + let mut temp_output = vec![0u8; min_out_size]; + + // Encrypt with AES-CTR + let buffer_update = self + .ctr_cipher + .update(input, &mut temp_output) + .map_err(|_| IronOxideErr::AesError(aws_lc_rs::error::Unspecified))?; + + let written = buffer_update.written(); + let written_len = written.len(); + + // Copy to output + output[..written_len].copy_from_slice(written); + + // Update GHASH accumulator with ciphertext + self.ghash_acc.update(&output[..written_len]); + self.ciphertext_len += written_len as u64; + + Ok(written_len) + } + + /// Finalize encryption and return the authentication tag. + /// This must be called after all plaintext has been processed. The tag should be appended to the ciphertext. + /// Returns the tag and any remaining ciphertext bytes. + pub(crate) fn finalize(mut self) -> Result<(Vec, [u8; AES_GCM_TAG_LEN])> { + // CTR mode is streaming so it doesn't buffer bytes, and final_output should be empty. `aws-lc-rs` has an + // output argument anyway (part of their generic traits), so we'll defensively flush as though there could + // be remaining output. 
+ let mut final_output = vec![0u8; AES_BLOCK_SIZE]; + let (_, buffer_update_info) = self + .ctr_cipher + .finish(&mut final_output) + .map_err(|_| IronOxideErr::AesError(aws_lc_rs::error::Unspecified))?; + + // If we _did_ write something to the final output buffer, make sure that we push those same bytes into GHASH + let remaining = buffer_update_info.written().to_vec(); + if !remaining.is_empty() { + self.ghash_acc.update(&remaining); + self.ciphertext_len += remaining.len() as u64; + } + + let tag = finalize_gcm_tag(self.ghash_acc, &self.encrypted_j0, self.ciphertext_len); + Ok((remaining, tag)) + } +} + +/// Streaming decryptor using AES-CTR + incremental GHASH verification. +pub(crate) struct StreamingDecryptor { + /// The cipher/decryptor doing the actual CTR decryption. + ctr_cipher: StreamingDecryptingKey, + /// The GHASH accumulator that handles block boundaries correctly. + ghash_acc: GhashAccumulator, + /// Count of the byte-length of ciphertext processed. + ciphertext_len: u64, + /// Pre-computed AES_K(J0) for final tag computation + encrypted_j0: ghash::Block, + /// Buffer holding the trailing bytes that might be the GCM tag. + /// We always hold back the last 16 bytes until verify() is called. + held_back: Vec, +} + +impl StreamingDecryptor { + /// Create a new streaming decryptor with the given key and IV. 
+ pub(crate) fn new(key: &[u8; AES_KEY_LEN], iv: [u8; AES_IV_LEN]) -> Result { + let (ghash, encrypted_j0) = init_gcm_state(key, &iv)?; + + // Create AES-CTR key starting at counter 2 + let ctr_iv: [u8; AES_BLOCK_SIZE] = build_counter_block(&iv, 2).into(); + let ctr_cipher_key = UnboundCipherKey::new(&AES_256, key) + .map_err(|_| IronOxideErr::AesError(aws_lc_rs::error::Unspecified))?; + let context = DecryptionContext::Iv128(ctr_iv.into()); + let ctr_key = StreamingDecryptingKey::ctr(ctr_cipher_key, context) + .map_err(|_| IronOxideErr::AesError(aws_lc_rs::error::Unspecified))?; + + Ok(Self { + ctr_cipher: ctr_key, + ghash_acc: GhashAccumulator::new(ghash), + encrypted_j0, + held_back: Vec::with_capacity(AES_GCM_TAG_LEN), + ciphertext_len: 0, + }) + } + + /// Process a (input sized, not AES block) chunk of ciphertext (which includes the trailing GCM tag), + /// writing plaintext to output buffer. + /// + /// This method holds back the last 16 bytes (the GCM tag) while processing. + /// Call `verify()` after all data has been processed to verify the tag + /// and get any remaining plaintext. 
+ pub(crate) fn process_chunk(&mut self, input: &[u8], output: &mut [u8]) -> Result { + // Combine held_back bytes with new input + let mut combined = std::mem::take(&mut self.held_back); + combined.extend_from_slice(input); + + // If we don't have more than tag length, hold it all back + if combined.len() <= AES_GCM_TAG_LEN { + self.held_back = combined; + return Ok(0); + } + + // Process all but the last tag-length bytes + let to_process_len = combined.len() - AES_GCM_TAG_LEN; + let ciphertext = &combined[..to_process_len]; + self.held_back = combined[to_process_len..].to_vec(); + + if output.len() < ciphertext.len() { + return Err(IronOxideErr::FileIoError { + path: None, + operation: "decrypt".to_string(), + message: "Output buffer too small".to_string(), + }); + } + + // Update GHASH accumulator with ciphertext BEFORE decryption + self.ghash_acc.update(ciphertext); + self.ciphertext_len += ciphertext.len() as u64; + + // For CTR mode, output buffer needs extra space for potential block alignment + let min_out_size = ciphertext.len() + AES_BLOCK_SIZE - 1; + let mut temp_output = vec![0u8; min_out_size]; + + // Decrypt with AES-CTR + let buffer_update = self + .ctr_cipher + .update(ciphertext, &mut temp_output) + .map_err(|_| IronOxideErr::AesError(aws_lc_rs::error::Unspecified))?; + + let written = buffer_update.written(); + let written_len = written.len(); + + // Copy to output + output[..written_len].copy_from_slice(written); + + Ok(written_len) + } + + /// Verify the authentication tag and return any remaining plaintext. + /// + /// The tag is extracted from the bytes held back during `process_block` calls. 
+ /// Returns `Ok(remaining_plaintext)` if authentication succeeds, or an error if: + /// - Not enough bytes were provided (less than 16 bytes total) + /// - The authentication tag doesn't match + pub(crate) fn verify(self) -> Result> { + // The held_back buffer should contain exactly the GCM tag + if self.held_back.len() != AES_GCM_TAG_LEN { + return Err(IronOxideErr::AesGcmDecryptError); + } + + let expected_tag: [u8; AES_GCM_TAG_LEN] = self + .held_back + .try_into() + .map_err(|_| IronOxideErr::AesGcmDecryptError)?; + + // Finalize CTR mode - get any remaining bytes + let mut final_output = vec![0u8; AES_BLOCK_SIZE]; + let buffer_update = self + .ctr_cipher + .finish(&mut final_output) + .map_err(|_| IronOxideErr::AesError(aws_lc_rs::error::Unspecified))?; + + let remaining = buffer_update.written().to_vec(); + + let computed_tag = + finalize_gcm_tag(self.ghash_acc, &self.encrypted_j0, self.ciphertext_len); + + constant_time::verify_slices_are_equal(&computed_tag, &expected_tag) + .map_err(|_| IronOxideErr::AesGcmDecryptError)?; + + Ok(remaining) + } +} + +/// Streaming encrypt data from a reader to a writer. +/// Generates the IV internally and writes it to the output before the ciphertext, +/// similar to how `aes::encrypt` bundles IV with ciphertext. +/// Output format: [IV (12 bytes)][ciphertext][tag (16 bytes)] +pub(crate) fn encrypt_stream( + key: &[u8; AES_KEY_LEN], + rng: &Mutex, + reader: &mut BufReader, + writer: &mut BufWriter, +) -> Result<()> { + // Generate IV + let mut iv = [0u8; AES_IV_LEN]; + take_lock(rng).deref_mut().fill_bytes(&mut iv); + + // Write IV to output + writer + .write_all(&iv) + .map_err(|e| IronOxideErr::FileIoError { + path: None, + operation: "write_iv".to_string(), + message: e.to_string(), + })?; + + let mut encryptor = StreamingEncryptor::new(key, iv)?; + let mut input_buffer = vec![0u8; DEFAULT_IO_BLOCK_SIZE]; + let mut output_buffer = vec![0u8; DEFAULT_IO_BLOCK_SIZE + AES_BLOCK_SIZE]; + + while let n @ 1.. 
= reader + .read(&mut input_buffer) + .map_err(|e| IronOxideErr::FileIoError { + path: None, + operation: "read".to_string(), + message: e.to_string(), + })? + { + let written = encryptor.process_chunk(&input_buffer[..n], &mut output_buffer)?; + writer + .write_all(&output_buffer[..written]) + .map_err(|e| IronOxideErr::FileIoError { + path: None, + operation: "write".to_string(), + message: e.to_string(), + })?; + } + + let (remaining, tag) = encryptor.finalize()?; + if !remaining.is_empty() { + writer + .write_all(&remaining) + .map_err(|e| IronOxideErr::FileIoError { + path: None, + operation: "write".to_string(), + message: e.to_string(), + })?; + } + + writer + .write_all(&tag) + .map_err(|e| IronOxideErr::FileIoError { + path: None, + operation: "write".to_string(), + message: e.to_string(), + })?; + + writer.flush().map_err(|e| IronOxideErr::FileIoError { + path: None, + operation: "flush".to_string(), + message: e.to_string(), + })?; + + Ok(()) +} + +/// Stream-decrypt data from a reader to a writer. +/// Reads the IV from the input before the ciphertext. +/// Expected input format: [IV (12 bytes)][ciphertext][tag (16 bytes)] +pub(crate) fn decrypt_stream( + key: &[u8; AES_KEY_LEN], + reader: &mut BufReader, + writer: &mut BufWriter, +) -> Result<()> { + // Read IV from input + let mut iv = [0u8; AES_IV_LEN]; + reader + .read_exact(&mut iv) + .map_err(|e| IronOxideErr::FileIoError { + path: None, + operation: "read_iv".to_string(), + message: e.to_string(), + })?; + + let mut decryptor = StreamingDecryptor::new(key, iv)?; + let mut input_buffer = vec![0u8; DEFAULT_IO_BLOCK_SIZE]; + // Output buffer needs extra space since we may process more than we read + // (due to combining held_back bytes with new input) + let mut output_buffer = vec![0u8; DEFAULT_IO_BLOCK_SIZE + AES_BLOCK_SIZE + AES_GCM_TAG_LEN]; + + while let n @ 1.. 
= reader + .read(&mut input_buffer) + .map_err(|e| IronOxideErr::FileIoError { + path: None, + operation: "read".to_string(), + message: e.to_string(), + })? + { + let written = decryptor.process_chunk(&input_buffer[..n], &mut output_buffer)?; + if written > 0 { + writer + .write_all(&output_buffer[..written]) + .map_err(|e| IronOxideErr::FileIoError { + path: None, + operation: "write".to_string(), + message: e.to_string(), + })?; + } + } + + let remaining_plaintext = decryptor.verify()?; + if !remaining_plaintext.is_empty() { + writer + .write_all(&remaining_plaintext) + .map_err(|e| IronOxideErr::FileIoError { + path: None, + operation: "write".to_string(), + message: e.to_string(), + })?; + } + + writer.flush().map_err(|e| IronOxideErr::FileIoError { + path: None, + operation: "flush".to_string(), + message: e.to_string(), + })?; + + Ok(()) +} + +#[cfg(test)] +pub(crate) mod tests { + use super::*; + use crate::crypto::aes; + use proptest::prelude::*; + use rand::RngCore; + use rand::SeedableRng; + use rand_chacha::ChaChaRng; + use std::io::Cursor; + + pub fn generate_test_key() -> [u8; AES_KEY_LEN] { + let mut key = [0u8; AES_KEY_LEN]; + rand::thread_rng().fill_bytes(&mut key); + key + } + + pub fn test_rng() -> Mutex { + Mutex::new(ChaChaRng::from_entropy()) + } + + #[test] + fn test_streaming_encrypt_decrypt_roundtrip() { + let key = generate_test_key(); + let rng = test_rng(); + let plaintext = b"deadbeef"; + let mut ciphertext_buf = Vec::new(); + let mut decrypted_buf = Vec::new(); + + { + let mut reader = BufReader::new(Cursor::new(plaintext.as_slice())); + let mut writer = BufWriter::new(Cursor::new(&mut ciphertext_buf)); + + encrypt_stream(&key, &rng, &mut reader, &mut writer).unwrap(); + } + + { + let mut reader = BufReader::new(Cursor::new(&ciphertext_buf)); + let mut writer = BufWriter::new(Cursor::new(&mut decrypted_buf)); + + decrypt_stream(&key, &mut reader, &mut writer).unwrap(); + } + + assert_eq!(decrypted_buf, plaintext); + } + + #[test] + fn 
test_streaming_encrypt_decrypt_large_data() { + let key = generate_test_key(); + let rng = test_rng(); + + // 1MB of random data + let mut plaintext = vec![0u8; 1024 * 1024]; + rand::thread_rng().fill_bytes(&mut plaintext); + let mut ciphertext_buf = Vec::new(); + let mut decrypted_buf = Vec::new(); + + { + let mut reader = BufReader::new(Cursor::new(&plaintext)); + let mut writer = BufWriter::new(Cursor::new(&mut ciphertext_buf)); + + encrypt_stream(&key, &rng, &mut reader, &mut writer).unwrap(); + } + let expected_len = AES_IV_LEN + plaintext.len() + AES_GCM_TAG_LEN; + assert_eq!(ciphertext_buf.len(), expected_len); + + { + let mut reader = BufReader::new(Cursor::new(&ciphertext_buf)); + let mut writer = BufWriter::new(Cursor::new(&mut decrypted_buf)); + + decrypt_stream(&key, &mut reader, &mut writer).unwrap(); + } + assert_eq!(decrypted_buf, plaintext); + } + + #[test] + fn test_streaming_decrypt_detects_tampered_ciphertext() { + let key = generate_test_key(); + let rng = test_rng(); + let plaintext = b"deadbeef"; + let mut ciphertext_buf = Vec::new(); + let mut decrypted_buf = Vec::new(); + + { + let mut reader = BufReader::new(Cursor::new(plaintext.as_slice())); + let mut writer = BufWriter::new(Cursor::new(&mut ciphertext_buf)); + + encrypt_stream(&key, &rng, &mut reader, &mut writer).unwrap(); + } + + // Tamper with ciphertext (first byte after IV) + ciphertext_buf[AES_IV_LEN] ^= 0xFF; + + // Decrypt should fail + let mut reader = BufReader::new(Cursor::new(&ciphertext_buf)); + let mut writer = BufWriter::new(Cursor::new(&mut decrypted_buf)); + + let result = decrypt_stream(&key, &mut reader, &mut writer); + assert!(result.is_err()); + } + + #[test] + fn test_streaming_decrypt_detects_tampered_tag() { + let key = generate_test_key(); + let rng = test_rng(); + let plaintext = b"Hello, World!"; + let mut ciphertext_buf = Vec::new(); + let mut decrypted_buf = Vec::new(); + + { + let mut reader = BufReader::new(Cursor::new(plaintext.as_slice())); + let mut 
writer = BufWriter::new(Cursor::new(&mut ciphertext_buf)); + + encrypt_stream(&key, &rng, &mut reader, &mut writer).unwrap(); + } + + // Tamper with tag (last byte) + let last_idx = ciphertext_buf.len() - 1; + ciphertext_buf[last_idx] ^= 0xFF; + + // Decrypt should fail + let mut reader = BufReader::new(Cursor::new(&ciphertext_buf)); + let mut writer = BufWriter::new(Cursor::new(&mut decrypted_buf)); + + let result = decrypt_stream(&key, &mut reader, &mut writer); + assert!(result.is_err()); + } + + // Encrypt with streaming, decrypt with standard aes module + // This verifies bidirectional interoperability + #[test] + fn test_interop_with_aes_module_decrypt_streaming_encrypted() { + let key = generate_test_key(); + let rng = test_rng(); + let plaintext = b"Test data for interop"; + let mut ciphertext_buf = Vec::new(); + + { + let mut reader = BufReader::new(Cursor::new(plaintext.as_slice())); + let mut writer = BufWriter::new(Cursor::new(&mut ciphertext_buf)); + + encrypt_stream(&key, &rng, &mut reader, &mut writer).unwrap(); + } + + let mut aes_value: aes::AesEncryptedValue = ciphertext_buf.as_slice().try_into().unwrap(); + let decrypted = aes::decrypt(&mut aes_value, key).unwrap(); + assert_eq!(decrypted, plaintext.as_slice()); + } + + // Encrypt with standard aes module, decrypt with streaming + // This test verifies we can decrypt standard AES-GCM output + #[test] + fn test_interop_with_aes_module_streaming_decrypt_aes_encrypted() { + let key = generate_test_key(); + let rng = test_rng(); + let plaintext = b"Test data for interop"; + let mut decrypted_buf = Vec::new(); + + let encrypted = aes::encrypt(&rng, plaintext.to_vec(), key).unwrap(); + let encrypted_bytes = encrypted.bytes(); + { + let mut reader = BufReader::new(Cursor::new(encrypted_bytes)); + let mut writer = BufWriter::new(Cursor::new(&mut decrypted_buf)); + + decrypt_stream(&key, &mut reader, &mut writer).unwrap(); + } + + assert_eq!(decrypted_buf, plaintext); + } + + // Encrypt large data with 
streaming, decrypt with standard aes module + #[test] + fn test_interop_large_data_streaming_encrypt_standard_decrypt() { + let key = generate_test_key(); + let rng = test_rng(); + let mut ciphertext_buf = Vec::new(); + + // 1MB random data + let mut plaintext = vec![0u8; 1024 * 1024]; + rand::thread_rng().fill_bytes(&mut plaintext); + + { + let mut reader = BufReader::new(Cursor::new(&plaintext)); + let mut writer = BufWriter::new(Cursor::new(&mut ciphertext_buf)); + + encrypt_stream(&key, &rng, &mut reader, &mut writer).unwrap(); + } + + let mut aes_value: aes::AesEncryptedValue = ciphertext_buf.as_slice().try_into().unwrap(); + let decrypted = aes::decrypt(&mut aes_value, key).unwrap(); + assert_eq!(decrypted, plaintext.as_slice()); + } + + // Encrypt large data with standard aes, decrypt with streaming + #[test] + fn test_interop_large_data_standard_encrypt_streaming_decrypt() { + let key = generate_test_key(); + let rng = test_rng(); + let mut decrypted_buf = Vec::new(); + + // 1MB random data + let mut plaintext = vec![0u8; 1024 * 1024]; + rand::thread_rng().fill_bytes(&mut plaintext); + + let encrypted = aes::encrypt(&rng, plaintext.clone(), key).unwrap(); + let encrypted_bytes = encrypted.bytes(); + + { + let mut reader = BufReader::new(Cursor::new(encrypted_bytes)); + let mut writer = BufWriter::new(Cursor::new(&mut decrypted_buf)); + + decrypt_stream(&key, &mut reader, &mut writer).unwrap(); + } + + assert_eq!(decrypted_buf, plaintext); + } + + #[test] + fn test_empty_plaintext() { + let key = generate_test_key(); + let rng = test_rng(); + let plaintext: &[u8] = &[]; + let mut ciphertext_buf = Vec::new(); + let mut decrypted_buf = Vec::new(); + + { + let mut reader = BufReader::new(Cursor::new(plaintext)); + let mut writer = BufWriter::new(Cursor::new(&mut ciphertext_buf)); + + encrypt_stream(&key, &rng, &mut reader, &mut writer).unwrap(); + } + + // Empty plaintext produces IV (12 bytes) + tag (16 bytes) = 28 bytes + assert_eq!(ciphertext_buf.len(), 
AES_IV_LEN + AES_GCM_TAG_LEN); + + { + let mut reader = BufReader::new(Cursor::new(&ciphertext_buf)); + let mut writer = BufWriter::new(Cursor::new(&mut decrypted_buf)); + + decrypt_stream(&key, &mut reader, &mut writer).unwrap(); + } + + assert_eq!(decrypted_buf.len(), 0); + } + + #[test] + fn test_single_byte_plaintext() { + let key = generate_test_key(); + let rng = test_rng(); + let plaintext = &[42u8]; + let mut ciphertext_buf = Vec::new(); + let mut decrypted_buf = Vec::new(); + + { + let mut reader = BufReader::new(Cursor::new(plaintext.as_slice())); + let mut writer = BufWriter::new(Cursor::new(&mut ciphertext_buf)); + + encrypt_stream(&key, &rng, &mut reader, &mut writer).unwrap(); + } + { + let mut reader = BufReader::new(Cursor::new(&ciphertext_buf)); + let mut writer = BufWriter::new(Cursor::new(&mut decrypted_buf)); + + decrypt_stream(&key, &mut reader, &mut writer).unwrap(); + } + + assert_eq!(decrypted_buf, plaintext); + } + + // Edge case tests for block boundaries and error conditions + #[test] + fn test_exact_block_boundary_16_bytes() { + let key = generate_test_key(); + let rng = test_rng(); + // Exactly one AES block (16 bytes) + let plaintext = [0xABu8; 16]; + let mut ciphertext_buf = Vec::new(); + let mut decrypted_buf = Vec::new(); + + { + let mut reader = BufReader::new(Cursor::new(plaintext.as_slice())); + let mut writer = BufWriter::new(Cursor::new(&mut ciphertext_buf)); + + encrypt_stream(&key, &rng, &mut reader, &mut writer).unwrap(); + } + { + let mut reader = BufReader::new(Cursor::new(&ciphertext_buf)); + let mut writer = BufWriter::new(Cursor::new(&mut decrypted_buf)); + + decrypt_stream(&key, &mut reader, &mut writer).unwrap(); + } + assert_eq!(decrypted_buf, plaintext); + } + + #[test] + fn test_exact_block_boundary_32_bytes() { + let key = generate_test_key(); + let rng = test_rng(); + // Exactly two AES blocks (32 bytes) + let plaintext = [0xCDu8; 32]; + let mut ciphertext_buf = Vec::new(); + let mut decrypted_buf = Vec::new(); + 
{ + let mut reader = BufReader::new(Cursor::new(plaintext.as_slice())); + let mut writer = BufWriter::new(Cursor::new(&mut ciphertext_buf)); + + encrypt_stream(&key, &rng, &mut reader, &mut writer).unwrap(); + } + { + let mut reader = BufReader::new(Cursor::new(&ciphertext_buf)); + let mut writer = BufWriter::new(Cursor::new(&mut decrypted_buf)); + + decrypt_stream(&key, &mut reader, &mut writer).unwrap(); + } + assert_eq!(decrypted_buf, plaintext); + } + + #[test] + fn test_exact_block_boundary_48_bytes() { + let key = generate_test_key(); + let rng = test_rng(); + // Exactly three AES blocks (48 bytes) + let plaintext = [0xEFu8; 48]; + let mut ciphertext_buf = Vec::new(); + let mut decrypted_buf = Vec::new(); + { + let mut reader = BufReader::new(Cursor::new(plaintext.as_slice())); + let mut writer = BufWriter::new(Cursor::new(&mut ciphertext_buf)); + encrypt_stream(&key, &rng, &mut reader, &mut writer).unwrap(); + } + { + let mut reader = BufReader::new(Cursor::new(&ciphertext_buf)); + let mut writer = BufWriter::new(Cursor::new(&mut decrypted_buf)); + decrypt_stream(&key, &mut reader, &mut writer).unwrap(); + } + assert_eq!(decrypted_buf, plaintext); + } + + #[test] + fn test_one_over_block_boundary_17_bytes() { + let key = generate_test_key(); + let rng = test_rng(); + // One byte over block boundary + let plaintext = [0x12u8; 17]; + let mut ciphertext_buf = Vec::new(); + let mut decrypted_buf = Vec::new(); + + { + let mut reader = BufReader::new(Cursor::new(plaintext.as_slice())); + let mut writer = BufWriter::new(Cursor::new(&mut ciphertext_buf)); + encrypt_stream(&key, &rng, &mut reader, &mut writer).unwrap(); + } + { + let mut reader = BufReader::new(Cursor::new(&ciphertext_buf)); + let mut writer = BufWriter::new(Cursor::new(&mut decrypted_buf)); + decrypt_stream(&key, &mut reader, &mut writer).unwrap(); + } + assert_eq!(decrypted_buf, plaintext); + } + + #[test] + fn test_one_under_block_boundary_15_bytes() { + let key = generate_test_key(); + let rng = 
test_rng(); + // One byte under block boundary + let plaintext = [0x34u8; 15]; + let mut ciphertext_buf = Vec::new(); + let mut decrypted_buf = Vec::new(); + + { + let mut reader = BufReader::new(Cursor::new(plaintext.as_slice())); + let mut writer = BufWriter::new(Cursor::new(&mut ciphertext_buf)); + encrypt_stream(&key, &rng, &mut reader, &mut writer).unwrap(); + } + { + let mut reader = BufReader::new(Cursor::new(&ciphertext_buf)); + let mut writer = BufWriter::new(Cursor::new(&mut decrypted_buf)); + decrypt_stream(&key, &mut reader, &mut writer).unwrap(); + } + + assert_eq!(decrypted_buf, plaintext); + } + + #[test] + fn test_wrong_key_fails_decryption() { + let encrypt_key = generate_test_key(); + let decrypt_key = generate_test_key(); // Different key + let rng = test_rng(); + let plaintext = b"Data encrypted with one key, decrypted with another"; + let mut ciphertext_buf = Vec::new(); + + { + let mut reader = BufReader::new(Cursor::new(plaintext.as_slice())); + let mut writer = BufWriter::new(Cursor::new(&mut ciphertext_buf)); + encrypt_stream(&encrypt_key, &rng, &mut reader, &mut writer).unwrap(); + } + + // Try to decrypt with wrong key + let mut reader = BufReader::new(Cursor::new(&ciphertext_buf)); + let mut decrypted_buf = Vec::new(); + let mut writer = BufWriter::new(Cursor::new(&mut decrypted_buf)); + + let result = decrypt_stream(&decrypt_key, &mut reader, &mut writer); + assert!(result.is_err()); + } + + #[test] + fn test_truncated_ciphertext_fails() { + let key = generate_test_key(); + let rng = test_rng(); + let plaintext = b"Some data to encrypt"; + let mut ciphertext_buf = Vec::new(); + + { + let mut reader = BufReader::new(Cursor::new(plaintext.as_slice())); + let mut writer = BufWriter::new(Cursor::new(&mut ciphertext_buf)); + encrypt_stream(&key, &rng, &mut reader, &mut writer).unwrap(); + } + + // Truncate the ciphertext (remove some bytes from the end) + ciphertext_buf.truncate(ciphertext_buf.len() - 5); + + let mut reader = 
BufReader::new(Cursor::new(&ciphertext_buf)); + let mut decrypted_buf = Vec::new(); + let mut writer = BufWriter::new(Cursor::new(&mut decrypted_buf)); + + let result = decrypt_stream(&key, &mut reader, &mut writer); + assert!(result.is_err()); + } + + #[test] + fn test_truncated_to_just_iv_fails() { + let key = generate_test_key(); + let rng = test_rng(); + let plaintext = b"Some data"; + let mut ciphertext_buf = Vec::new(); + + { + let mut reader = BufReader::new(Cursor::new(plaintext.as_slice())); + let mut writer = BufWriter::new(Cursor::new(&mut ciphertext_buf)); + encrypt_stream(&key, &rng, &mut reader, &mut writer).unwrap(); + } + + // Truncate to just the IV (12 bytes) - no ciphertext or tag + ciphertext_buf.truncate(AES_IV_LEN); + + let mut reader = BufReader::new(Cursor::new(&ciphertext_buf)); + let mut decrypted_buf = Vec::new(); + let mut writer = BufWriter::new(Cursor::new(&mut decrypted_buf)); + + let result = decrypt_stream(&key, &mut reader, &mut writer); + assert!(result.is_err()); + } + + #[test] + fn test_truncated_missing_partial_tag_fails() { + let key = generate_test_key(); + let rng = test_rng(); + let plaintext = b"Test data for partial tag truncation"; + let mut ciphertext_buf = Vec::new(); + + { + let mut reader = BufReader::new(Cursor::new(plaintext.as_slice())); + let mut writer = BufWriter::new(Cursor::new(&mut ciphertext_buf)); + encrypt_stream(&key, &rng, &mut reader, &mut writer).unwrap(); + } + + // Remove half the tag (8 bytes) + ciphertext_buf.truncate(ciphertext_buf.len() - 8); + + let mut reader = BufReader::new(Cursor::new(&ciphertext_buf)); + let mut decrypted_buf = Vec::new(); + let mut writer = BufWriter::new(Cursor::new(&mut decrypted_buf)); + + let result = decrypt_stream(&key, &mut reader, &mut writer); + assert!(result.is_err()); + } + + #[test] + fn test_iv_modification_fails() { + let key = generate_test_key(); + let rng = test_rng(); + let plaintext = b"Test IV modification detection"; + let mut ciphertext_buf = 
Vec::new(); + + { + let mut reader = BufReader::new(Cursor::new(plaintext.as_slice())); + let mut writer = BufWriter::new(Cursor::new(&mut ciphertext_buf)); + encrypt_stream(&key, &rng, &mut reader, &mut writer).unwrap(); + } + + // Modify the IV (first 12 bytes) + ciphertext_buf[0] ^= 0xFF; + + let mut reader = BufReader::new(Cursor::new(&ciphertext_buf)); + let mut decrypted_buf = Vec::new(); + let mut writer = BufWriter::new(Cursor::new(&mut decrypted_buf)); + + let result = decrypt_stream(&key, &mut reader, &mut writer); + assert!(result.is_err()); + } + + #[test] + fn test_various_sizes_near_io_block_boundary() { + let key = generate_test_key(); + let rng = test_rng(); + + // Test sizes around the default IO block size (64KB) + let test_sizes = [ + DEFAULT_IO_BLOCK_SIZE - 1, + DEFAULT_IO_BLOCK_SIZE, + DEFAULT_IO_BLOCK_SIZE + 1, + DEFAULT_IO_BLOCK_SIZE * 2 - 1, + DEFAULT_IO_BLOCK_SIZE * 2, + DEFAULT_IO_BLOCK_SIZE * 2 + 1, + ]; + + for size in test_sizes { + let plaintext: Vec = (0..size).map(|i| (i % 256) as u8).collect(); + let mut ciphertext_buf = Vec::new(); + let mut decrypted_buf = Vec::new(); + + { + let mut reader = BufReader::new(Cursor::new(&plaintext)); + let mut writer = BufWriter::new(Cursor::new(&mut ciphertext_buf)); + encrypt_stream(&key, &rng, &mut reader, &mut writer).unwrap(); + } + { + let mut reader = BufReader::new(Cursor::new(&ciphertext_buf)); + let mut writer = BufWriter::new(Cursor::new(&mut decrypted_buf)); + decrypt_stream(&key, &mut reader, &mut writer).unwrap(); + } + + assert_eq!(decrypted_buf, plaintext, "Failed for size {}", size); + } + } + + /// Concrete test showing that naive GHash chunking produces wrong results. + /// This demonstrates why GhashAccumulator is needed. 
+ #[test] + fn test_ghash_naive_chunking_concrete() { + // 6 bytes of data, split at byte 3 + let data = [1u8, 2, 3, 4, 5, 6]; + // Non-zero key so GHASH actually computes something + let key = [ + 0x66, 0xe9, 0x4b, 0xd4, 0xef, 0x8a, 0x2c, 0x3b, 0x88, 0x4c, 0xfa, 0x59, 0xca, 0x34, + 0x2b, 0x2e, + ]; + let ghash_key = ghash::Key::from(key); + + // Correct: all data in one block, padded at end + let output_correct = { + let mut ghash = GHash::new(&ghash_key); + let mut block = ghash::Block::default(); + block[..6].copy_from_slice(&data); + // block = [1,2,3,4,5,6,0,0,0,0,0,0,0,0,0,0] + ghash.update(&[block]); + ghash.finalize() + }; + + // Wrong: split data into two padded blocks + let output_wrong = { + let mut ghash = GHash::new(&ghash_key); + let mut block1 = ghash::Block::default(); + block1[..3].copy_from_slice(&data[..3]); + // block1 = [1,2,3,0,0,0,0,0,0,0,0,0,0,0,0,0] + ghash.update(&[block1]); + + let mut block2 = ghash::Block::default(); + block2[..3].copy_from_slice(&data[3..]); + // block2 = [4,5,6,0,0,0,0,0,0,0,0,0,0,0,0,0] + ghash.update(&[block2]); + ghash.finalize() + }; + + // These MUST be different - 1 block vs 2 blocks + assert_ne!( + output_correct.as_slice(), + output_wrong.as_slice(), + "Naive chunking should produce different result!" + ); + } + + proptest! 
{ + #[test] + fn prop_streaming_roundtrip(plaintext in prop::collection::vec(any::(), 0..100_000)) { + let key = generate_test_key(); + let rng = test_rng(); + let mut ciphertext_buf = Vec::new(); + let mut decrypted_buf = Vec::new(); + + { + let mut reader = BufReader::new(Cursor::new(&plaintext)); + let mut writer = BufWriter::new(Cursor::new(&mut ciphertext_buf)); + encrypt_stream(&key, &rng, &mut reader, &mut writer).unwrap(); + } + { + let mut reader = BufReader::new(Cursor::new(&ciphertext_buf)); + let mut writer = BufWriter::new(Cursor::new(&mut decrypted_buf)); + decrypt_stream(&key, &mut reader, &mut writer).unwrap(); + } + + prop_assert_eq!(plaintext, decrypted_buf); + } + + #[test] + fn prop_streaming_interop_standard_decrypt(plaintext in prop::collection::vec(any::(), 0..50_000)) { + let key = generate_test_key(); + let rng = test_rng(); + let mut ciphertext_buf = Vec::new(); + { + let mut reader = BufReader::new(Cursor::new(&plaintext)); + let mut writer = BufWriter::new(Cursor::new(&mut ciphertext_buf)); + encrypt_stream(&key, &rng, &mut reader, &mut writer).unwrap(); + } + let mut aes_value: aes::AesEncryptedValue = ciphertext_buf.as_slice().try_into().unwrap(); + let decrypted = aes::decrypt(&mut aes_value, key).unwrap(); + + prop_assert_eq!(&plaintext[..], decrypted); + } + + #[test] + fn prop_standard_encrypt_streaming_decrypt(plaintext in prop::collection::vec(any::(), 0..50_000)) { + let key = generate_test_key(); + let rng = test_rng(); + let mut decrypted_buf = Vec::new(); + + let encrypted = aes::encrypt(&rng, plaintext.clone(), key).unwrap(); + let encrypted_bytes = encrypted.bytes(); + { + let mut reader = BufReader::new(Cursor::new(encrypted_bytes)); + let mut writer = BufWriter::new(Cursor::new(&mut decrypted_buf)); + decrypt_stream(&key, &mut reader, &mut writer).unwrap(); + } + prop_assert_eq!(plaintext, decrypted_buf); + } + + /// GhashAccumulator produces same result regardless of chunk sizes + #[test] + fn 
prop_ghash_accumulator_chunking_invariant(data in prop::collection::vec(any::(), 1..5000), chunk_sizes in prop::collection::vec(1..100usize, 1..50)) { + // Non-zero key so GHASH actually computes something + let key = [ + 0x66, 0xe9, 0x4b, 0xd4, 0xef, 0x8a, 0x2c, 0x3b, + 0x88, 0x4c, 0xfa, 0x59, 0xca, 0x34, 0x2b, 0x2e, + ]; + let ghash_key = ghash::Key::from(key); + + // Compute with all-at-once + let mut acc_single = GhashAccumulator::new(GHash::new(&ghash_key)); + acc_single.update(&data); + let ghash_single = acc_single.finalize(); + + // Now compute with arbitrary chunking. This simulates BufReader giving us any length (not only 16 byte + // multiples) up to 8KiB for a chunk. If you naively break the chunk up into blocks, you'll potentially end + // up with a padded value in the middle of the GHash accumulation, resulting in an incorrect tag. + let mut acc_chunked = GhashAccumulator::new(GHash::new(&ghash_key)); + let mut offset = 0; + for &size in &chunk_sizes { + if offset >= data.len() { break; } + let end = (offset + size).min(data.len()); + acc_chunked.update(&data[offset..end]); + offset = end; + } + // Process any remaining data + if offset < data.len() { + acc_chunked.update(&data[offset..]); + } + let ghash_chunked = acc_chunked.finalize(); + + // Both should produce the same final tag state + let block = ghash::Block::default(); + let output_single = { + let mut g = ghash_single; + g.update(&[block]); + g.finalize() + }; + let output_chunked = { + let mut g = ghash_chunked; + g.update(&[block]); + g.finalize() + }; + + prop_assert_eq!(output_single.as_slice(), output_chunked.as_slice()); + } + } +} diff --git a/src/document.rs b/src/document.rs index 0d4399ab..01de3dca 100644 --- a/src/document.rs +++ b/src/document.rs @@ -19,6 +19,7 @@ use futures::Future; use itertools::{Either, EitherOrBoth, Itertools}; pub mod advanced; +pub mod file; /// List of users and groups that should have access to decrypt a document. 
#[derive(Clone, Debug, Eq, Hash, PartialEq)] diff --git a/src/document/file.rs b/src/document/file.rs new file mode 100644 index 00000000..25167923 --- /dev/null +++ b/src/document/file.rs @@ -0,0 +1,291 @@ +pub use crate::internal::document_api::file_ops::{ + DocumentFileDecryptResult, DocumentFileDecryptUnmanagedResult, DocumentFileEncryptResult, + DocumentFileEncryptUnmanagedResult, +}; +use crate::{ + Result, SdkOperation, + document::{DocumentEncryptOpts, partition_user_or_group}, + internal::{add_optional_timeout, document_api::file_ops}, +}; +use futures::Future; +use itertools::EitherOrBoth; + +/// IronOxide File-Based Document Operations +/// +/// These operations use streaming I/O with constant memory usage, making them suitable +/// for large files. The encrypted format is identical to [DocumentOps](../trait.DocumentOps.html), +/// ensuring full interoperability between file and memory-based operations. +pub trait DocumentFileOps { + /// Encrypts a file from source path to destination path. + /// + /// Uses streaming I/O with constant memory. Output format is identical to + /// [document_encrypt](../trait.DocumentOps.html#tymethod.document_encrypt) and can be decrypted with + /// [document_decrypt](../trait.DocumentOps.html#tymethod.document_decrypt) provided enough memory. 
+ /// + /// # Arguments + /// - `source_path` - Path to the plaintext file to encrypt + /// - `destination_path` - Path where the encrypted file will be written + /// - `opts` - Encryption options + /// + /// # Examples + /// ```no_run + /// # async fn run() -> Result<(), ironoxide::IronOxideErr> { + /// # use ironoxide::prelude::*; + /// # use ironoxide::document::file::DocumentFileOps; + /// # let sdk: IronOxide = unimplemented!(); + /// let opts = DocumentEncryptOpts::default(); + /// let result = sdk.document_file_encrypt("/path/to/plaintext", "/path/to/encrypted.iron", &opts).await?; + /// # Ok(()) + /// # } + /// ``` + fn document_file_encrypt( + &self, + source_path: &str, + destination_path: &str, + opts: &DocumentEncryptOpts, + ) -> impl Future> + Send; + + /// Decrypts an encrypted file to destination path. + /// + /// Uses streaming I/O with constant memory. Can decrypt files created by either + /// [document_file_encrypt](trait.DocumentFileOps.html#tymethod.document_file_encrypt) + /// or [document_encrypt](../trait.DocumentOps.html#tymethod.document_encrypt). + /// + /// # Arguments + /// - `source_path` - Path to the encrypted file + /// - `destination_path` - Path where the decrypted file will be written + /// + /// # Examples + /// ```no_run + /// # async fn run() -> Result<(), ironoxide::IronOxideErr> { + /// # use ironoxide::prelude::*; + /// # use ironoxide::document::file::DocumentFileOps; + /// # let sdk: IronOxide = unimplemented!(); + /// let result = sdk.document_file_decrypt("/path/to/encrypted.iron", "/path/to/decrypted.dat").await?; + /// # Ok(()) + /// # } + /// ``` + /// + /// # Security + /// During decryption, plaintext is written to the destination file before authentication completes. The file is + /// created with restrictive permissions (0600 on Unix), exclusive share mode on Windows, and is automatically + /// deleted if authentication fails. Permissions are relaxed only after successful verification. 
+ fn document_file_decrypt( + &self, + source_path: &str, + destination_path: &str, + ) -> impl Future> + Send; +} + +/// IronOxide Unmanaged File-Based Document Operations +/// +/// These unmanaged versions allow the API consumer to manage the encrypted document encryption keys (EDEKs) themselves, +/// enabling offline encryption when public keys are pre-cached. +pub trait DocumentFileAdvancedOps { + /// Encrypts a file without storing metadata in the IronCore service. + /// + /// Uses streaming I/O with constant memory. The caller must store the returned + /// EDEKs alongside the encrypted file for later decryption. + /// + /// # Arguments + /// - `source_path` - Path to the plaintext file to encrypt + /// - `destination_path` - Path where the encrypted file will be written + /// - `opts` - Encryption options + /// + /// # Examples + /// ```no_run + /// # async fn run() -> Result<(), ironoxide::IronOxideErr> { + /// # use ironoxide::prelude::*; + /// # use ironoxide::document::file::DocumentFileAdvancedOps; + /// # let sdk: IronOxide = unimplemented!(); + /// let opts = DocumentEncryptOpts::default(); + /// let result = sdk.document_file_encrypt_unmanaged("/path/to/plaintext.dat", "/path/to/encrypted.iron", &opts).await?; + /// // Store encrypted_deks alongside the encrypted file + /// let edeks = result.encrypted_deks(); + /// # Ok(()) + /// # } + /// ``` + fn document_file_encrypt_unmanaged( + &self, + source_path: &str, + destination_path: &str, + opts: &DocumentEncryptOpts, + ) -> impl Future> + Send; + + /// Decrypts an unmanaged encrypted file to destination path. + /// + /// Uses streaming I/O with constant memory. Requires the EDEKs that were + /// returned when the file was encrypted. 
+ /// + /// # Arguments + /// - `source_path` - Path to the encrypted file + /// - `destination_path` - Path where the decrypted file will be written + /// - `encrypted_deks` - EDEKs associated with the encrypted file + /// + /// # Examples + /// ```no_run + /// # async fn run() -> Result<(), ironoxide::IronOxideErr> { + /// # use ironoxide::prelude::*; + /// # use ironoxide::document::file::DocumentFileAdvancedOps; + /// # let sdk: IronOxide = unimplemented!(); + /// # let edeks: Vec = vec![]; + /// let result = sdk.document_file_decrypt_unmanaged("/path/to/encrypted.iron", "/path/to/decrypted.dat", &edeks).await?; + /// # Ok(()) + /// # } + /// ``` + /// + /// # Security + /// During decryption, plaintext is written to the destination file before + /// authentication completes. The file is created with restrictive permissions + /// (0600 on Unix) and is automatically deleted if authentication fails. + /// Permissions are relaxed to 0644 only after successful verification. + fn document_file_decrypt_unmanaged( + &self, + source_path: &str, + destination_path: &str, + encrypted_deks: &[u8], + ) -> impl Future> + Send; +} + +impl DocumentFileOps for crate::IronOxide { + async fn document_file_encrypt( + &self, + source_path: &str, + destination_path: &str, + opts: &DocumentEncryptOpts, + ) -> Result { + let encrypt_opts = opts.clone(); + + let (explicit_users, explicit_groups, grant_to_author, policy_grants) = + match encrypt_opts.grants { + EitherOrBoth::Left(explicit_grants) => { + let (users, groups) = partition_user_or_group(&explicit_grants.grants); + (users, groups, explicit_grants.grant_to_author, None) + } + EitherOrBoth::Right(policy_grant) => (vec![], vec![], false, Some(policy_grant)), + EitherOrBoth::Both(explicit_grants, policy_grant) => { + let (users, groups) = partition_user_or_group(&explicit_grants.grants); + ( + users, + groups, + explicit_grants.grant_to_author, + Some(policy_grant), + ) + } + }; + + add_optional_timeout( + 
file_ops::encrypt_file_to_path(
+                self.device.auth(),
+                &self.config,
+                &self.recrypt,
+                &self.user_master_pub_key,
+                &self.rng,
+                source_path,
+                destination_path,
+                encrypt_opts.id,
+                encrypt_opts.name,
+                grant_to_author,
+                &explicit_users,
+                &explicit_groups,
+                policy_grants.as_ref(),
+                &self.policy_eval_cache,
+                &self.public_key_cache,
+            ),
+            self.config.sdk_operation_timeout,
+            // Use the file-specific operation variant (added in src/internal.rs in this same
+            // change) so a timeout error names the actual operation that was running.
+            SdkOperation::DocumentFileEncrypt,
+        )
+        .await?
+    }
+
+    async fn document_file_decrypt(
+        &self,
+        source_path: &str,
+        destination_path: &str,
+    ) -> Result<DocumentFileDecryptResult> {
+        add_optional_timeout(
+            file_ops::decrypt_file_to_path(
+                self.device.auth(),
+                self.recrypt.clone(),
+                self.device.device_private_key(),
+                source_path,
+                destination_path,
+            ),
+            self.config.sdk_operation_timeout,
+            // File-specific variant, not SdkOperation::DocumentDecrypt.
+            SdkOperation::DocumentFileDecrypt,
+        )
+        .await?
+    }
+}
+
+impl DocumentFileAdvancedOps for crate::IronOxide {
+    async fn document_file_encrypt_unmanaged(
+        &self,
+        source_path: &str,
+        destination_path: &str,
+        opts: &DocumentEncryptOpts,
+    ) -> Result<DocumentFileEncryptUnmanagedResult> {
+        let encrypt_opts = opts.clone();
+
+        // Split the grant specification into explicit user/group lists and an optional policy,
+        // mirroring the logic used by the in-memory document_encrypt_unmanaged path.
+        let (explicit_users, explicit_groups, grant_to_author, policy_grants) =
+            match encrypt_opts.grants {
+                EitherOrBoth::Left(explicit_grants) => {
+                    let (users, groups) = partition_user_or_group(&explicit_grants.grants);
+                    (users, groups, explicit_grants.grant_to_author, None)
+                }
+                EitherOrBoth::Right(policy_grant) => (vec![], vec![], false, Some(policy_grant)),
+                EitherOrBoth::Both(explicit_grants, policy_grant) => {
+                    let (users, groups) = partition_user_or_group(&explicit_grants.grants);
+                    (
+                        users,
+                        groups,
+                        explicit_grants.grant_to_author,
+                        Some(policy_grant),
+                    )
+                }
+            };
+
+        add_optional_timeout(
+            file_ops::encrypt_file_unmanaged(
+                self.device.auth(),
+                &self.config,
+                &self.recrypt,
+                &self.user_master_pub_key,
+                &self.rng,
+                source_path,
+                destination_path,
+                encrypt_opts.id,
+                grant_to_author,
+                &explicit_users,
+                &explicit_groups,
+                policy_grants.as_ref(),
+                &self.policy_eval_cache,
+                &self.public_key_cache,
+            ),
+
self.config.sdk_operation_timeout,
+            // Use the file-specific operation variant (added to SdkOperation below) so a
+            // timeout error names the actual operation that was running.
+            SdkOperation::DocumentFileEncryptUnmanaged,
+        )
+        .await?
+    }
+
+    async fn document_file_decrypt_unmanaged(
+        &self,
+        source_path: &str,
+        destination_path: &str,
+        encrypted_deks: &[u8],
+    ) -> Result<DocumentFileDecryptUnmanagedResult> {
+        add_optional_timeout(
+            file_ops::decrypt_file_unmanaged(
+                self.device.auth(),
+                &self.recrypt,
+                self.device.device_private_key(),
+                source_path,
+                destination_path,
+                encrypted_deks,
+            ),
+            self.config.sdk_operation_timeout,
+            // File-specific variant, not SdkOperation::DocumentDecryptUnmanaged.
+            SdkOperation::DocumentFileDecryptUnmanaged,
+        )
+        .await?
+    }
+}
diff --git a/src/internal.rs b/src/internal.rs
index 04176836..ce2ed692 100644
--- a/src/internal.rs
+++ b/src/internal.rs
@@ -97,6 +97,10 @@ pub enum SdkOperation {
     DocumentRevokeAccess,
     DocumentEncryptUnmanaged,
     DocumentDecryptUnmanaged,
+    DocumentFileEncrypt,
+    DocumentFileDecrypt,
+    DocumentFileEncryptUnmanaged,
+    DocumentFileDecryptUnmanaged,
     UserCreate,
     UserListDevices,
     GenerateNewDevice,
@@ -208,6 +212,12 @@ quick_error! {
         CacheSerdeError(error: postcard::Error) {
             source(error)
         }
+        AesGcmDecryptError {
+            display("AES-GCM decryption failed: authentication tag verification failed")
+        }
+        FileIoError { path: Option<String>, operation: String, message: String } {
+            display("File I/O error {}during {}: {}", path.as_ref().map(|s| format!("for '{s}' ")).unwrap_or_default(), operation, message)
+        }
     }
 }
diff --git a/src/internal/document_api.rs b/src/internal/document_api.rs
index 239e41f8..4b40a483 100644
--- a/src/internal/document_api.rs
+++ b/src/internal/document_api.rs
@@ -39,12 +39,27 @@ use std::{
 };
 use time::OffsetDateTime;
 
+pub mod file_ops;
 mod requests;
 
 const DOC_VERSION_HEADER_LENGTH: usize = 1;
 const HEADER_META_LENGTH_LENGTH: usize = 2;
 const CURRENT_DOCUMENT_ID_VERSION: u8 = 2;
 
+/// Given the first 3 bytes of an encrypted document, return the total header length in bytes
+/// (version byte + 2-byte length prefix + JSON header).
+pub(crate) fn parse_header_length(header_prefix: &[u8; 3]) -> Result<usize, IronOxideErr> {
+    //We're explicitly erroring on version 1 documents since there are so few of them and it seems extremely unlikely
+    //that anybody will use them with this SDK which was released after
we went to version 2. + if header_prefix[0] != CURRENT_DOCUMENT_ID_VERSION { + return Err(IronOxideErr::DocumentHeaderParseFailure( + "Document is not a supported version and may not be an encrypted file.".to_string(), + )); + } + //The 2nd and 3rd bytes of the header are a big-endian u16 that tell us how long the subsequent JSON + //header is in bytes. So we need to convert these two u8s into a single u16. + let encoded_header_size = header_prefix[1] as usize * 256 + header_prefix[2] as usize; + Ok(DOC_VERSION_HEADER_LENGTH + HEADER_META_LENGTH_LENGTH + encoded_header_size) +} + /// ID of a document. /// /// The ID can be validated from a `String` or `&str` using `DocumentId::try_from`. @@ -109,18 +124,18 @@ impl TryFrom for DocumentName { } /// Binary version of the document header. Appropriate for using in edoc serialization. -struct DocHeaderPacked(Vec); +pub(crate) struct DocHeaderPacked(pub Vec); /// Represents a parsed document header which is decoded from JSON #[derive(Debug, PartialEq, Serialize, Deserialize)] -struct DocumentHeader { +pub(crate) struct DocumentHeader { #[serde(rename = "_did_")] - document_id: DocumentId, + pub document_id: DocumentId, #[serde(rename = "_sid_")] - segment_id: usize, + pub segment_id: usize, } impl DocumentHeader { - fn new(document_id: DocumentId, segment_id: usize) -> DocumentHeader { + pub(crate) fn new(document_id: DocumentId, segment_id: usize) -> DocumentHeader { DocumentHeader { document_id, segment_id, @@ -128,7 +143,7 @@ impl DocumentHeader { } /// Generate a documents header given its ID and internal segment ID that is is associated with. Generates /// a Vec which includes the document version, header size, and header JSON as bytes. 
- fn pack(&self) -> DocHeaderPacked { + pub(crate) fn pack(&self) -> DocHeaderPacked { let mut header_json_bytes = serde_json::to_vec(&self).expect("Serialization of DocumentHeader failed."); //Serializing a string and number shouldn't fail let header_json_len = header_json_bytes.len(); @@ -149,34 +164,22 @@ impl DocumentHeader { fn parse_document_parts( encrypted_document: &[u8], ) -> Result<(DocumentHeader, aes::AesEncryptedValue), IronOxideErr> { - //We're explicitly erroring on version 1 documents since there are so few of them and it seems extremely unlikely - //that anybody will use them with this SDK which was released after we went to version 2. - if encrypted_document[0] != CURRENT_DOCUMENT_ID_VERSION { - Err(IronOxideErr::DocumentHeaderParseFailure( - "Document is not a supported version and may not be an encrypted file.".to_string(), - )) - } else { - let header_len_end = DOC_VERSION_HEADER_LENGTH + HEADER_META_LENGTH_LENGTH; - //The 2nd and 3rd bytes of the header are a big-endian u16 that tell us how long the subsequent JSON - //header is in bytes. So we need to convert these two u8s into a single u16. - let encoded_header_size = - encrypted_document[1] as usize * 256 + encrypted_document[2] as usize; - serde_json::from_slice( - &encrypted_document[header_len_end..(header_len_end + encoded_header_size)], + let header_len = parse_header_length(encrypted_document[..3].try_into().map_err(|_| { + IronOxideErr::DocumentHeaderParseFailure( + "Document is too short to contain a valid header".to_string(), ) + })?)?; + let header_json_start = DOC_VERSION_HEADER_LENGTH + HEADER_META_LENGTH_LENGTH; + serde_json::from_slice(&encrypted_document[header_json_start..header_len]) .map_err(|_| { IronOxideErr::DocumentHeaderParseFailure( "Unable to parse document header. 
Header value is corrupted.".to_string(), ) }) .and_then(|header_json| { - //Convert the remaining document bytes into an AesEncryptedValue which splits out the IV/data - Ok(( - header_json, - encrypted_document[(header_len_end + encoded_header_size)..].try_into()?, - )) + // Convert the remaining document bytes into an AesEncryptedValue which splits out the IV/data + Ok((header_json, encrypted_document[header_len..].try_into()?)) }) - } } /// The reason a document can be viewed by the requesting user. @@ -298,8 +301,10 @@ impl DocumentMetadataResult { &self.0.visible_to.groups } - // Not exposed outside of the crate - fn to_encrypted_symmetric_key(&self) -> Result { + /// Get the encrypted symmetric key (for internal use) + pub(crate) fn to_encrypted_symmetric_key( + &self, + ) -> Result { self.0.encrypted_symmetric_key.clone().try_into() } } @@ -705,7 +710,7 @@ type UserMasterPublicKey = PublicKey; /// A Future that will resolve to: /// (Left) list of keys for all users and groups that should be granted access to the document /// (Right) errors for any invalid users/groups that were passed -async fn resolve_keys_for_grants( +pub(crate) async fn resolve_keys_for_grants( auth: &RequestAuth, config: &IronOxideConfig, user_grants: &[UserId], @@ -903,7 +908,7 @@ fn dedupe_grants(grants: &[WithKey]) -> Vec> { /// Encrypt the document using transform crypto (recrypt). /// Can be called once you have public keys for users/groups that should have access as well as the /// AES encrypted data. -fn recrypt_document( +pub(crate) fn recrypt_document( signing_keys: &DeviceSigningKeyPair, recrypt: &Recrypt>, dek: Plaintext, @@ -950,8 +955,15 @@ fn recrypt_document( /// It can also be useful to think of an EDEK as representing a "document access grant" to a user/group. 
#[derive(Clone, Debug, Eq, Hash, PartialEq)] pub struct EncryptedDek { - grant_to: WithKey, - encrypted_dek_data: recrypt::api::EncryptedValue, + pub(crate) grant_to: WithKey, + pub(crate) encrypted_dek_data: recrypt::api::EncryptedValue, +} + +impl EncryptedDek { + /// Get the user or group this EDEK was encrypted to + pub(crate) fn grant_to(&self) -> &UserOrGroup { + &self.grant_to.id + } } impl TryFrom<&EncryptedDek> for EncryptedDekP { @@ -1020,9 +1032,9 @@ impl TryFrom<&EncryptedDek> for EncryptedDekP { /// `RecryptionResult` is an intermediate value as it cannot be serialized to bytes directly. /// To serialize to bytes, first construct an `EncryptedDoc` #[derive(Clone, Debug)] -struct RecryptionResult { - edeks: Vec, - encryption_errs: Vec, +pub(crate) struct RecryptionResult { + pub edeks: Vec, + pub encryption_errs: Vec, } impl RecryptionResult { @@ -1064,6 +1076,15 @@ impl EncryptedDoc { } } +/// Convert EDEKs to bytes for the given document. +pub(crate) fn edeks_to_bytes( + edeks: &[EncryptedDek], + document_id: &DocumentId, + segment_id: usize, +) -> Result, IronOxideErr> { + edeks_to_edeks_proto(edeks, document_id.id(), segment_id as i32) +} + fn edeks_to_edeks_proto( edeks: &[EncryptedDek], document_id: &str, @@ -1084,7 +1105,6 @@ fn edeks_to_edeks_proto( Ok(edek_bytes) } -/// Creates an encrypted document entry in the IronCore webservice. async fn document_create( auth: &RequestAuth, edoc: EncryptedDoc, @@ -1420,7 +1440,7 @@ pub fn document_revoke_access_unmanaged( } /// Check to see if a set of edeks match a document header -fn edeks_and_header_match_or_err( +pub(crate) fn edeks_and_header_match_or_err( edeks: &EncryptedDeksP, doc_meta: &DocumentHeader, ) -> Result<(), IronOxideErr> { diff --git a/src/internal/document_api/file_ops.rs b/src/internal/document_api/file_ops.rs new file mode 100644 index 00000000..c8c47f9c --- /dev/null +++ b/src/internal/document_api/file_ops.rs @@ -0,0 +1,781 @@ +//! 
File-based document encryption and decryption operations. +//! +//! This module provides streaming file encryption/decryption with constant memory usage. +//! The encrypted format is identical to the in-memory document encryption, ensuring +//! full interoperability between file and memory operations. + +use crate::{ + PolicyCache, Result, + config::IronOxideConfig, + crypto::{aes::AES_KEY_LEN, streaming, transform}, + group::GroupId, + internal::{ + IronOxideErr, PrivateKey, PublicKey, PublicKeyCache, RequestAuth, + document_api::{ + self, DocAccessEditErr, DocumentHeader, DocumentId, DocumentName, UserOrGroup, + parse_header_length, recrypt_document, + requests::{self, document_create}, + }, + }, + policy::PolicyGrant, + proto::transform::EncryptedDeks as EncryptedDeksP, + user::UserId, +}; +use protobuf::Message; +use rand::{CryptoRng, RngCore}; +use recrypt::prelude::*; +use std::{ + fs::{self, File, OpenOptions}, + io::{BufReader, BufWriter, Read, Seek, SeekFrom, Write}, + sync::Mutex, +}; +use time::OffsetDateTime; + +/// On Unix: mode 0600 (owner read/write only). +/// On Windows: share_mode(0) prevents other processes from accessing while open. +/// On anything else (wasm?): we don't have a clean method of restricting access during decryption. 
+fn create_output_file(path: &str) -> Result { + #[cfg(unix)] + { + use std::os::unix::fs::OpenOptionsExt; + OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .mode(0o600) + .open(path) + .map_err(|e| IronOxideErr::FileIoError { + path: Some(path.to_string()), + operation: "create".into(), + message: e.to_string(), + }) + } + + #[cfg(windows)] + { + use std::os::windows::fs::OpenOptionsExt; + OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .share_mode(0) + .open(path) + .map_err(|e| IronOxideErr::FileIoError { + path: Some(path.to_string()), + operation: "create".into(), + message: e.to_string(), + }) + } + + #[cfg(not(any(unix, windows)))] + { + OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .open(path) + .map_err(|e| IronOxideErr::FileIoError { + path: Some(path.to_string()), + operation: "create".into(), + message: e.to_string(), + }) + } +} + +/// On Unix: changes mode from 0600 to 0644 (owner read/write, group/other read). +/// On other platforms: no-op (Windows share_mode releases automatically on close, there was nothing to remove on wasm). +fn reset_file_permissions(path: &str) -> Result<()> { + #[cfg(unix)] + { + use fs::Permissions; + use std::os::unix::fs::PermissionsExt; + fs::set_permissions(path, Permissions::from_mode(0o644)).map_err(|e| { + IronOxideErr::FileIoError { + path: Some(path.to_string()), + operation: "set_permissions".into(), + message: e.to_string(), + } + }) + } + + #[cfg(not(unix))] + { + // suppress unused warning + let _ = path; + Ok(()) + } +} + +/// Used during decryption to ensure unauthenticated plaintext is cleaned up if verification fails or an error/panic +/// occurs before completion. +struct CleanupOnDrop { + path: String, + committed: bool, +} + +impl CleanupOnDrop { + fn new(path: &str) -> Self { + Self { + path: path.to_string(), + committed: false, + } + } + + /// Commit the file, which prevents deletion on drop. Should be done after verification. 
+ fn commit(mut self) { + self.committed = true; + } +} + +impl Drop for CleanupOnDrop { + fn drop(&mut self) { + if !self.committed { + // _ is to intentionally ignore failure here, there's nothing we or the caller can do if we can't remove the + // file (already removed, moved, permissions failure, etc). + let _ = fs::remove_file(&self.path); + } + } +} + +/// Open a source file and read/parse the IronCore document header. +/// Returns the parsed header and the file positioned after the header. +fn read_document_header(source_path: &str) -> Result<(DocumentHeader, File)> { + let mut source_file = File::open(source_path).map_err(|e| IronOxideErr::FileIoError { + path: Some(source_path.to_string()), + operation: "open".into(), + message: e.to_string(), + })?; + + // Read first few bytes to determine header length + let mut header_prefix = [0u8; 3]; + source_file + .read_exact(&mut header_prefix) + .map_err(|e| IronOxideErr::FileIoError { + path: Some(source_path.to_string()), + operation: "read_header".into(), + message: e.to_string(), + })?; + + let header_len = parse_header_length(&header_prefix)?; + + // Read full header from beginning + source_file + .seek(SeekFrom::Start(0)) + .map_err(|e| IronOxideErr::FileIoError { + path: Some(source_path.to_string()), + operation: "seek".into(), + message: e.to_string(), + })?; + + let mut header_bytes = vec![0u8; header_len]; + source_file + .read_exact(&mut header_bytes) + .map_err(|e| IronOxideErr::FileIoError { + path: Some(source_path.to_string()), + operation: "read_header".into(), + message: e.to_string(), + })?; + + // Parse header JSON (skip 3-byte prefix) + let doc_header: DocumentHeader = + serde_json::from_slice(&header_bytes[3..header_len]).map_err(|_| { + IronOxideErr::DocumentHeaderParseFailure( + "Unable to parse document header. Header value is corrupted.".to_string(), + ) + })?; + + Ok((doc_header, source_file)) +} + +/// Stream decrypt from source file to destination, handling cleanup on failure. 
+/// Creates output with restrictive permissions, streams decryption, verifies tag, +/// and resets permissions on success. +fn stream_decrypt_to_file( + key_bytes: &[u8; AES_KEY_LEN], + source_file: &File, + destination_path: &str, +) -> Result<()> { + let mut output_file = create_output_file(destination_path)?; + let cleanup_guard = CleanupOnDrop::new(destination_path); + + let mut reader = BufReader::new(source_file); + let mut writer = BufWriter::new(&mut output_file); + + streaming::decrypt_stream(key_bytes, &mut reader, &mut writer)?; + + writer.flush().map_err(|e| IronOxideErr::FileIoError { + path: Some(destination_path.to_string()), + operation: "flush".into(), + message: e.to_string(), + })?; + + // Verification succeeded - commit the file (prevents deletion on drop) + cleanup_guard.commit(); + reset_file_permissions(destination_path)?; + + Ok(()) +} + +/// Result of file encryption (managed). +/// +/// Produced by [document_file_encrypt](trait.DocumentFileOps.html#tymethod.document_file_encrypt). 
+#[derive(Clone, Debug, Eq, Hash, PartialEq)] +pub struct DocumentFileEncryptResult { + id: DocumentId, + name: Option, + created: OffsetDateTime, + updated: OffsetDateTime, + grants: Vec, + access_errs: Vec, +} + +impl DocumentFileEncryptResult { + /// ID of the encrypted document + pub fn id(&self) -> &DocumentId { + &self.id + } + + /// Name of the document + pub fn name(&self) -> Option<&DocumentName> { + self.name.as_ref() + } + + /// Date and time when the document was created + pub fn created(&self) -> &OffsetDateTime { + &self.created + } + + /// Date and time when the document was last updated + pub fn last_updated(&self) -> &OffsetDateTime { + &self.updated + } + /// Users and groups the document was successfully encrypted to + pub fn grants(&self) -> &[UserOrGroup] { + &self.grants + } + + /// Errors resulting from failure to encrypt to specific users/groups + pub fn access_errs(&self) -> &[DocAccessEditErr] { + &self.access_errs + } +} + +/// Result of file encryption (unmanaged). +/// +/// Produced by [document_file_encrypt_unmanaged](trait.DocumentFileUnmanagedOps.html#tymethod.document_file_encrypt_unmanaged). +#[derive(Clone, Debug, Eq, Hash, PartialEq)] +pub struct DocumentFileEncryptUnmanagedResult { + id: DocumentId, + encrypted_deks: Vec, + grants: Vec, + access_errs: Vec, +} + +impl DocumentFileEncryptUnmanagedResult { + /// ID of the encrypted document + pub fn id(&self) -> &DocumentId { + &self.id + } + + /// Bytes of EDEKs of users/groups that have been granted access + pub fn encrypted_deks(&self) -> &[u8] { + &self.encrypted_deks + } + + /// Users and groups the document was successfully encrypted to + pub fn grants(&self) -> &[UserOrGroup] { + &self.grants + } + + /// Errors resulting from failure to encrypt to specific users/groups + pub fn access_errs(&self) -> &[DocAccessEditErr] { + &self.access_errs + } +} + +/// Result of file decryption (managed). 
+/// +/// Produced by [document_file_decrypt](trait.DocumentFileOps.html#tymethod.document_file_decrypt). +#[derive(Clone, Debug, Eq, Hash, PartialEq)] +pub struct DocumentFileDecryptResult { + id: DocumentId, + name: Option, +} + +impl DocumentFileDecryptResult { + /// ID of the decrypted document + pub fn id(&self) -> &DocumentId { + &self.id + } + + /// Name of the document + pub fn name(&self) -> Option<&DocumentName> { + self.name.as_ref() + } +} + +/// Result of file decryption (unmanaged). +/// +/// Produced by [document_file_decrypt_unmanaged](trait.DocumentFileUnmanagedOps.html#tymethod.document_file_decrypt_unmanaged). +#[derive(Clone, Debug, Eq, Hash, PartialEq)] +pub struct DocumentFileDecryptUnmanagedResult { + id: DocumentId, + access_via: UserOrGroup, +} + +impl DocumentFileDecryptUnmanagedResult { + /// ID of the decrypted document + pub fn id(&self) -> &DocumentId { + &self.id + } + + /// User or group that granted access to the encrypted data + pub fn access_via(&self) -> &UserOrGroup { + &self.access_via + } +} + +/// Encrypt a file from source path to destination path. +/// Uses streaming I/O with constant memory. Output format is identical to `document_encrypt`. 
+pub async fn encrypt_file_to_path( + auth: &RequestAuth, + config: &IronOxideConfig, + recrypt: &Recrypt>, + user_master_pub_key: &PublicKey, + rng: &Mutex, + source_path: &str, + destination_path: &str, + document_id: Option, + document_name: Option, + grant_to_author: bool, + user_grants: &[UserId], + group_grants: &[GroupId], + policy_grant: Option<&PolicyGrant>, + policy_cache: &PolicyCache, + public_key_cache: &PublicKeyCache, +) -> Result +where + R1: CryptoRng + RngCore, + R2: CryptoRng + RngCore, +{ + let source_file = File::open(source_path).map_err(|e| IronOxideErr::FileIoError { + path: Some(source_path.to_string()), + operation: "open".into(), + message: e.to_string(), + })?; + let (dek, doc_sym_key) = transform::generate_new_doc_key(recrypt); + let doc_id = document_id.unwrap_or_else(|| DocumentId::goo_id(rng)); + let (grants, key_errs) = document_api::resolve_keys_for_grants( + auth, + config, + user_grants, + group_grants, + policy_grant, + if grant_to_author { + Some(user_master_pub_key) + } else { + None + }, + policy_cache, + public_key_cache, + ) + .await?; + let mut output_file = create_output_file(destination_path)?; + + // Write document header + let header = DocumentHeader::new(doc_id.clone(), auth.segment_id); + let header_bytes = header.pack(); + output_file + .write_all(&header_bytes.0) + .map_err(|e| IronOxideErr::FileIoError { + path: Some(destination_path.to_string()), + operation: "write_header".into(), + message: e.to_string(), + })?; + + let mut reader = BufReader::new(source_file); + let mut writer = BufWriter::new(&mut output_file); + + // Stream encrypt the file content (writes IV + ciphertext + auth tag) + let key_bytes = *doc_sym_key.bytes(); + streaming::encrypt_stream(&key_bytes, rng, &mut reader, &mut writer)?; + reset_file_permissions(destination_path)?; + + // Encrypt DEK to all grantees + let recryption_result = + recrypt_document(&auth.signing_private_key, recrypt, dek, &doc_id, grants)?; + + // Create document on server 
+ let create_result = document_create::document_create_request( + auth, + doc_id, + document_name, + recryption_result.edeks, + ) + .await?; + + Ok(DocumentFileEncryptResult { + id: create_result.id, + name: create_result.name, + created: create_result.created, + updated: create_result.updated, + grants: create_result + .shared_with + .into_iter() + .map(|sw| sw.into()) + .collect(), + access_errs: [key_errs, recryption_result.encryption_errs].concat(), + }) +} + +/// Decrypt an encrypted file to destination path. +/// +/// Uses streaming I/O with constant memory. +pub async fn decrypt_file_to_path( + auth: &RequestAuth, + recrypt: std::sync::Arc>>, + device_private_key: &PrivateKey, + source_path: &str, + destination_path: &str, +) -> Result +where + CR: CryptoRng + RngCore + Send + Sync + 'static, +{ + let (doc_header, source_file) = read_document_header(source_path)?; + + // Get document metadata from server + let doc_meta = document_api::document_get_metadata(auth, &doc_header.document_id).await?; + + // Decrypt the symmetric key + let device_private_key = device_private_key.clone(); + let encrypted_symmetric_key = doc_meta.to_encrypted_symmetric_key()?; + + let sym_key = tokio::task::spawn_blocking(move || { + transform::decrypt_as_symmetric_key( + &recrypt, + encrypted_symmetric_key, + device_private_key.recrypt_key(), + ) + }) + .await??; + + let key_bytes: [u8; AES_KEY_LEN] = *sym_key.bytes(); + stream_decrypt_to_file(&key_bytes, &source_file, destination_path)?; + + Ok(DocumentFileDecryptResult { + id: doc_meta.id().clone(), + name: doc_meta.name().cloned(), + }) +} + +/// Encrypt a file (unmanaged) - EDEKs are returned to caller instead of stored on server. 
+pub async fn encrypt_file_unmanaged( + auth: &RequestAuth, + config: &IronOxideConfig, + recrypt: &Recrypt>, + user_master_pub_key: &PublicKey, + rng: &Mutex, + source_path: &str, + destination_path: &str, + document_id: Option, + grant_to_author: bool, + user_grants: &[UserId], + group_grants: &[GroupId], + policy_grant: Option<&PolicyGrant>, + policy_cache: &PolicyCache, + public_key_cache: &PublicKeyCache, +) -> Result +where + R1: CryptoRng + RngCore, + R2: CryptoRng + RngCore, +{ + // Open source file + let source_file = File::open(source_path).map_err(|e| IronOxideErr::FileIoError { + path: Some(source_path.to_string()), + operation: "open".into(), + message: e.to_string(), + })?; + + // Generate keys + let (dek, doc_sym_key) = transform::generate_new_doc_key(recrypt); + let doc_id = document_id.unwrap_or_else(|| DocumentId::goo_id(rng)); + + // Resolve grants + let (grants, key_errs) = document_api::resolve_keys_for_grants( + auth, + config, + user_grants, + group_grants, + policy_grant, + if grant_to_author { + Some(user_master_pub_key) + } else { + None + }, + policy_cache, + public_key_cache, + ) + .await?; + + // Create output file + let mut output_file = create_output_file(destination_path)?; + + // Write document header + let header = DocumentHeader::new(doc_id.clone(), auth.segment_id); + let header_bytes = header.pack(); + output_file + .write_all(&header_bytes.0) + .map_err(|e| IronOxideErr::FileIoError { + path: Some(destination_path.to_string()), + operation: "write_header".into(), + message: e.to_string(), + })?; + + // Stream encrypt the file content (writes IV + ciphertext + auth tag) + let mut reader = BufReader::new(source_file); + let mut writer = BufWriter::new(&mut output_file); + + let key_bytes: [u8; AES_KEY_LEN] = *doc_sym_key.bytes(); + streaming::encrypt_stream(&key_bytes, rng, &mut reader, &mut writer)?; + + // Encrypt DEK to all grantees + let r = recrypt_document(&auth.signing_private_key, recrypt, dek, &doc_id, grants)?; + + // 
Convert EDEKs to bytes + let edek_bytes = document_api::edeks_to_bytes(&r.edeks, &doc_id, auth.segment_id)?; + + let successful_grants: Vec = + r.edeks.iter().map(|edek| edek.grant_to().clone()).collect(); + let all_errs: Vec = key_errs + .into_iter() + .chain(r.encryption_errs.clone()) + .collect(); + + // Reset file permissions to normal (0644 on Unix) + reset_file_permissions(destination_path)?; + + Ok(DocumentFileEncryptUnmanagedResult { + id: doc_id, + encrypted_deks: edek_bytes, + grants: successful_grants, + access_errs: all_errs, + }) +} + +/// Decrypt an encrypted file (unmanaged) - caller provides EDEKs. +pub async fn decrypt_file_unmanaged( + auth: &RequestAuth, + recrypt: &Recrypt>, + device_private_key: &PrivateKey, + source_path: &str, + destination_path: &str, + encrypted_deks: &[u8], +) -> Result +where + CR: CryptoRng + RngCore, +{ + let (doc_header, source_file) = read_document_header(source_path)?; + + // Parse and verify EDEKs match document + let proto_edeks = + EncryptedDeksP::parse_from_bytes(encrypted_deks).map_err(IronOxideErr::from)?; + document_api::edeks_and_header_match_or_err(&proto_edeks, &doc_header)?; + + // Transform EDEK + let transform_resp = requests::edek_transform::edek_transform(auth, encrypted_deks).await?; + let requests::edek_transform::EdekTransformResponse { + user_or_group, + encrypted_symmetric_key, + } = transform_resp; + + // Decrypt the symmetric key + let sym_key = transform::decrypt_as_symmetric_key( + recrypt, + encrypted_symmetric_key.try_into()?, + device_private_key.recrypt_key(), + )?; + + let key_bytes: [u8; AES_KEY_LEN] = *sym_key.bytes(); + stream_decrypt_to_file(&key_bytes, &source_file, destination_path)?; + + Ok(DocumentFileDecryptUnmanagedResult { + id: doc_header.document_id, + access_via: user_or_group, + }) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::fs; + use tempfile::NamedTempFile; + + #[test] + fn cleanup_on_drop_deletes_uncommitted_file() { + let temp_file = 
NamedTempFile::new().expect("Failed to create temp file"); + let path = temp_file.path().to_str().unwrap().to_string(); + + // Write some content so the file definitely exists + fs::write(&path, b"test content").expect("Failed to write"); + assert!(fs::metadata(&path).is_ok(), "File should exist before drop"); + + // Create guard and drop it without committing + let guard = CleanupOnDrop::new(&path); + drop(guard); + + // File should be deleted + assert!( + fs::metadata(&path).is_err(), + "File should be deleted after drop without commit" + ); + } + + #[test] + fn cleanup_on_drop_preserves_committed_file() { + let temp_file = NamedTempFile::new().expect("Failed to create temp file"); + let path = temp_file.path().to_str().unwrap().to_string(); + + // Write some content + fs::write(&path, b"test content").expect("Failed to write"); + assert!(fs::metadata(&path).is_ok(), "File should exist before drop"); + + // Create guard, commit it, then drop + let guard = CleanupOnDrop::new(&path); + guard.commit(); + + // File should still exist + assert!( + fs::metadata(&path).is_ok(), + "File should exist after committed drop" + ); + + // Clean up manually since we committed + let _ = fs::remove_file(&path); + } + + #[test] + fn cleanup_on_drop_handles_already_deleted_file() { + let temp_file = NamedTempFile::new().expect("Failed to create temp file"); + let path = temp_file.path().to_str().unwrap().to_string(); + + // Delete the file before the guard tries to clean up + let _ = fs::remove_file(&path); + + // This should not panic even though file doesn't exist + let guard = CleanupOnDrop::new(&path); + drop(guard); // Should silently handle the missing file + } + + #[cfg(unix)] + mod unix_permissions { + use super::*; + use std::os::unix::fs::PermissionsExt; + + #[test] + fn create_output_file_sets_restrictive_permissions() { + let temp_dir = tempfile::tempdir().expect("Failed to create temp dir"); + let path = temp_dir.path().join("test_output.txt"); + let path_str = 
path.to_str().unwrap(); + + let file = create_output_file(path_str).expect("Failed to create file"); + drop(file); + + let metadata = fs::metadata(path_str).expect("Failed to get metadata"); + let mode = metadata.permissions().mode() & 0o777; + + assert_eq!( + mode, 0o600, + "File should have mode 0600 (owner read/write only)" + ); + + // Clean up + let _ = fs::remove_file(path_str); + } + + #[test] + fn reset_file_permissions_sets_normal_permissions() { + let temp_dir = tempfile::tempdir().expect("Failed to create temp dir"); + let path = temp_dir.path().join("test_reset.txt"); + let path_str = path.to_str().unwrap(); + + // Create file with restrictive permissions + let file = create_output_file(path_str).expect("Failed to create file"); + drop(file); + + // Verify it starts with 0600 + let metadata = fs::metadata(path_str).expect("Failed to get metadata"); + let mode_before = metadata.permissions().mode() & 0o777; + assert_eq!(mode_before, 0o600); + + // Reset permissions + reset_file_permissions(path_str).expect("Failed to reset permissions"); + + // Verify it's now 0644 + let metadata = fs::metadata(path_str).expect("Failed to get metadata"); + let mode_after = metadata.permissions().mode() & 0o777; + assert_eq!(mode_after, 0o644, "File should have mode 0644 after reset"); + + // Clean up + let _ = fs::remove_file(path_str); + } + } + + #[cfg(windows)] + mod windows_permissions { + use super::*; + + #[test] + fn create_output_file_has_exclusive_access_on_windows() { + use std::io::Write; + + let temp_dir = tempfile::tempdir().expect("Failed to create temp dir"); + let path = temp_dir.path().join("test_output.txt"); + let path_str = path.to_str().unwrap(); + + // On Windows, create_output_file uses share_mode(0) which prevents + // other processes/handles from accessing while open + let mut file = create_output_file(path_str).expect("Failed to create file"); + + // Write something to the file + file.write_all(b"test").expect("Failed to write"); + + // While the 
file is still open, attempting to open it again should fail + // due to share_mode(0) denying all sharing + let open_attempt = fs::File::open(path_str); + assert!( + open_attempt.is_err(), + "Opening file should fail while held with exclusive share_mode(0)" + ); + + // Drop the original handle + drop(file); + + // Now opening should succeed + let content = fs::read(path_str).expect("Failed to read file after handle released"); + assert_eq!(content, b"test"); + + // Clean up + let _ = fs::remove_file(path_str); + } + + #[test] + fn reset_file_permissions_is_noop_on_windows() { + let temp_dir = tempfile::tempdir().expect("Failed to create temp dir"); + let path = temp_dir.path().join("test_reset.txt"); + let path_str = path.to_str().unwrap(); + + // Create a file + fs::write(path_str, b"test content").expect("Failed to write file"); + + // reset_file_permissions should succeed (it's a no-op on Windows) + reset_file_permissions(path_str).expect("reset_file_permissions should succeed"); + + // File should still be readable + let content = fs::read(path_str).expect("Failed to read file"); + assert_eq!(content, b"test content"); + + // Clean up + let _ = fs::remove_file(path_str); + } + } +} diff --git a/src/lib.rs b/src/lib.rs index 3e1f1201..8c276fba 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -161,6 +161,7 @@ mod proto { mod crypto { pub mod aes; + pub mod streaming; pub mod transform; } mod internal; diff --git a/src/prelude.rs b/src/prelude.rs index bd8a04c8..b6744d75 100644 --- a/src/prelude.rs +++ b/src/prelude.rs @@ -3,7 +3,7 @@ #[doc(no_inline)] pub use crate::{ InitAndRotationCheck, IronOxide, IronOxideErr, PrivateKeyRotationCheckResult, common::*, - config::*, document::advanced::*, document::*, group::*, policy::*, user::*, + config::*, document::advanced::*, document::file::*, document::*, group::*, policy::*, user::*, }; #[doc(no_inline)] pub use itertools::EitherOrBoth; diff --git a/tests/blocking_ops.rs b/tests/blocking_ops.rs index a502b15e..42bc95df 
100644 --- a/tests/blocking_ops.rs +++ b/tests/blocking_ops.rs @@ -6,12 +6,12 @@ mod common; mod blocking_integration_tests { use crate::common::{USER_PASSWORD, create_id_all_classes, gen_jwt}; use galvanic_assert::{matchers::*, *}; - use ironoxide::prelude::*; + use ironoxide::{Result, prelude::*}; use std::{convert::TryInto, time::Duration}; // Tests a UserOp (user_create/generate_new_device), a GroupOp (group_create), // and ironoxide::blocking functions (initialize/initialize_check_rotation) #[test] - fn rotate_all() -> Result<(), IronOxideErr> { + fn rotate_all() -> Result<()> { let account_id: UserId = create_id_all_classes("").try_into()?; let jwt = gen_jwt(Some(account_id.id())).0; BlockingIronOxide::user_create(&jwt, USER_PASSWORD, &UserCreateOpts::new(true), None)?; @@ -60,7 +60,7 @@ mod blocking_integration_tests { // Tests a DocumentOp (document_encrypt) and a DocumentAdvancedOp (document_encrypt_unmanaged) #[test] - fn document_encrypt() -> Result<(), IronOxideErr> { + fn document_encrypt() -> Result<()> { let account_id: UserId = create_id_all_classes("").try_into()?; BlockingIronOxide::user_create( &gen_jwt(Some(account_id.id())).0, @@ -91,7 +91,7 @@ mod blocking_integration_tests { // Show that SDK operations timeout correctly using BlockingIronOxide #[test] - fn initialize_with_timeout() -> Result<(), IronOxideErr> { + fn initialize_with_timeout() -> Result<()> { let account_id: UserId = create_id_all_classes("").try_into()?; BlockingIronOxide::user_create( &gen_jwt(Some(account_id.id())).0, @@ -130,7 +130,7 @@ mod blocking_integration_tests { } #[test] - fn rotate_all_with_timeout() -> Result<(), IronOxideErr> { + fn rotate_all_with_timeout() -> Result<()> { let account_id: UserId = create_id_all_classes("").try_into()?; BlockingIronOxide::user_create( &gen_jwt(Some(account_id.id())).0, @@ -168,4 +168,115 @@ mod blocking_integration_tests { panic!("rotation should be required") } } + + // Tests DocumentFileOps (managed file encrypt/decrypt 
roundtrip) + #[test] + fn document_file_encrypt_decrypt_roundtrip() -> Result<()> { + use std::io::Write; + use tempfile::NamedTempFile; + + let account_id: UserId = create_id_all_classes("").try_into()?; + BlockingIronOxide::user_create( + &gen_jwt(Some(account_id.id())).0, + USER_PASSWORD, + &UserCreateOpts::new(false), + None, + )?; + let device = BlockingIronOxide::generate_new_device( + &gen_jwt(Some(account_id.id())).0, + USER_PASSWORD, + &Default::default(), + None, + )? + .into(); + let sdk = ironoxide::blocking::initialize(&device, &Default::default())?; + + // Create source file with test data + let plaintext = b"Hello from blocking file operations test!"; + let mut source_file = NamedTempFile::new().expect("Failed to create temp file"); + source_file + .write_all(plaintext) + .expect("Failed to write test data"); + let source_path = source_file.path().to_str().unwrap(); + + // Create destination paths + let encrypted_file = NamedTempFile::new().expect("Failed to create temp file"); + let encrypted_path = encrypted_file.path().to_str().unwrap(); + let decrypted_file = NamedTempFile::new().expect("Failed to create temp file"); + let decrypted_path = decrypted_file.path().to_str().unwrap(); + + // Encrypt file + let encrypt_result = + sdk.document_file_encrypt(source_path, encrypted_path, &Default::default())?; + assert_eq!(encrypt_result.grants().len(), 1); + assert_eq!(encrypt_result.access_errs().len(), 0); + + // Decrypt file + let decrypt_result = sdk.document_file_decrypt(encrypted_path, decrypted_path)?; + assert_eq!(decrypt_result.id(), encrypt_result.id()); + + // Verify decrypted content matches original + let decrypted_content = std::fs::read(decrypted_path).expect("Failed to read decrypted"); + assert_eq!(decrypted_content, plaintext); + + Ok(()) + } + + // Tests DocumentFileAdvancedOps (unmanaged file encrypt/decrypt roundtrip) + #[test] + fn document_file_encrypt_decrypt_unmanaged_roundtrip() -> Result<()> { + use std::io::Write; + use 
tempfile::NamedTempFile; + + let account_id: UserId = create_id_all_classes("").try_into()?; + BlockingIronOxide::user_create( + &gen_jwt(Some(account_id.id())).0, + USER_PASSWORD, + &UserCreateOpts::new(false), + None, + )?; + let device = BlockingIronOxide::generate_new_device( + &gen_jwt(Some(account_id.id())).0, + USER_PASSWORD, + &Default::default(), + None, + )? + .into(); + let sdk = ironoxide::blocking::initialize(&device, &Default::default())?; + + // Create source file with test data + let plaintext = b"Hello from blocking unmanaged file operations test!"; + let mut source_file = NamedTempFile::new().expect("Failed to create temp file"); + source_file + .write_all(plaintext) + .expect("Failed to write test data"); + let source_path = source_file.path().to_str().unwrap(); + + // Create destination paths + let encrypted_file = NamedTempFile::new().expect("Failed to create temp file"); + let encrypted_path = encrypted_file.path().to_str().unwrap(); + let decrypted_file = NamedTempFile::new().expect("Failed to create temp file"); + let decrypted_path = decrypted_file.path().to_str().unwrap(); + + // Encrypt file (unmanaged) + let encrypt_result = + sdk.document_file_encrypt_unmanaged(source_path, encrypted_path, &Default::default())?; + assert_eq!(encrypt_result.grants().len(), 1); + assert_eq!(encrypt_result.access_errs().len(), 0); + assert!(!encrypt_result.encrypted_deks().is_empty()); + + // Decrypt file (unmanaged) + let decrypt_result = sdk.document_file_decrypt_unmanaged( + encrypted_path, + decrypted_path, + encrypt_result.encrypted_deks(), + )?; + assert_eq!(decrypt_result.id(), encrypt_result.id()); + + // Verify decrypted content matches original + let decrypted_content = std::fs::read(decrypted_path).expect("Failed to read decrypted"); + assert_eq!(decrypted_content, plaintext); + + Ok(()) + } } diff --git a/tests/file_ops.rs b/tests/file_ops.rs new file mode 100644 index 00000000..0dac8461 --- /dev/null +++ b/tests/file_ops.rs @@ -0,0 +1,721 @@ 
+mod common; + +use crate::common::{create_id_all_classes, create_second_user, init_sdk_get_user, initialize_sdk}; +use galvanic_assert::{matchers::collection::contains_in_any_order, *}; +use ironoxide::document::file::{DocumentFileAdvancedOps, DocumentFileOps}; +use ironoxide::{Result, prelude::*}; +use std::convert::TryInto; +use std::io::Write; +use tempfile::{NamedTempFile, TempPath}; + +// Helper to create a temp file with given content. +// Returns a TempPath (not NamedTempFile) because on Windows, NamedTempFile holds +// the file handle open, preventing other code from opening it for writing. +fn create_temp_file_with_content(content: &[u8]) -> TempPath { + let mut file = NamedTempFile::new().expect("Failed to create temp file"); + file.write_all(content).expect("Failed to write test data"); + file.flush().expect("Failed to flush test data"); + // Convert to TempPath which closes the handle but keeps path for cleanup + file.into_temp_path() +} + +// Helper to create an empty temp file path for output. +// Returns TempPath (handle closed) to avoid Windows file locking issues. +fn create_output_temp_file() -> TempPath { + NamedTempFile::new() + .expect("Failed to create temp file") + .into_temp_path() +} + +#[tokio::test] +async fn file_encrypt_decrypt_roundtrip() -> Result<()> { + let sdk = initialize_sdk().await?; + + let plaintext = b"Hello, World! 
This is a test of file encryption."; + let source_file = create_temp_file_with_content(plaintext); + let encrypted_file = create_output_temp_file(); + let decrypted_file = create_output_temp_file(); + + let source_path = source_file.to_str().unwrap(); + let encrypted_path = encrypted_file.to_str().unwrap(); + let decrypted_path = decrypted_file.to_str().unwrap(); + + let encrypt_result = sdk + .document_file_encrypt(source_path, encrypted_path, &Default::default()) + .await?; + + assert_eq!(encrypt_result.grants().len(), 1); + assert_eq!(encrypt_result.access_errs().len(), 0); + + let decrypt_result = sdk + .document_file_decrypt(encrypted_path, decrypted_path) + .await?; + + assert_eq!(decrypt_result.id(), encrypt_result.id()); + + let decrypted_content = std::fs::read(decrypted_path).expect("Failed to read decrypted file"); + assert_eq!(decrypted_content, plaintext); + + Ok(()) +} + +#[tokio::test] +async fn file_encrypt_decrypt_unmanaged_roundtrip() -> Result<()> { + let sdk = initialize_sdk().await?; + + let plaintext = b"Unmanaged file encryption test data"; + let source_file = create_temp_file_with_content(plaintext); + let encrypted_file = create_output_temp_file(); + let decrypted_file = create_output_temp_file(); + + let source_path = source_file.to_str().unwrap(); + let encrypted_path = encrypted_file.to_str().unwrap(); + let decrypted_path = decrypted_file.to_str().unwrap(); + + let encrypt_result = sdk + .document_file_encrypt_unmanaged(source_path, encrypted_path, &Default::default()) + .await?; + + assert_eq!(encrypt_result.grants().len(), 1); + assert_eq!(encrypt_result.access_errs().len(), 0); + assert!(!encrypt_result.encrypted_deks().is_empty()); + + let decrypt_result = sdk + .document_file_decrypt_unmanaged( + encrypted_path, + decrypted_path, + encrypt_result.encrypted_deks(), + ) + .await?; + + assert_eq!(decrypt_result.id(), encrypt_result.id()); + + let decrypted_content = std::fs::read(decrypted_path).expect("Failed to read decrypted 
file"); + assert_eq!(decrypted_content, plaintext); + + Ok(()) +} + +#[tokio::test] +async fn file_roundtrip_empty_data() -> Result<()> { + let sdk = initialize_sdk().await?; + + let plaintext: &[u8] = &[]; + let source_file = create_temp_file_with_content(plaintext); + let encrypted_file = create_output_temp_file(); + let decrypted_file = create_output_temp_file(); + + let source_path = source_file.to_str().unwrap(); + let encrypted_path = encrypted_file.to_str().unwrap(); + let decrypted_path = decrypted_file.to_str().unwrap(); + + let encrypt_result = sdk + .document_file_encrypt(source_path, encrypted_path, &Default::default()) + .await?; + + let decrypt_result = sdk + .document_file_decrypt(encrypted_path, decrypted_path) + .await?; + + assert_eq!(decrypt_result.id(), encrypt_result.id()); + + let decrypted_content = std::fs::read(decrypted_path).expect("Failed to read decrypted file"); + assert_eq!(decrypted_content, plaintext); + + Ok(()) +} + +#[tokio::test] +async fn file_roundtrip_large_data() -> Result<()> { + let sdk = initialize_sdk().await?; + + // Create 1MB of random-ish data + let plaintext: Vec = (0..1024 * 1024).map(|i| (i % 256) as u8).collect(); + let source_file = create_temp_file_with_content(&plaintext); + let encrypted_file = create_output_temp_file(); + let decrypted_file = create_output_temp_file(); + + let source_path = source_file.to_str().unwrap(); + let encrypted_path = encrypted_file.to_str().unwrap(); + let decrypted_path = decrypted_file.to_str().unwrap(); + + let encrypt_result = sdk + .document_file_encrypt(source_path, encrypted_path, &Default::default()) + .await?; + + assert_eq!(encrypt_result.grants().len(), 1); + + let decrypt_result = sdk + .document_file_decrypt(encrypted_path, decrypted_path) + .await?; + + assert_eq!(decrypt_result.id(), encrypt_result.id()); + + let decrypted_content = std::fs::read(decrypted_path).expect("Failed to read decrypted file"); + assert_eq!(decrypted_content, plaintext); + + Ok(()) +} + 
+#[tokio::test] +async fn file_roundtrip_large_data_unmanaged() -> Result<()> { + let sdk = initialize_sdk().await?; + + // Create 2MB of random-ish data + let plaintext: Vec = (0..2 * 1024 * 1024).map(|i| (i % 256) as u8).collect(); + let source_file = create_temp_file_with_content(&plaintext); + let encrypted_file = create_output_temp_file(); + let decrypted_file = create_output_temp_file(); + + let source_path = source_file.to_str().unwrap(); + let encrypted_path = encrypted_file.to_str().unwrap(); + let decrypted_path = decrypted_file.to_str().unwrap(); + + let encrypt_result = sdk + .document_file_encrypt_unmanaged(source_path, encrypted_path, &Default::default()) + .await?; + + assert_eq!(encrypt_result.grants().len(), 1); + + let decrypt_result = sdk + .document_file_decrypt_unmanaged( + encrypted_path, + decrypted_path, + encrypt_result.encrypted_deks(), + ) + .await?; + + assert_eq!(decrypt_result.id(), encrypt_result.id()); + + let decrypted_content = std::fs::read(decrypted_path).expect("Failed to read decrypted file"); + assert_eq!(decrypted_content, plaintext); + + Ok(()) +} + +#[tokio::test] +async fn file_encrypt_with_explicit_grants() -> Result<()> { + let sdk = initialize_sdk().await?; + let second_user = create_second_user().await; + + let plaintext = b"File with explicit grants"; + let source_file = create_temp_file_with_content(plaintext); + let encrypted_file = create_output_temp_file(); + + let source_path = source_file.to_str().unwrap(); + let encrypted_path = encrypted_file.to_str().unwrap(); + + let bad_user: UserId = "bad_user".try_into()?; + let bad_group: GroupId = "bad_group".try_into()?; + + let opts = DocumentEncryptOpts::with_explicit_grants( + None, + Some("file with grants".try_into()?), + true, + vec![ + UserOrGroup::User { + id: second_user.account_id().clone(), + }, + UserOrGroup::User { id: bad_user }, + UserOrGroup::Group { id: bad_group }, + ], + ); + + let encrypt_result = sdk + .document_file_encrypt(source_path, 
encrypted_path, &opts) + .await?; + + // Should have 2 successful grants (self + second_user) + assert_eq!(encrypt_result.grants().len(), 2); + assert_that!( + &encrypt_result + .grants() + .iter() + .cloned() + .collect::>(), + contains_in_any_order(vec![ + UserOrGroup::User { + id: sdk.device().account_id().clone() + }, + UserOrGroup::User { + id: second_user.account_id().clone() + }, + ]) + ); + // Should have 2 errors (bad_user + bad_group) + assert_eq!(encrypt_result.access_errs().len(), 2); + + Ok(()) +} + +#[tokio::test] +async fn file_encrypt_with_explicit_grants_unmanaged() -> Result<()> { + let sdk = initialize_sdk().await?; + let second_user = create_second_user().await; + + let plaintext = b"Unmanaged file with explicit grants"; + let source_file = create_temp_file_with_content(plaintext); + let encrypted_file = create_output_temp_file(); + + let source_path = source_file.to_str().unwrap(); + let encrypted_path = encrypted_file.to_str().unwrap(); + + let bad_user: UserId = "bad_user".try_into()?; + let bad_group: GroupId = "bad_group".try_into()?; + + let opts = DocumentEncryptOpts::with_explicit_grants( + None, + None, + true, + vec![ + UserOrGroup::User { + id: second_user.account_id().clone(), + }, + UserOrGroup::User { id: bad_user }, + UserOrGroup::Group { id: bad_group }, + ], + ); + + let encrypt_result = sdk + .document_file_encrypt_unmanaged(source_path, encrypted_path, &opts) + .await?; + + // Should have 2 successful grants (self + second_user) + assert_eq!(encrypt_result.grants().len(), 2); + // Should have 2 errors (bad_user + bad_group) + assert_eq!(encrypt_result.access_errs().len(), 2); + + Ok(()) +} + +#[tokio::test] +async fn file_encrypt_decrypt_with_document_id_and_name() -> Result<()> { + let sdk = initialize_sdk().await?; + + let plaintext = b"File with explicit ID and name"; + let source_file = create_temp_file_with_content(plaintext); + let encrypted_file = create_output_temp_file(); + let decrypted_file = 
create_output_temp_file(); + + let source_path = source_file.to_str().unwrap(); + let encrypted_path = encrypted_file.to_str().unwrap(); + let decrypted_path = decrypted_file.to_str().unwrap(); + + let doc_id: DocumentId = create_id_all_classes("file_doc_").try_into()?; + let doc_name: DocumentName = "Test File Document".try_into()?; + + let opts = DocumentEncryptOpts::with_explicit_grants( + Some(doc_id.clone()), + Some(doc_name.clone()), + true, + vec![], + ); + + let encrypt_result = sdk + .document_file_encrypt(source_path, encrypted_path, &opts) + .await?; + + assert_eq!(encrypt_result.id(), &doc_id); + assert_eq!(encrypt_result.name(), Some(&doc_name)); + + let decrypt_result = sdk + .document_file_decrypt(encrypted_path, decrypted_path) + .await?; + + assert_eq!(decrypt_result.id(), &doc_id); + assert_eq!(decrypt_result.name(), Some(&doc_name)); + + let decrypted_content = std::fs::read(decrypted_path).expect("Failed to read decrypted file"); + assert_eq!(decrypted_content, plaintext); + + Ok(()) +} + +#[tokio::test] +async fn file_encrypt_without_self_grant() -> Result<()> { + let sdk = initialize_sdk().await?; + let second_user = create_second_user().await; + + let plaintext = b"File without self grant"; + let source_file = create_temp_file_with_content(plaintext); + let encrypted_file = create_output_temp_file(); + let decrypted_file = create_output_temp_file(); + + let source_path = source_file.to_str().unwrap(); + let encrypted_path = encrypted_file.to_str().unwrap(); + let decrypted_path = decrypted_file.to_str().unwrap(); + + let opts = DocumentEncryptOpts::with_explicit_grants( + None, + None, + false, // grant_to_author = false + vec![UserOrGroup::User { + id: second_user.account_id().clone(), + }], + ); + + let encrypt_result = sdk + .document_file_encrypt(source_path, encrypted_path, &opts) + .await?; + + // Only second_user should have access + assert_eq!(encrypt_result.grants().len(), 1); + assert_eq!( + encrypt_result.grants()[0], + 
UserOrGroup::User { + id: second_user.account_id().clone() + } + ); + + // SDK user should NOT be able to decrypt + let decrypt_result = sdk + .document_file_decrypt(encrypted_path, decrypted_path) + .await; + + assert!(decrypt_result.is_err()); + + Ok(()) +} + +#[tokio::test] +async fn file_encrypt_source_not_found() -> Result<()> { + let sdk = initialize_sdk().await?; + + let encrypted_file = create_output_temp_file(); + let encrypted_path = encrypted_file.to_str().unwrap(); + + let result = sdk + .document_file_encrypt( + "/nonexistent/path/to/file.txt", + encrypted_path, + &Default::default(), + ) + .await; + + assert!(result.is_err()); + assert_that!(&result.unwrap_err(), is_variant!(IronOxideErr::FileIoError)); + + Ok(()) +} + +#[tokio::test] +async fn file_decrypt_source_not_found() -> Result<()> { + let sdk = initialize_sdk().await?; + + let decrypted_file = create_output_temp_file(); + let decrypted_path = decrypted_file.to_str().unwrap(); + + let result = sdk + .document_file_decrypt("/nonexistent/path/to/encrypted.iron", decrypted_path) + .await; + + assert!(result.is_err()); + assert_that!(&result.unwrap_err(), is_variant!(IronOxideErr::FileIoError)); + + Ok(()) +} + +#[tokio::test] +async fn file_decrypt_invalid_encrypted_data() -> Result<()> { + let sdk = initialize_sdk().await?; + + // Create a file with garbage data (not a valid encrypted document) + let garbage_data = b"This is not encrypted data"; + let source_file = create_temp_file_with_content(garbage_data); + let decrypted_file = create_output_temp_file(); + + let source_path = source_file.to_str().unwrap(); + let decrypted_path = decrypted_file.to_str().unwrap(); + + let result = sdk.document_file_decrypt(source_path, decrypted_path).await; + + assert!(result.is_err()); + // Should fail to parse the document header + assert_that!( + &result.unwrap_err(), + is_variant!(IronOxideErr::DocumentHeaderParseFailure) + ); + + Ok(()) +} + +#[tokio::test] +async fn 
file_encrypt_invalid_destination_path() -> Result<()> { + let sdk = initialize_sdk().await?; + + let plaintext = b"Test data for invalid destination"; + let source_file = create_temp_file_with_content(plaintext); + let source_path = source_file.to_str().unwrap(); + + // Try to write to a non-existent directory + let result = sdk + .document_file_encrypt( + source_path, + "/nonexistent/directory/output.iron", + &Default::default(), + ) + .await; + + assert!(result.is_err()); + assert_that!(&result.unwrap_err(), is_variant!(IronOxideErr::FileIoError)); + + Ok(()) +} + +#[tokio::test] +async fn file_decrypt_invalid_destination_path() -> Result<()> { + let sdk = initialize_sdk().await?; + + // First create a valid encrypted file + let plaintext = b"Test data"; + let source_file = create_temp_file_with_content(plaintext); + let encrypted_file = create_output_temp_file(); + + let source_path = source_file.to_str().unwrap(); + let encrypted_path = encrypted_file.to_str().unwrap(); + + sdk.document_file_encrypt(source_path, encrypted_path, &Default::default()) + .await?; + + // Try to decrypt to a non-existent directory + let result = sdk + .document_file_decrypt(encrypted_path, "/nonexistent/directory/output.txt") + .await; + + assert!(result.is_err()); + assert_that!(&result.unwrap_err(), is_variant!(IronOxideErr::FileIoError)); + + Ok(()) +} + +// Interoperability tests: file operations should produce output compatible with memory operations + +#[tokio::test] +async fn interop_file_encrypt_memory_decrypt() -> Result<()> { + let sdk = initialize_sdk().await?; + + let plaintext = b"Test interoperability: file encrypt, memory decrypt"; + let source_file = create_temp_file_with_content(plaintext); + let encrypted_file = create_output_temp_file(); + + let source_path = source_file.to_str().unwrap(); + let encrypted_path = encrypted_file.to_str().unwrap(); + + // Encrypt with file API + let _encrypt_result = sdk + .document_file_encrypt(source_path, encrypted_path, 
&Default::default()) + .await?; + + // Read encrypted file into memory + let encrypted_bytes = std::fs::read(encrypted_path).expect("Failed to read encrypted file"); + + // Decrypt with memory API + let decrypt_result = sdk.document_decrypt(&encrypted_bytes).await?; + + assert_eq!(decrypt_result.decrypted_data(), plaintext); + + Ok(()) +} + +#[tokio::test] +async fn interop_memory_encrypt_file_decrypt() -> Result<()> { + let sdk = initialize_sdk().await?; + + let plaintext = b"Test interoperability: memory encrypt, file decrypt"; + + // Encrypt with memory API + let encrypt_result = sdk + .document_encrypt(plaintext.to_vec(), &Default::default()) + .await?; + + // Write encrypted data to file + let encrypted_file = create_output_temp_file(); + let encrypted_path = encrypted_file.to_str().unwrap(); + std::fs::write(encrypted_path, encrypt_result.encrypted_data()) + .expect("Failed to write encrypted file"); + + let decrypted_file = create_output_temp_file(); + let decrypted_path = decrypted_file.to_str().unwrap(); + + // Decrypt with file API + let decrypt_result = sdk + .document_file_decrypt(encrypted_path, decrypted_path) + .await?; + + assert_eq!(decrypt_result.id(), encrypt_result.id()); + + let decrypted_content = std::fs::read(decrypted_path).expect("Failed to read decrypted file"); + assert_eq!(decrypted_content, plaintext); + + Ok(()) +} + +#[tokio::test] +async fn interop_file_encrypt_unmanaged_memory_decrypt_unmanaged() -> Result<()> { + let sdk = initialize_sdk().await?; + + let plaintext = b"Unmanaged interop test: file encrypt, memory decrypt"; + let source_file = create_temp_file_with_content(plaintext); + let encrypted_file = create_output_temp_file(); + + let source_path = source_file.to_str().unwrap(); + let encrypted_path = encrypted_file.to_str().unwrap(); + + // Encrypt with file API (unmanaged) + let encrypt_result = sdk + .document_file_encrypt_unmanaged(source_path, encrypted_path, &Default::default()) + .await?; + + // Read encrypted file 
into memory + let encrypted_bytes = std::fs::read(encrypted_path).expect("Failed to read encrypted file"); + + // Decrypt with memory API (unmanaged) + let decrypt_result = sdk + .document_decrypt_unmanaged(&encrypted_bytes, encrypt_result.encrypted_deks()) + .await?; + + assert_eq!(decrypt_result.decrypted_data(), plaintext); + + Ok(()) +} + +#[tokio::test] +async fn interop_memory_encrypt_unmanaged_file_decrypt_unmanaged() -> Result<()> { + let sdk = initialize_sdk().await?; + + let plaintext = b"Unmanaged interop test: memory encrypt, file decrypt"; + + // Encrypt with memory API (unmanaged) + let encrypt_result = sdk + .document_encrypt_unmanaged(plaintext.to_vec(), &Default::default()) + .await?; + + // Write encrypted data to file + let encrypted_file = create_output_temp_file(); + let encrypted_path = encrypted_file.to_str().unwrap(); + std::fs::write(encrypted_path, encrypt_result.encrypted_data()) + .expect("Failed to write encrypted file"); + + let decrypted_file = create_output_temp_file(); + let decrypted_path = decrypted_file.to_str().unwrap(); + + // Decrypt with file API (unmanaged) + let decrypt_result = sdk + .document_file_decrypt_unmanaged( + encrypted_path, + decrypted_path, + encrypt_result.encrypted_deks(), + ) + .await?; + + assert_eq!(decrypt_result.id(), encrypt_result.id()); + + let decrypted_content = std::fs::read(decrypted_path).expect("Failed to read decrypted file"); + assert_eq!(decrypted_content, plaintext); + + Ok(()) +} + +// Cross-user decryption tests + +#[tokio::test] +async fn file_encrypt_decrypt_by_different_user() -> Result<()> { + let (_user1, sdk1) = init_sdk_get_user().await; + let (user2, sdk2) = init_sdk_get_user().await; + + let plaintext = b"File shared between users"; + let source_file = create_temp_file_with_content(plaintext); + let encrypted_file = create_output_temp_file(); + let decrypted_file = create_output_temp_file(); + + let source_path = source_file.to_str().unwrap(); + let encrypted_path = 
encrypted_file.to_str().unwrap(); + let decrypted_path = decrypted_file.to_str().unwrap(); + + // User1 encrypts to User2 + let opts = DocumentEncryptOpts::with_explicit_grants( + None, + None, + false, // Don't grant to self + vec![UserOrGroup::User { id: user2.clone() }], + ); + + let encrypt_result = sdk1 + .document_file_encrypt(source_path, encrypted_path, &opts) + .await?; + + assert_eq!(encrypt_result.grants().len(), 1); + assert_eq!( + encrypt_result.grants()[0], + UserOrGroup::User { id: user2.clone() } + ); + + // User2 decrypts + let decrypt_result = sdk2 + .document_file_decrypt(encrypted_path, decrypted_path) + .await?; + + assert_eq!(decrypt_result.id(), encrypt_result.id()); + + let decrypted_content = std::fs::read(decrypted_path).expect("Failed to read decrypted file"); + assert_eq!(decrypted_content, plaintext); + + // User1 should NOT be able to decrypt (they didn't grant to self) + let decrypted_file2 = create_output_temp_file(); + let decrypted_path2 = decrypted_file2.to_str().unwrap(); + + let result = sdk1 + .document_file_decrypt(encrypted_path, decrypted_path2) + .await; + assert!(result.is_err()); + + Ok(()) +} + +#[tokio::test] +async fn file_encrypt_to_group() -> Result<()> { + let (_, sdk) = init_sdk_get_user().await; + + // Create a group + let group = sdk.group_create(&Default::default()).await?; + + let plaintext = b"File encrypted to group"; + let source_file = create_temp_file_with_content(plaintext); + let encrypted_file = create_output_temp_file(); + let decrypted_file = create_output_temp_file(); + + let source_path = source_file.to_str().unwrap(); + let encrypted_path = encrypted_file.to_str().unwrap(); + let decrypted_path = decrypted_file.to_str().unwrap(); + + let opts = DocumentEncryptOpts::with_explicit_grants( + None, + None, + false, // Don't grant to self directly + vec![UserOrGroup::Group { + id: group.id().clone(), + }], + ); + + let encrypt_result = sdk + .document_file_encrypt(source_path, encrypted_path, &opts) + 
.await?; + + assert_eq!(encrypt_result.grants().len(), 1); + assert_eq!( + encrypt_result.grants()[0], + UserOrGroup::Group { + id: group.id().clone() + } + ); + + // Should be able to decrypt via group membership + let _decrypt_result = sdk + .document_file_decrypt(encrypted_path, decrypted_path) + .await?; + + let decrypted_content = std::fs::read(decrypted_path).expect("Failed to read decrypted file"); + assert_eq!(decrypted_content, plaintext); + + Ok(()) +}