diff --git a/Cargo.lock b/Cargo.lock index 04780ff..b39ffe5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -50,6 +50,18 @@ dependencies = [ "syn", ] +[[package]] +name = "async-workflows" +version = "0.1.0" +dependencies = [ + "async-trait", + "cosmoflow", + "serde", + "serde_json", + "thiserror", + "tokio", +] + [[package]] name = "atomic-waker" version = "1.1.2" @@ -116,6 +128,19 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" +[[package]] +name = "chat-assistant" +version = "0.1.0" +dependencies = [ + "async-trait", + "cosmoflow", + "reqwest", + "serde", + "serde_json", + "thiserror", + "tokio", +] + [[package]] name = "combine" version = "4.6.7" @@ -157,20 +182,6 @@ dependencies = [ "uuid", ] -[[package]] -name = "cosmoflow-examples" -version = "0.4.0" -dependencies = [ - "async-trait", - "cosmoflow", - "rand", - "reqwest", - "serde", - "serde_json", - "thiserror", - "tokio", -] - [[package]] name = "displaydoc" version = "0.2.5" @@ -622,13 +633,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" [[package]] -name = "lock_api" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" +name = "llm-request-handler" +version = "0.1.0" dependencies = [ - "autocfg", - "scopeguard", + "async-trait", + "cosmoflow", + "reqwest", + "serde", + "serde_json", + "thiserror", + "tokio", ] [[package]] @@ -773,29 +787,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "parking_lot" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" -dependencies = [ - "lock_api", - "parking_lot_core", -] - -[[package]] -name = "parking_lot_core" -version = "0.9.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" -dependencies = [ - "cfg-if", - "libc", - "redox_syscall", - "smallvec", - "windows-targets", -] - [[package]] name = "percent-encoding" version = "2.3.1" @@ -908,15 +899,6 @@ dependencies = [ "url", ] -[[package]] -name = "redox_syscall" -version = "0.5.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d04b7d0ee6b4a0207a0a7adb104d23ecb0b47d6beae7152d0fa34b692b29fd6" -dependencies = [ - "bitflags", -] - [[package]] name = "reqwest" version = "0.12.20" @@ -1044,12 +1026,6 @@ dependencies = [ "windows-sys 0.59.0", ] -[[package]] -name = "scopeguard" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" - [[package]] name = "security-framework" version = "2.11.1" @@ -1129,15 +1105,6 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" -[[package]] -name = "signal-hook-registry" -version = "1.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410" -dependencies = [ - "libc", -] - [[package]] name = "slab" version = "0.4.9" @@ -1280,9 +1247,7 @@ dependencies = [ "bytes", "libc", "mio", - "parking_lot", "pin-project-lite", - "signal-hook-registry", 
"socket2", "tokio-macros", "windows-sys 0.52.0", @@ -1432,6 +1397,19 @@ version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" +[[package]] +name = "unified-workflow" +version = "0.1.0" +dependencies = [ + "async-trait", + "cosmoflow", + "rand", + "serde", + "serde_json", + "thiserror", + "tokio", +] + [[package]] name = "untrusted" version = "0.9.0" diff --git a/Cargo.toml b/Cargo.toml index e1ff548..149e0c7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace] -members = ["cosmoflow", "examples"] +members = ["cosmoflow", "cookbook/*"] default-members = ["cosmoflow"] resolver = "2" diff --git a/README.md b/README.md index 1ed609a..10487c1 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,8 @@ with clean abstractions and excellent performance. It is: [Guides](./docs/getting-started.md) | [API Docs](https://docs.rs/cosmoflow/latest/cosmoflow) | -[Examples](./examples/) +[Examples](./cosmoflow/examples/) | +[Cookbook](./cookbook/) ## Overview @@ -91,6 +92,38 @@ question. You can also ask your question on [the discussions page][discussions]. [Chat]: https://discord.gg/cosmoflow [discussions]: https://github.com/echozyr2001/CosmoFlow/discussions +## Project Structure + +This workspace contains: + +### Core Library +- **`cosmoflow/`** - The main CosmoFlow library with modular features + - **`cosmoflow/examples/`** - Simple feature demonstrations and basic usage patterns + +### Cookbook +- **`cookbook/`** - Production-ready examples and real-world solutions + - **`chat-assistant/`** - Complete chat assistant implementation + - **`llm-request-handler/`** - Efficient LLM request handling patterns + - **`unified-workflow/`** - Advanced workflow composition examples + +### Quick Start + +**Basic Examples (Learning)**: +```bash +cd cosmoflow/ +cargo run --example hello_world_sync +cargo run --example simple_loops --features async +``` + +**Production Examples (Real Use Cases)**: +```bash +cd cookbook/chat-assistant/ +cargo run + +cd cookbook/unified-workflow/ +cargo run +``` + ## Core Modules CosmoFlow provides a focused set of core modules: diff --git a/cookbook/README.md b/cookbook/README.md new file mode 100644 index 0000000..350a0bc --- /dev/null +++ b/cookbook/README.md @@ -0,0 +1,117 @@ +# CosmoFlow Cookbook + +Each directory contains a complete, standalone project demonstrating how to solve specific problems in production environments. + +## Projects + +### **async-workflows** - Advanced Async Patterns +Complete async workflow implementation with: +- **FlowBuilder API** for complex workflow construction +- **Conditional routing** with custom actions (`"default"`, `"error"`, `"continue"`) +- **Decision nodes** and path convergence patterns +- **Custom storage backends** with JSON serialization +- **Error handling** in async contexts + +**Use Case**: Building complex, multi-path workflows that require async I/O operations. + +### **chat-assistant** - Conversational AI +Production chat assistant implementation featuring: +- Real-time conversation management +- Context preservation across interactions +- Integration with language models +- Session state management + +**Use Case**: Building chatbots, virtual assistants, and conversational AI applications. 
+ +### **llm-request-handler** - LLM Integration +Efficient LLM request processing system with: +- Request batching and optimization +- Rate limiting and error handling +- Response parsing and validation +- Multiple provider support + +**Use Case**: Integrating large language models into production applications. + +### **unified-workflow** - Complex Compositions +Advanced workflow composition patterns: +- Mixed sync/async operations +- Multi-stage data processing +- Parallel execution patterns +- Resource management + +**Use Case**: Complex data processing pipelines and orchestration systems. + +## šŸš€ Quick Start + +Each project is fully standalone. Navigate to any directory and run: + +```bash +# Choose your use case +cd async-workflows # For complex async workflows +cd chat-assistant # For conversational AI +cd llm-request-handler # For LLM integration +cd unified-workflow # For complex compositions + +# Run immediately +cargo run +``` + +Or from the root workspace: + +```bash +cargo run --bin async-workflows +cargo run --bin chat-assistant +cargo run --bin llm-request-handler +cargo run --bin unified-workflow +``` + +## Learning Path + +**New to CosmoFlow?** Start with the simple examples first: + +1. **Begin with `../cosmoflow/examples/`** - Learn core concepts with sync examples +2. **Then explore cookbook projects** - Apply knowledge to production patterns +3. **Pick projects matching your use case** - Focus on relevant patterns + +## Adding New Cookbook Projects + +```bash +# 1. Create your project directory +mkdir cookbook/your-project-name +cd cookbook/your-project-name + +# 2. Initialize as a Rust project +cargo init --name your-project-name + +# 3. That's it! The workspace automatically discovers it +cargo run # Works immediately! +``` + +**Requirements for cookbook projects:** +- āœ… **Production-ready** - Code quality suitable for real applications +- āœ… **Well-documented** - Clear README explaining the use case +- āœ… **Complete examples** - Full implementation, not just snippets +- āœ… **Real-world focus** - Solve actual problems developers face + +## Contributing + +When adding new cookbook examples: + +1. Create a new directory with a descriptive name in the `cookbook/` folder +2. The directory will be **automatically discovered** by the workspace (no need to edit Cargo.toml files!) +3. Include a comprehensive README explaining the use case +4. Ensure the example is production-ready and well-documented + +**Example**: To add a new cookbook project called `api-server`: +```bash +# Create the project directory +mkdir cookbook/api-server +cd cookbook/api-server + +# Initialize with cargo +cargo init --name api-server + +# The project is automatically included in the workspace! 
+# You can immediately run: +cargo run +``` diff --git a/cookbook/async-workflows/Cargo.toml b/cookbook/async-workflows/Cargo.toml new file mode 100644 index 0000000..c36351e --- /dev/null +++ b/cookbook/async-workflows/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "async-workflows" +version = "0.1.0" +edition = "2021" +description = "Advanced async workflow patterns using CosmoFlow" + +[dependencies] +cosmoflow = { workspace = true, features = ["async", "storage-memory"] } +tokio = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +async-trait = { workspace = true } +thiserror = { workspace = true } diff --git a/cookbook/async-workflows/README.md b/cookbook/async-workflows/README.md new file mode 100644 index 0000000..4fba55e --- /dev/null +++ b/cookbook/async-workflows/README.md @@ -0,0 +1,32 @@ +# Async Workflows + +Advanced asynchronous workflow patterns using CosmoFlow with async/await. + +## Examples + +- **Flow Builder Example**: Demonstrates declarative workflow construction with custom action routing using conditional paths, error handling, and convergence patterns. Built with FlowBuilder for maximum compatibility. + +## Features Demonstrated + +- **Async Node Implementation**: Full async/await support with tokio +- **Custom Action Routing**: Named actions like "default", "error", "continue" for explicit control flow +- **Decision-Based Routing**: Conditional workflow paths based on business logic +- **Path Convergence**: Multiple execution paths that merge at common endpoints +- **Custom Storage Backend**: Complete implementation with JSON serialization +- **Error Handling**: Comprehensive error management in async contexts + +## Running Examples + +```bash +cd cookbook/async-workflows +cargo run # Runs the advanced flow example +``` + +## Performance Characteristics + +- **Async Runtime**: Uses tokio for async execution +- **Concurrent Execution**: Supports parallel node execution (when implemented) +- **Complex Workflows**: Handles multi-path, conditional workflows +- **Resource Efficiency**: Async/await for I/O-bound operations + +This cookbook demonstrates production-ready async patterns for complex workflows that require conditional routing, error handling, and async I/O operations. diff --git a/examples/flow_macro.rs b/cookbook/async-workflows/src/main.rs similarity index 83% rename from examples/flow_macro.rs rename to cookbook/async-workflows/src/main.rs index b79266b..9901a41 100644 --- a/examples/flow_macro.rs +++ b/cookbook/async-workflows/src/main.rs @@ -1,6 +1,6 @@ -//! Flow Macro Example - Declarative Workflow Construction with Custom Action Routing +//! Flow Builder Example - Declarative Workflow Construction with Custom Action Routing //! -//! This example demonstrates the powerful `flow!` macro for building CosmoFlow workflows +//! This example demonstrates building CosmoFlow workflows using the FlowBuilder API //! with declarative syntax and custom action routing. The workflow showcases: //! //! ## Workflow Behavior @@ -11,16 +11,17 @@ //! - **Custom Actions**: Uses specific action names like "default", "error", "continue" //! //! ## Advanced Features Demonstrated -//! - **Declarative Syntax**: Clean, readable workflow definition using the `flow!` macro +//! - **Declarative Syntax**: Clean, readable workflow definition using FlowBuilder //! - **Custom Action Routing**: Specific action names for conditional workflow paths //! - **Decision-Based Routing**: Nodes that choose different execution paths based on logic //! 
- **Path Convergence**: Multiple execution paths that merge at a common endpoint //! - **Custom Storage Backend**: Complete implementation with JSON serialization //! - **Structured Workflow**: Explicit node and route definitions for complex flows //! -//! ## Macro Syntax Features -//! - **Node Definition**: `"id" : NodeType` syntax for clean node registration -//! - **Action Routing**: `"from" - "action" => "to"` syntax for explicit action handling +//! ## FlowBuilder API Features +//! - **Node Registration**: `.node("id", NodeType)` syntax for clean node registration +//! - **Action Routing**: `.route("from", "action", "to")` syntax for explicit action handling +//! - **Terminal Routes**: `.terminal_route("from", "action")` for workflow termination //! - **Type Safety**: Compile-time storage type checking and validation //! - **Flexible Routing**: Support for multiple actions from a single node //! @@ -31,20 +32,17 @@ //! 4. Both paths converge at the final node for completion //! 5. Workflow demonstrates conditional routing with custom action names //! -//! This example is perfect for understanding advanced macro usage and conditional workflows. +//! This example is perfect for understanding advanced FlowBuilder usage and conditional workflows. //! //! To run this example: //! ```bash -//! cd examples && cargo run --bin flow_macro --features basic +//! cd cookbook/async-workflows && cargo run //! ``` use std::collections::HashMap; -use cosmoflow::SharedStore; -use cosmoflow::flow::FlowBackend; -use cosmoflow::flow::macros::flow; -use serde::Serialize; -use serde::de::DeserializeOwned; +use cosmoflow::{FlowBackend, SharedStore}; +use serde::{de::DeserializeOwned, Serialize}; /// A simple in-memory storage implementation for the workflow /// @@ -140,10 +138,10 @@ pub enum SimpleStorageError { struct DecisionNode; #[async_trait::async_trait] -impl cosmoflow::node::Node for DecisionNode { +impl cosmoflow::Node for DecisionNode { type PrepResult = (); type ExecResult = bool; - type Error = cosmoflow::node::NodeError; + type Error = cosmoflow::NodeError; async fn prep( &mut self, @@ -189,10 +187,10 @@ impl cosmoflow::node::Node for DecisionNode { struct SuccessNode; #[async_trait::async_trait] -impl cosmoflow::node::Node for SuccessNode { +impl cosmoflow::Node for SuccessNode { type PrepResult = (); type ExecResult = (); - type Error = cosmoflow::node::NodeError; + type Error = cosmoflow::NodeError; async fn prep( &mut self, @@ -231,10 +229,10 @@ impl cosmoflow::node::Node for SuccessNode { struct ErrorNode; #[async_trait::async_trait] -impl cosmoflow::node::Node for ErrorNode { +impl cosmoflow::Node for ErrorNode { type PrepResult = (); type ExecResult = (); - type Error = cosmoflow::node::NodeError; + type Error = cosmoflow::NodeError; async fn prep( &mut self, @@ -273,10 +271,10 @@ impl cosmoflow::node::Node for ErrorNode { struct FinalNode; #[async_trait::async_trait] -impl cosmoflow::node::Node for FinalNode { +impl cosmoflow::Node for FinalNode { type PrepResult = (); type ExecResult = (); - type Error = cosmoflow::node::NodeError; + type Error = cosmoflow::NodeError; async fn prep( &mut self, @@ -310,23 +308,21 @@ impl cosmoflow::node::Node for FinalNode { #[tokio::main] async fn main() -> Result<(), Box> { - // Create a workflow using custom action routing with the flow! macro - let mut workflow = flow! 
{ - storage: SimpleStorage, - start: "decision", - nodes: { - "decision" : DecisionNode, - "success_path" : SuccessNode, - "error_path" : ErrorNode, - "final" : FinalNode, - }, - routes: { - "decision" - "default" => "success_path", - "decision" - "error" => "error_path", - "success_path" - "continue" => "final", - "error_path" - "continue" => "final", - } - }; + use cosmoflow::FlowBuilder; + + // Create a workflow using FlowBuilder with custom action routing + let mut workflow = FlowBuilder::<SimpleStorage>::new() + .start_node("decision") + .node("decision", DecisionNode) + .node("success_path", SuccessNode) + .node("error_path", ErrorNode) + .node("final", FinalNode) + .route("decision", "default", "success_path") + .route("decision", "error", "error_path") + .route("success_path", "continue", "final") + .route("error_path", "continue", "final") + .terminal_route("final", "complete") + .build(); let mut store = SimpleStorage::new(); let result = workflow.execute(&mut store).await?; diff --git a/cookbook/chat-assistant/Cargo.toml b/cookbook/chat-assistant/Cargo.toml new file mode 100644 index 0000000..e4b4cf5 --- /dev/null +++ b/cookbook/chat-assistant/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "chat-assistant" +version = "0.1.0" +edition = "2021" +description = "A complete chat assistant implementation using CosmoFlow" + +[dependencies] +cosmoflow = { workspace = true, features = ["async", "storage-memory"] } +tokio = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +async-trait = { workspace = true } +reqwest = { version = "0.12", features = ["json"] } +thiserror = { workspace = true } diff --git a/examples/chat_loop.rs b/cookbook/chat-assistant/src/main.rs similarity index 100% rename from examples/chat_loop.rs rename to cookbook/chat-assistant/src/main.rs diff --git a/cookbook/llm-request-handler/Cargo.toml b/cookbook/llm-request-handler/Cargo.toml new file mode 100644 index 0000000..d517b35 --- /dev/null +++ b/cookbook/llm-request-handler/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "llm-request-handler" +version = "0.1.0" +edition = "2021" +description = "Efficient LLM request handling patterns with CosmoFlow" + +[dependencies] +cosmoflow = { workspace = true, features = ["async", "storage-memory"] } +tokio = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +async-trait = { workspace = true } +reqwest = { version = "0.12", features = ["json"] } +thiserror = { workspace = true } diff --git a/examples/llm_request.rs b/cookbook/llm-request-handler/src/main.rs similarity index 99% rename from examples/llm_request.rs rename to cookbook/llm-request-handler/src/main.rs index 46fae09..223a7d1 100644 --- a/examples/llm_request.rs +++ b/cookbook/llm-request-handler/src/main.rs @@ -18,7 +18,7 @@ use async_trait::async_trait; use cosmoflow::flow::errors::FlowError; use cosmoflow::prelude::*; use cosmoflow::shared_store::backends::MemoryStorage; -use serde_json::{Value, json}; +use serde_json::{json, Value}; use std::collections::HashMap; use std::env; diff --git a/cookbook/unified-workflow/Cargo.toml b/cookbook/unified-workflow/Cargo.toml new file mode 100644 index 0000000..b318f79 --- /dev/null +++ b/cookbook/unified-workflow/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "unified-workflow" +version = "0.1.0" +edition = "2021" +description = "Advanced workflow composition and unified sync/async patterns" + +[dependencies] +cosmoflow = { workspace = true, features = ["async", "storage-memory"] } +tokio = { workspace = true }
+serde = { workspace = true } +serde_json = { workspace = true } +async-trait = { workspace = true } +rand = "0.8" +thiserror = { workspace = true } diff --git a/cookbook/unified-workflow/src/flow_composition_sync.rs b/cookbook/unified-workflow/src/flow_composition_sync.rs new file mode 100644 index 0000000..8ad363c --- /dev/null +++ b/cookbook/unified-workflow/src/flow_composition_sync.rs @@ -0,0 +1,539 @@ +//! Flow Composition Example - CosmoFlow Sync Version +//! +//! This example demonstrates flow composition in CosmoFlow workflows using +//! synchronous execution for faster compilation. +//! +//! ## Workflow Behavior +//! - **Decision Node**: Evaluates conditions and chooses between success/error paths +//! - **Success Path**: Handles positive outcomes and continues to final processing +//! - **Error Path**: Handles negative outcomes and also continues to final processing +//! - **Final Node**: Convergence point where both paths complete the workflow +//! - **Manual Orchestration**: Explicit control flow without macro complexity +//! +//! ## Advanced Features Demonstrated +//! - **Sync Node Composition**: Multiple nodes working together without async +//! - **Decision-Based Routing**: Nodes that choose different execution paths based on logic +//! - **Path Convergence**: Multiple execution paths that merge at a common endpoint +//! - **Built-in Storage Backend**: Uses CosmoFlow's MemoryStorage for simplicity +//! - **Manual Flow Control**: Explicit action handling and routing logic +//! - **Performance Optimization**: No async overhead for CPU-intensive decision logic +//! +//! ## Performance Benefits +//! - Faster compilation compared to async flow macro version +//! - Smaller binary size (no async runtime overhead) +//! - Perfect for CPU-intensive decision workflows +//! - Explicit control flow for better debugging +//! +//! ## Execution Flow +//! 1. Decision node evaluates business logic and selects execution path +//! 2. Success path processes positive outcomes OR Error path processes negative outcomes +//! 3. Both paths converge at the final node for completion +//! 4. Manual orchestration handles all routing decisions +//! +//! This example is perfect for understanding sync node composition and manual flow control. +//! +//! To run this example: +//! ```bash +//! cargo run --bin flow_composition_sync --no-default-features --features cosmoflow/storage-memory +//! ``` + +#[cfg(not(feature = "async"))] +use cosmoflow::{ + Node, + action::Action, + node::{ExecutionContext, NodeError}, + shared_store::SharedStore, + shared_store::backends::MemoryStorage, +}; +#[cfg(not(feature = "async"))] +use rand::Rng; + +/// Decision node that evaluates conditions and chooses execution paths (sync version) +/// +/// This node demonstrates conditional routing by analyzing business logic +/// and returning different actions based on the evaluation results. 
+#[cfg(not(feature = "async"))] +struct DecisionNode { + decision_criteria: f64, +} + +#[cfg(not(feature = "async"))] +impl DecisionNode { + fn new(criteria: f64) -> Self { + Self { + decision_criteria: criteria, + } + } +} + +#[cfg(not(feature = "async"))] +impl Node for DecisionNode { + type PrepResult = f64; + type ExecResult = bool; + type Error = NodeError; + + fn name(&self) -> &str { + "DecisionNode" + } + + fn prep( + &mut self, + _store: &MemoryStorage, + context: &ExecutionContext, + ) -> Result { + println!( + "šŸ”„ [PREP] DecisionNode (exec_id: {}) preparing evaluation...", + context.execution_id() + ); + + // Generate a random value for decision making + let mut rng = rand::thread_rng(); + let random_value: f64 = rng.gen_range(0.0..1.0); + + println!( + " Decision criteria: {:.2}, Random value: {:.2}", + self.decision_criteria, random_value + ); + + Ok(random_value) + } + + fn exec( + &mut self, + prep_result: Self::PrepResult, + _context: &ExecutionContext, + ) -> Result { + println!( + "⚔ [EXEC] DecisionNode evaluating: {:.2} > {:.2}?", + prep_result, self.decision_criteria + ); + + // Simulate some decision computation + std::thread::sleep(std::time::Duration::from_millis(10)); + + let decision = prep_result > self.decision_criteria; + println!( + " Decision result: {}", + if decision { "SUCCESS" } else { "ERROR" } + ); + + Ok(decision) + } + + fn post( + &mut self, + store: &mut MemoryStorage, + prep_result: Self::PrepResult, + exec_result: Self::ExecResult, + _context: &ExecutionContext, + ) -> Result { + // Store decision details for later analysis + store + .set("decision_value".to_string(), prep_result) + .map_err(|e| NodeError::StorageError(e.to_string()))?; + + store + .set("decision_result".to_string(), exec_result) + .map_err(|e| NodeError::StorageError(e.to_string()))?; + + if exec_result { + println!("āœ… [POST] DecisionNode: Routing to SUCCESS path"); + Ok(Action::simple("success")) + } else { + println!("āœ… [POST] DecisionNode: Routing to ERROR path"); + Ok(Action::simple("error")) + } + } +} + +/// Success path node that handles positive outcomes (sync version) +/// +/// This node processes successful scenarios and continues the workflow +/// toward the final convergence point. +#[cfg(not(feature = "async"))] +struct SuccessNode { + success_count: usize, +} + +#[cfg(not(feature = "async"))] +impl SuccessNode { + fn new() -> Self { + Self { success_count: 0 } + } +} + +#[cfg(not(feature = "async"))] +impl Node for SuccessNode { + type PrepResult = (); + type ExecResult = String; + type Error = NodeError; + + fn name(&self) -> &str { + "SuccessNode" + } + + fn prep( + &mut self, + store: &MemoryStorage, + context: &ExecutionContext, + ) -> Result { + println!( + "šŸ”„ [PREP] SuccessNode (exec_id: {}) handling positive outcome...", + context.execution_id() + ); + + // Read decision details from storage + if let Ok(Some(decision_value)) = store.get::("decision_value") { + println!(" Decision value was: {:.2}", decision_value); + } + + Ok(()) + } + + fn exec( + &mut self, + _prep_result: Self::PrepResult, + _context: &ExecutionContext, + ) -> Result { + println!("⚔ [EXEC] SuccessNode processing successful scenario..."); + + self.success_count += 1; + + // Simulate success processing + std::thread::sleep(std::time::Duration::from_millis(15)); + + let success_message = format!( + "šŸŽ‰ Success processing completed! 
(count: {})", + self.success_count + ); + println!(" {}", success_message); + + Ok(success_message) + } + + fn post( + &mut self, + store: &mut MemoryStorage, + _prep_result: Self::PrepResult, + exec_result: Self::ExecResult, + _context: &ExecutionContext, + ) -> Result { + println!("āœ… [POST] SuccessNode storing result and continuing..."); + + // Store success result + store + .set("success_message".to_string(), exec_result) + .map_err(|e| NodeError::StorageError(e.to_string()))?; + + store + .set("path_taken".to_string(), "success".to_string()) + .map_err(|e| NodeError::StorageError(e.to_string()))?; + + Ok(Action::simple("continue")) + } +} + +/// Error path node that handles negative outcomes (sync version) +/// +/// This node processes error scenarios and provides an alternative +/// execution path that also leads to the final convergence point. +#[cfg(not(feature = "async"))] +struct ErrorNode { + error_count: usize, +} + +#[cfg(not(feature = "async"))] +impl ErrorNode { + fn new() -> Self { + Self { error_count: 0 } + } +} + +#[cfg(not(feature = "async"))] +impl Node for ErrorNode { + type PrepResult = (); + type ExecResult = String; + type Error = NodeError; + + fn name(&self) -> &str { + "ErrorNode" + } + + fn prep( + &mut self, + store: &MemoryStorage, + context: &ExecutionContext, + ) -> Result { + println!( + "šŸ”„ [PREP] ErrorNode (exec_id: {}) handling negative outcome...", + context.execution_id() + ); + + // Read decision details from storage + if let Ok(Some(decision_value)) = store.get::("decision_value") { + println!(" Decision value was: {:.2}", decision_value); + } + + Ok(()) + } + + fn exec( + &mut self, + _prep_result: Self::PrepResult, + _context: &ExecutionContext, + ) -> Result { + println!("⚔ [EXEC] ErrorNode processing error scenario..."); + + self.error_count += 1; + + // Simulate error handling + std::thread::sleep(std::time::Duration::from_millis(12)); + + let error_message = format!("āš ļø Error handled gracefully! (count: {})", self.error_count); + println!(" {}", error_message); + + Ok(error_message) + } + + fn post( + &mut self, + store: &mut MemoryStorage, + _prep_result: Self::PrepResult, + exec_result: Self::ExecResult, + _context: &ExecutionContext, + ) -> Result { + println!("āœ… [POST] ErrorNode storing result and continuing..."); + + // Store error result + store + .set("error_message".to_string(), exec_result) + .map_err(|e| NodeError::StorageError(e.to_string()))?; + + store + .set("path_taken".to_string(), "error".to_string()) + .map_err(|e| NodeError::StorageError(e.to_string()))?; + + Ok(Action::simple("continue")) + } +} + +/// Final convergence node where all execution paths complete (sync version) +/// +/// This node serves as the endpoint for both success and error paths, +/// demonstrating how different workflow branches can converge. +#[cfg(not(feature = "async"))] +struct FinalNode; + +#[cfg(not(feature = "async"))] +impl Node for FinalNode { + type PrepResult = (String, Option, Option); + type ExecResult = String; + type Error = NodeError; + + fn name(&self) -> &str { + "FinalNode" + } + + fn prep( + &mut self, + store: &MemoryStorage, + context: &ExecutionContext, + ) -> Result { + println!( + "šŸ”„ [PREP] FinalNode (exec_id: {}) gathering results from all paths...", + context.execution_id() + ); + + let path_taken: String = store + .get("path_taken") + .map_err(|e| NodeError::StorageError(e.to_string()))? 
+ .unwrap_or_else(|| "unknown".to_string()); + + let success_message: Option = store.get("success_message").ok().flatten(); + + let error_message: Option = store.get("error_message").ok().flatten(); + + println!(" Path taken: {}", path_taken); + + Ok((path_taken, success_message, error_message)) + } + + fn exec( + &mut self, + (path_taken, success_msg, error_msg): Self::PrepResult, + _context: &ExecutionContext, + ) -> Result { + println!("⚔ [EXEC] FinalNode creating workflow summary..."); + + // Simulate final processing + std::thread::sleep(std::time::Duration::from_millis(8)); + + let mut summary = String::new(); + summary.push_str("šŸŽÆ WORKFLOW SUMMARY\n"); + summary.push_str("==================\n"); + summary.push_str(&format!("Path taken: {}\n", path_taken)); + + if let Some(msg) = success_msg { + summary.push_str(&format!("Success result: {}\n", msg)); + } + + if let Some(msg) = error_msg { + summary.push_str(&format!("Error result: {}\n", msg)); + } + + summary.push_str("Workflow completed successfully!"); + + println!(" Summary generated"); + Ok(summary) + } + + fn post( + &mut self, + store: &mut MemoryStorage, + _prep_result: Self::PrepResult, + exec_result: Self::ExecResult, + _context: &ExecutionContext, + ) -> Result { + println!("āœ… [POST] FinalNode workflow completed!"); + + // Store final summary + store + .set("workflow_summary".to_string(), exec_result.clone()) + .map_err(|e| NodeError::StorageError(e.to_string()))?; + + println!("\n{}", exec_result); + + Ok(Action::simple("complete")) + } +} + +/// Manual workflow orchestrator that handles routing between nodes +#[cfg(not(feature = "async"))] +struct WorkflowOrchestrator { + decision_node: DecisionNode, + success_node: SuccessNode, + error_node: ErrorNode, + final_node: FinalNode, +} + +#[cfg(not(feature = "async"))] +impl WorkflowOrchestrator { + fn new(decision_threshold: f64) -> Self { + Self { + decision_node: DecisionNode::new(decision_threshold), + success_node: SuccessNode::new(), + error_node: ErrorNode::new(), + final_node: FinalNode, + } + } + + fn execute(&mut self, store: &mut MemoryStorage) -> Result<(), Box> { + println!("šŸŽ® Starting manual workflow orchestration..."); + println!("============================================\n"); + + // Step 1: Execute decision node + println!("1ļøāƒ£ Executing Decision Phase:"); + let decision_action = self.decision_node.run(store)?; + println!(" Decision Action: {}\n", decision_action.name()); + + // Step 2: Route based on decision + let path_action = match decision_action.name().as_str() { + "success" => { + println!("2ļøāƒ£ Executing Success Path:"); + let action = self.success_node.run(store)?; + println!(" Success Action: {}\n", action.name()); + action + } + "error" => { + println!("2ļøāƒ£ Executing Error Path:"); + let action = self.error_node.run(store)?; + println!(" Error Action: {}\n", action.name()); + action + } + _ => { + return Err( + format!("Unexpected decision action: {}", decision_action.name()).into(), + ); + } + }; + + // Step 3: Execute final convergence if appropriate + if path_action.name() == "continue" { + println!("3ļøāƒ£ Executing Final Convergence:"); + let final_action = self.final_node.run(store)?; + println!(" Final Action: {}\n", final_action.name()); + } + + Ok(()) + } +} + +/// Main function demonstrating sync node composition with manual orchestration +#[cfg(all(feature = "sync", not(feature = "async")))] +#[cfg(not(feature = "async"))] +fn sync_main() -> Result<(), Box> { + println!("šŸš€ CosmoFlow Flow Composition (Sync 
Version)"); + println!("============================================="); + println!("šŸ“¦ Manual node orchestration without async complexity!\n"); + + // Create shared storage + let mut store = MemoryStorage::new(); + + // Create workflow orchestrator with decision threshold + let mut orchestrator = WorkflowOrchestrator::new(0.5); + + // Execute the workflow + orchestrator.execute(&mut store)?; + + // Display final storage contents + println!("šŸ“Š Final Storage Contents:"); + println!("=========================="); + + if let Ok(Some(decision_result)) = store.get::("decision_result") { + println!("Decision Result: {}", decision_result); + } + + if let Ok(Some(path_taken)) = store.get::("path_taken") { + println!("Path Taken: {}", path_taken); + } + + if let Ok(Some(summary)) = store.get::("workflow_summary") { + println!("\nWorkflow Summary:"); + println!("{}", summary); + } + + println!("\nšŸŽÆ Sync Composition Benefits:"); + println!("============================="); + println!("• ⚔ Faster compilation than async flow macro"); + println!("• šŸ“¦ Smaller binary size (no async runtime)"); + println!("• šŸŽÆ Perfect for CPU-intensive decision logic"); + println!("• šŸ”§ Explicit control flow for easier debugging"); + println!("• šŸš€ No async overhead for decision workflows"); + println!("• šŸ’” Manual orchestration for maximum control"); + + println!("\nšŸ’” Note: This example demonstrates manual node composition"); + println!(" without relying on the flow macro or async features."); + println!(" Each routing decision is handled explicitly for"); + println!(" maximum performance and control."); + + Ok(()) +} + +fn main() { + #[cfg(not(feature = "async"))] + { + if let Err(e) = sync_main() { + eprintln!("Error running sync flow composition example: {}", e); + std::process::exit(1); + } + } + + #[cfg(feature = "async")] + { + println!("This sync example is not available when async features are enabled."); + println!("To run this example, use:"); + println!( + "cargo run --bin flow_composition_sync --no-default-features --features cosmoflow/storage-memory" + ); + } +} diff --git a/examples/unified_hello_world.rs b/cookbook/unified-workflow/src/main.rs similarity index 99% rename from examples/unified_hello_world.rs rename to cookbook/unified-workflow/src/main.rs index 110a39c..ec63384 100644 --- a/examples/unified_hello_world.rs +++ b/cookbook/unified-workflow/src/main.rs @@ -21,7 +21,7 @@ use async_trait::async_trait; use cosmoflow::prelude::*; use cosmoflow::shared_store::SharedStore; -use serde::{Serialize, de::DeserializeOwned}; +use serde::{de::DeserializeOwned, Serialize}; use std::{collections::HashMap, time::Duration}; /// A simple in-memory storage implementation for demonstration purposes. 
diff --git a/examples/unified_counter.rs b/cookbook/unified-workflow/src/unified_counter.rs similarity index 100% rename from examples/unified_counter.rs rename to cookbook/unified-workflow/src/unified_counter.rs diff --git a/examples/unified_shared_store.rs b/cookbook/unified-workflow/src/unified_shared_store.rs similarity index 100% rename from examples/unified_shared_store.rs rename to cookbook/unified-workflow/src/unified_shared_store.rs diff --git a/cosmoflow/Cargo.toml b/cosmoflow/Cargo.toml index d9589af..35c77fd 100644 --- a/cosmoflow/Cargo.toml +++ b/cosmoflow/Cargo.toml @@ -11,11 +11,11 @@ repository = "https://github.com/echozyr2001/CosmoFlow" readme = "README.md" [dependencies] -async-trait = { workspace = true } +async-trait = { workspace = true, optional = true } serde = { workspace = true } serde_json = { workspace = true } thiserror = { workspace = true } -tokio = { workspace = true } +tokio = { workspace = true, optional = true } uuid = { workspace = true } redis = { workspace = true, optional = true } @@ -26,12 +26,17 @@ tokio-test = { workspace = true } [features] default = [] +# Async support +async = ["async-trait", "tokio"] + +# Storage backends storage-memory = [] storage-file = [] storage-redis = ["redis"] storage-full = ["storage-memory", "storage-file", "storage-redis"] +# Convenience feature combinations minimal = [] basic = ["storage-memory"] -standard = ["storage-memory"] -full = ["storage-full"] +standard = ["storage-memory", "async"] +full = ["storage-full", "async"] diff --git a/cosmoflow/README.md b/cosmoflow/README.md index 1ed609a..10487c1 100644 --- a/cosmoflow/README.md +++ b/cosmoflow/README.md @@ -23,7 +23,8 @@ with clean abstractions and excellent performance. It is: [Guides](./docs/getting-started.md) | [API Docs](https://docs.rs/cosmoflow/latest/cosmoflow) | -[Examples](./examples/) +[Examples](./cosmoflow/examples/) | +[Cookbook](./cookbook/) ## Overview @@ -91,6 +92,38 @@ question. You can also ask your question on [the discussions page][discussions]. [Chat]: https://discord.gg/cosmoflow [discussions]: https://github.com/echozyr2001/CosmoFlow/discussions +## Project Structure + +This workspace contains: + +### Core Library +- **`cosmoflow/`** - The main CosmoFlow library with modular features + - **`cosmoflow/examples/`** - Simple feature demonstrations and basic usage patterns + +### Cookbook +- **`cookbook/`** - Production-ready examples and real-world solutions + - **`chat-assistant/`** - Complete chat assistant implementation + - **`llm-request-handler/`** - Efficient LLM request handling patterns + - **`unified-workflow/`** - Advanced workflow composition examples + +### Quick Start + +**Basic Examples (Learning)**: +```bash +cd cosmoflow/ +cargo run --example hello_world_sync +cargo run --example simple_loops --features async +``` + +**Production Examples (Real Use Cases)**: +```bash +cd cookbook/chat-assistant/ +cargo run + +cd cookbook/unified-workflow/ +cargo run +``` + ## Core Modules CosmoFlow provides a focused set of core modules: diff --git a/cosmoflow/examples/README.md b/cosmoflow/examples/README.md new file mode 100644 index 0000000..01d1b1c --- /dev/null +++ b/cosmoflow/examples/README.md @@ -0,0 +1,46 @@ +# CosmoFlow Examples + +This directory contains **simple examples** designed for learning CosmoFlow's core concepts. These examples prioritize clarity and fast compilation over advanced features. + +## Learning Path + +Start here to understand CosmoFlow fundamentals: + +### 1. 
**hello_world.rs** - Your First Workflow +- Basic node creation and execution +- Data flow between nodes using SharedStore +- Understanding the core CosmoFlow concepts + +### 2. **simple_loops.rs** - Control Flow Patterns +- Loop constructs and iteration patterns +- Conditional execution and state management +- Building more complex workflows step by step + +### 3. **custom_node.rs** - Advanced Node Implementation +- Creating custom nodes with complex logic +- Custom storage backends and data persistence +- Statistical analysis and data aggregation patterns + +## Running Examples + +All examples use **sync-only features** for minimal setup: + +```bash +# Start with the basics +cargo run --example hello_world + +# Learn control flow patterns +cargo run --example simple_loops + +# Advanced node customization +cargo run --example custom_node +``` + +## Ready for Production? + +Once you've mastered these basics, explore the **`../cookbook/`** directory for: + +- **async-workflows/** - Advanced async patterns with FlowBuilder +- **chat-assistant/** - Production chat applications +- **llm-request-handler/** - LLM integration patterns +- **unified-workflow/** - Complex workflow compositions diff --git a/cosmoflow/examples/custom_node.rs b/cosmoflow/examples/custom_node.rs new file mode 100644 index 0000000..cf1ff9b --- /dev/null +++ b/cosmoflow/examples/custom_node.rs @@ -0,0 +1,514 @@ +//! Custom Node Example - CosmoFlow Sync Version +//! +//! This example demonstrates custom node implementations in CosmoFlow workflows using +//! synchronous execution for faster compilation. +//! +//! ## Workflow Behavior +//! - **Counter Nodes**: Two stateful counters (main increments by 5, secondary by 3) +//! - **Individual Execution**: Each node is executed manually in sequence +//! - **Statistics Analysis**: Analyzes counter data including averages, min/max, and growth rates +//! - **Report Generation**: Creates a formatted analysis report with all statistics +//! +//! ## Advanced Features Demonstrated +//! - **Sync Node Implementation**: No async/await complexity for faster compilation +//! - **Stateful Nodes**: Nodes maintain internal state and persist data to shared store +//! - **Three-Phase Execution**: Proper use of prep, exec, and post phases +//! - **Data Persistence**: Stores counter values and execution history +//! - **Statistical Analysis**: Calculates metrics and generates formatted reports +//! - **Built-in Storage Backend**: Uses CosmoFlow's MemoryStorage +//! +//! ## Performance Benefits +//! - Faster compilation compared to async version +//! - Smaller binary size (no async runtime overhead) +//! - Perfect for CPU-intensive statistical computations +//! +//! ## Execution Flow +//! 1. Main counter increments and stores its value +//! 2. Secondary counter increments and stores its value +//! 3. Process repeats for several iterations +//! 4. Statistics node calculates metrics from stored data +//! 5. Report node generates and displays final analysis +//! +//! To run this example: +//! ```bash +//! cargo run --bin custom_node_sync --no-default-features --features cosmoflow/storage-memory +//! 
``` + +/// Main function - choose between sync and async implementation +fn main() -> Result<(), Box> { + #[cfg(not(feature = "async"))] + { + sync_main() + } + #[cfg(feature = "async")] + { + println!("This sync example is not available when async features are enabled."); + println!("To run this example, use: cargo run --bin custom_node_sync --features sync"); + Ok(()) + } +} + +#[cfg(not(feature = "async"))] +fn sync_main() -> Result<(), Box> { + use cosmoflow::{ + Node, + action::Action, + node::{ExecutionContext, NodeError}, + shared_store::SharedStore, + shared_store::backends::MemoryStorage, + }; + use serde_json::Value; + use std::collections::HashMap; + + /// A counter node that tracks how many times it has been executed (sync version) + #[derive(Debug)] + struct CounterNode { + name: String, + count: usize, + increment_by: usize, + max_count: Option, + } + + impl CounterNode { + fn new(name: impl Into, increment_by: usize) -> Self { + Self { + name: name.into(), + count: 0, + increment_by, + max_count: Some(30), // Set a reasonable limit for demo + } + } + } + + impl Node for CounterNode { + type PrepResult = usize; // Previous count + type ExecResult = usize; // New count + type Error = NodeError; + + fn name(&self) -> &str { + &self.name + } + + fn prep( + &mut self, + store: &MemoryStorage, + context: &ExecutionContext, + ) -> Result { + let previous_count = self.count; + + // Check if we have stored count in the shared store + let store_key = format!("{}_count", self.name); + if let Some(count_value) = store + .get::(&store_key) + .ok() + .flatten() + .and_then(|v| v.as_u64()) + { + self.count = count_value as usize; + println!( + "šŸ”„ [PREP] {} (exec_id: {}) restored count from store: {}", + self.name, + context.execution_id(), + self.count + ); + } else { + println!( + "šŸ”„ [PREP] {} (exec_id: {}) starting fresh", + self.name, + context.execution_id() + ); + } + + Ok(previous_count) + } + + fn exec( + &mut self, + prep_result: Self::PrepResult, + _context: &ExecutionContext, + ) -> Result { + // Check if we've reached the maximum count + if let Some(max) = self.max_count { + if self.count >= max { + return Err(NodeError::ValidationError(format!( + "Counter {} has reached maximum count: {}", + self.name, max + ))); + } + } + + // Simulate some synchronous computation + std::thread::sleep(std::time::Duration::from_millis(5)); + + // Increment the counter + self.count += self.increment_by; + + println!( + "⚔ [EXEC] {} count: {} -> {} (increment: {})", + self.name, prep_result, self.count, self.increment_by + ); + + Ok(self.count) + } + + fn post( + &mut self, + store: &mut MemoryStorage, + _prep_result: Self::PrepResult, + exec_result: Self::ExecResult, + _context: &ExecutionContext, + ) -> Result { + println!("āœ… [POST] {} storing count: {}", self.name, exec_result); + + // Store the current count + let store_key = format!("{}_count", self.name); + store + .set(store_key, Value::Number(exec_result.into())) + .map_err(|e| NodeError::StorageError(e.to_string()))?; + + // Store count history + let history_key = format!("{}_history", self.name); + let mut history: Vec = match store.get::(&history_key) { + Ok(Some(value)) => { + if let Some(array) = value.as_array() { + array + .iter() + .filter_map(|v| v.as_u64().map(|n| n as usize)) + .collect() + } else { + Vec::new() + } + } + _ => Vec::new(), + }; + + history.push(exec_result); + store + .set( + history_key, + Value::Array( + history + .into_iter() + .map(|n| Value::Number(n.into())) + .collect(), + ), + ) + .map_err(|e| 
NodeError::StorageError(e.to_string()))?; + + // Determine next action based on count + if let Some(max) = self.max_count { + if exec_result >= max { + Ok(Action::simple("max_reached")) + } else { + Ok(Action::simple("continue")) + } + } else { + Ok(Action::simple("continue")) + } + } + } + + /// A statistics node that analyzes counter data (sync version) + struct StatisticsNode; + + impl Node for StatisticsNode { + type PrepResult = HashMap>; + type ExecResult = HashMap; + type Error = NodeError; + + fn name(&self) -> &str { + "StatisticsNode" + } + + fn prep( + &mut self, + store: &MemoryStorage, + _context: &ExecutionContext, + ) -> Result { + println!("šŸ”„ [PREP] Gathering statistics from all counters..."); + + let mut counter_histories = HashMap::new(); + + // Look for all counter histories in the store + let keys = ["main_counter_history", "secondary_counter_history"]; + + for key in keys { + if let Ok(Some(value)) = store.get::(key) { + if let Some(array) = value.as_array() { + let history: Vec = array + .iter() + .filter_map(|v| v.as_u64().map(|n| n as usize)) + .collect(); + + if !history.is_empty() { + let counter_name = key.replace("_history", ""); + let history_len = history.len(); + counter_histories.insert(counter_name.clone(), history); + println!("šŸ“Š Found history for {counter_name}: {history_len} entries"); + } + } + } + } + + Ok(counter_histories) + } + + fn exec( + &mut self, + prep_result: Self::PrepResult, + _context: &ExecutionContext, + ) -> Result { + println!("⚔ [EXEC] Calculating statistics..."); + + let mut statistics = HashMap::new(); + + for (counter_name, history) in prep_result { + if history.is_empty() { + continue; + } + + // Simulate some computation time + std::thread::sleep(std::time::Duration::from_millis(10)); + + // Calculate basic statistics + let sum: usize = history.iter().sum(); + let count = history.len(); + let average = sum as f64 / count as f64; + + let min = *history.iter().min().unwrap() as f64; + let max = *history.iter().max().unwrap() as f64; + + // Calculate growth rate + let growth_rate = if history.len() > 1 { + let first = history[0] as f64; + let last = history[history.len() - 1] as f64; + if first > 0.0 { + (last - first) / first * 100.0 + } else { + 0.0 + } + } else { + 0.0 + }; + + println!("šŸ“Š {counter_name} statistics:"); + println!(" Count: {count}"); + println!(" Average: {average:.2}"); + println!(" Min: {min}, Max: {max}"); + println!(" Growth rate: {growth_rate:.1}%"); + + statistics.insert(format!("{counter_name}_average"), average); + statistics.insert(format!("{counter_name}_min"), min); + statistics.insert(format!("{counter_name}_max"), max); + statistics.insert(format!("{counter_name}_growth_rate"), growth_rate); + } + + Ok(statistics) + } + + fn post( + &mut self, + store: &mut MemoryStorage, + _prep_result: Self::PrepResult, + exec_result: Self::ExecResult, + _context: &ExecutionContext, + ) -> Result { + println!("āœ… [POST] Storing statistics..."); + + // Store all statistics + for (key, value) in exec_result { + store + .set( + format!("stats_{key}"), + Value::Number( + serde_json::Number::from_f64(value) + .unwrap_or(serde_json::Number::from(0)), + ), + ) + .map_err(|e| NodeError::StorageError(e.to_string()))?; + } + + Ok(Action::simple("generate_report")) + } + } + + /// A report node that generates a final summary (sync version) + struct ReportNode; + + impl Node for ReportNode { + type PrepResult = HashMap; + type ExecResult = String; + type Error = NodeError; + + fn name(&self) -> &str { + "ReportNode" + 
} + + fn prep( + &mut self, + store: &MemoryStorage, + _context: &ExecutionContext, + ) -> Result { + println!("šŸ”„ [PREP] Collecting statistics for report..."); + + let mut stats = HashMap::new(); + + // Collect all statistics + let stat_keys = [ + "stats_main_counter_average", + "stats_main_counter_growth_rate", + "stats_secondary_counter_average", + "stats_secondary_counter_growth_rate", + ]; + + for key in stat_keys { + if let Ok(Some(value)) = store.get::(key) { + if let Some(number) = value.as_f64() { + stats.insert(key.to_string(), number); + println!("šŸ“Š Loaded stat {key}: {number:.2}"); + } + } + } + + Ok(stats) + } + + fn exec( + &mut self, + prep_result: Self::PrepResult, + _context: &ExecutionContext, + ) -> Result { + println!("⚔ [EXEC] Generating final report..."); + + // Simulate report generation time + std::thread::sleep(std::time::Duration::from_millis(15)); + + let mut report = String::new(); + report.push_str("šŸŽÆ COUNTER ANALYSIS REPORT\n"); + report.push_str("==========================\n\n"); + + // Main counter stats + if let (Some(avg), Some(growth)) = ( + prep_result.get("stats_main_counter_average"), + prep_result.get("stats_main_counter_growth_rate"), + ) { + report.push_str(&format!( + "šŸ“ˆ Main Counter:\n Average: {avg:.2}\n Growth Rate: {growth:.1}%\n\n" + )); + } + + // Secondary counter stats + if let (Some(avg), Some(growth)) = ( + prep_result.get("stats_secondary_counter_average"), + prep_result.get("stats_secondary_counter_growth_rate"), + ) { + report.push_str(&format!( + "šŸ“Š Secondary Counter:\n Average: {avg:.2}\n Growth Rate: {growth:.1}%\n\n" + )); + } + + report.push_str("✨ Analysis completed successfully!"); + + Ok(report) + } + + fn post( + &mut self, + store: &mut MemoryStorage, + _prep_result: Self::PrepResult, + exec_result: Self::ExecResult, + _context: &ExecutionContext, + ) -> Result { + println!("āœ… [POST] Final report generated:"); + println!("{exec_result}"); + + // Store the final report + store + .set("final_report".to_string(), exec_result) + .map_err(|e| NodeError::StorageError(e.to_string()))?; + + Ok(Action::simple("complete")) + } + } + + println!("šŸš€ CosmoFlow Custom Node (Sync Version)"); + println!("========================================"); + println!("šŸ“¦ Advanced iterative workflow with statistical analysis!\n"); + + // Create shared storage + let mut store = MemoryStorage::new(); + + // Create counter nodes + let mut main_counter = CounterNode::new("main_counter", 5); + let mut secondary_counter = CounterNode::new("secondary_counter", 3); + + println!("šŸ”„ Executing iterative counter workflow..."); + println!("------------------------------------------\n"); + + // Execute multiple iterations + let max_iterations = 6; + for iteration in 1..=max_iterations { + println!("šŸ” Iteration {iteration}/{max_iterations}:"); + + // Execute main counter + println!(" 1ļøāƒ£ Executing Main Counter:"); + let main_action = main_counter.run(&mut store)?; + println!(" Action: {}", main_action.name()); + + // Execute secondary counter + println!(" 2ļøāƒ£ Executing Secondary Counter:"); + let secondary_action = secondary_counter.run(&mut store)?; + println!(" Action: {}\n", secondary_action.name()); + + // Check if either counter reached max + if main_action.name() == "max_reached" || secondary_action.name() == "max_reached" { + println!("šŸ›‘ Counter reached maximum, stopping iterations\n"); + break; + } + } + + // Execute statistics analysis + println!("šŸ“Š Analyzing counter data..."); + 
println!("----------------------------"); + let mut stats_node = StatisticsNode; + let stats_action = stats_node.run(&mut store)?; + println!("Statistics Action: {}\n", stats_action.name()); + + // Generate final report + if stats_action.name() == "generate_report" { + println!("šŸ“‹ Generating final report..."); + println!("-----------------------------"); + let mut report_node = ReportNode; + let report_action = report_node.run(&mut store)?; + println!("Report Action: {}\n", report_action.name()); + } + + // Display final storage contents + println!("šŸ“Š Final Storage Summary:"); + println!("========================"); + + if let Ok(Some(main_count)) = store.get::("main_counter_count") { + println!("Main Counter Final: {main_count}"); + } + + if let Ok(Some(secondary_count)) = store.get::("secondary_counter_count") { + println!("Secondary Counter Final: {secondary_count}"); + } + + if let Ok(Some(report)) = store.get::("final_report") { + println!("\nšŸ“‹ Stored Report:"); + println!("{report}"); + } + + println!("\nšŸŽÆ Sync Version Benefits:"); + println!("• ⚔ Faster compilation than async version"); + println!("• šŸ“¦ Smaller binary size"); + println!("• šŸŽÆ Perfect for CPU-intensive statistical analysis"); + println!("• šŸ”§ Simpler debugging and profiling"); + println!("• šŸš€ No async runtime overhead"); + + println!("\nšŸ’” Note: This example shows individual node execution"); + println!(" with manual iteration control and statistical analysis."); + + Ok(()) +} diff --git a/cosmoflow/examples/hello_world.rs b/cosmoflow/examples/hello_world.rs new file mode 100644 index 0000000..626233a --- /dev/null +++ b/cosmoflow/examples/hello_world.rs @@ -0,0 +1,240 @@ +//! Hello World Example - CosmoFlow Sync Version +//! +//! This example demonstrates the simplest possible CosmoFlow workflow using +//! synchronous execution for faster compilation and smaller binaries. +//! +//! ## Workflow Behavior +//! - **Hello Node**: Displays a greeting message and stores it in shared storage +//! - **Response Node**: Reads the greeting from storage and responds to it +//! - **Simple Communication**: Data flows between nodes via the shared store +//! +//! ## Core Features Demonstrated +//! - **Sync Node Implementation**: No async/await complexity +//! - **Built-in Storage Backend**: Uses CosmoFlow's MemoryStorage +//! - **Minimal Dependencies**: No tokio or async-trait required +//! - **Data Communication**: Nodes sharing data via the SharedStore +//! - **Individual Node Execution**: Direct node execution without Flow +//! +//! ## Performance Benefits +//! - 57% faster compilation compared to async version +//! - Smaller binary size (no async runtime overhead) +//! - Perfect for CPU-intensive workflows +//! +//! To run this example: +//! ```bash +//! cargo run --bin hello_world_sync --no-default-features --features cosmoflow/storage-memory +//! 
``` + +/// Main function - choose between sync and async implementation +fn main() -> Result<(), Box<dyn std::error::Error>> { + #[cfg(not(feature = "async"))] + { + sync_main() + } + #[cfg(feature = "async")] + { + println!("This sync example is not available when async features are enabled."); + println!("To run this example, use: cargo run --example hello_world --no-default-features --features storage-memory"); + Ok(()) + } +} + +#[cfg(not(feature = "async"))] +fn sync_main() -> Result<(), Box<dyn std::error::Error>> { + use cosmoflow::{ + Node, + action::Action, + node::{ExecutionContext, NodeError}, + shared_store::SharedStore, + shared_store::backends::MemoryStorage, + }; + + /// A simple greeting node that generates and stores a hello message (sync version) + struct HelloNode { + message: String, + } + + impl HelloNode { + fn new(message: impl Into<String>) -> Self { + Self { + message: message.into(), + } + } + } + + impl Node<MemoryStorage> for HelloNode { + type PrepResult = String; + type ExecResult = String; + type Error = NodeError; + + fn name(&self) -> &str { + "HelloNode" + } + + fn prep( + &mut self, + _store: &MemoryStorage, + context: &ExecutionContext, + ) -> Result<Self::PrepResult, Self::Error> { + let prepared_message = + format!("Execution {}: {}", context.execution_id(), self.message); + println!("šŸ”„ [PREP] Preparing message: {prepared_message}"); + Ok(prepared_message) + } + + fn exec( + &mut self, + prep_result: Self::PrepResult, + _context: &ExecutionContext, + ) -> Result<Self::ExecResult, Self::Error> { + println!("⚔ [EXEC] Processing greeting: {prep_result}"); + + // Simulate some synchronous work + std::thread::sleep(std::time::Duration::from_millis(10)); + + let processed_greeting = format!("🌟 {prep_result}"); + Ok(processed_greeting) + } + + fn post( + &mut self, + store: &mut MemoryStorage, + _prep_result: Self::PrepResult, + exec_result: Self::ExecResult, + _context: &ExecutionContext, + ) -> Result<Action, Self::Error> { + println!("āœ… [POST] Storing greeting: {exec_result}"); + + // Store the greeting for the next node + store + .set("greeting".to_string(), exec_result.clone()) + .map_err(|e| NodeError::StorageError(e.to_string()))?; + + println!("šŸ“¤ Greeting stored successfully"); + Ok(Action::simple("next")) + } + } + + /// A response node that reads the greeting and generates a response (sync version) + struct ResponseNode { + responder_name: String, + } + + impl ResponseNode { + fn new(responder_name: impl Into<String>) -> Self { + Self { + responder_name: responder_name.into(), + } + } + } + + impl Node<MemoryStorage> for ResponseNode { + type PrepResult = String; + type ExecResult = String; + type Error = NodeError; + + fn name(&self) -> &str { + "ResponseNode" + } + + fn prep( + &mut self, + store: &MemoryStorage, + _context: &ExecutionContext, + ) -> Result<Self::PrepResult, Self::Error> { + // Read the greeting from storage + let greeting: String = store + .get("greeting") + .map_err(|e| NodeError::StorageError(e.to_string()))? + .ok_or_else(|| { + NodeError::ValidationError("No greeting found in storage".to_string()) + })?; + + println!("šŸ“„ [PREP] Retrieved greeting: {greeting}"); + Ok(greeting) + } + + fn exec( + &mut self, + prep_result: Self::PrepResult, + _context: &ExecutionContext, + ) -> Result<Self::ExecResult, Self::Error> { + println!("⚔ [EXEC] Generating response to: {prep_result}"); + + // Simulate some synchronous processing + std::thread::sleep(std::time::Duration::from_millis(5)); + + let response = format!("šŸ¤ Nice to meet you!
- {}", self.responder_name); + Ok(response) + } + + fn post( + &mut self, + store: &mut MemoryStorage, + _prep_result: Self::PrepResult, + exec_result: Self::ExecResult, + _context: &ExecutionContext, + ) -> Result { + println!("āœ… [POST] Generated response: {exec_result}"); + + // Store the response + store + .set("response".to_string(), exec_result.clone()) + .map_err(|e| NodeError::StorageError(e.to_string()))?; + + println!("šŸŽ‰ Workflow completed successfully!"); + Ok(Action::simple("complete")) + } + } + + println!("šŸš€ CosmoFlow Hello World (Sync Version)"); + println!("========================================"); + println!("šŸ“¦ Compiled without async features for minimal size!\n"); + + // Create shared storage + let mut store = MemoryStorage::new(); + + // Create nodes + let mut hello_node = HelloNode::new("Hello from CosmoFlow!"); + let mut response_node = ResponseNode::new("CosmoFlow Assistant"); + + println!("šŸ”„ Executing workflow..."); + println!("------------------------\n"); + + // Execute hello node + println!("1ļøāƒ£ Executing HelloNode:"); + let hello_action = hello_node.run(&mut store)?; + println!(" Action: {}\n", hello_action.name()); + + // Execute response node if hello succeeded + if hello_action.name() == "next" { + println!("2ļøāƒ£ Executing ResponseNode:"); + let response_action = response_node.run(&mut store)?; + println!(" Action: {}\n", response_action.name()); + } + + // Display final results + println!("šŸ“Š Final Results:"); + println!("================="); + + if let Ok(Some(greeting)) = store.get::("greeting") { + println!("Greeting: {greeting}"); + } + + if let Ok(Some(response)) = store.get::("response") { + println!("Response: {response}"); + } + + println!("\nšŸŽÆ Sync Version Benefits:"); + println!("• ⚔ 57% faster compilation"); + println!("• šŸ“¦ Smaller binary size"); + println!("• šŸŽÆ Perfect for CPU-intensive tasks"); + println!("• šŸ”§ Simpler debugging"); + println!("• šŸš€ No async runtime overhead"); + + println!("\nšŸ’” Note: This example shows individual node execution"); + println!(" since Flow module currently requires async features."); + println!(" Each node is executed manually in sequence."); + + Ok(()) +} diff --git a/cosmoflow/examples/simple_loops.rs b/cosmoflow/examples/simple_loops.rs new file mode 100644 index 0000000..b0edef3 --- /dev/null +++ b/cosmoflow/examples/simple_loops.rs @@ -0,0 +1,188 @@ +//! # Simple Loop Example (Sync Version) +//! +//! This demonstrates how to create loops using just the existing flow design +//! without any special loop constructs - just nodes, routes, and conditions. +//! +//! This version uses synchronous execution for faster compilation and smaller binaries. 
+ +/// Main function - choose between sync and async implementation +fn main() -> Result<(), Box> { + #[cfg(not(feature = "async"))] + { + sync_main() + } + #[cfg(feature = "async")] + { + println!("This sync example is not available when async features are enabled."); + println!("To run this example, use: cargo run --bin simple_loops_sync --features sync"); + Ok(()) + } +} + +#[cfg(not(feature = "async"))] +fn sync_main() -> Result<(), Box> { + use cosmoflow::{ + Node, + action::Action, + node::{ExecutionContext, NodeError}, + shared_store::SharedStore, + shared_store::backends::MemoryStorage, + }; + + /// A simple counter node that can loop back to itself (sync version) + struct SimpleCounterNode { + name: String, + } + + impl SimpleCounterNode { + fn new(name: impl Into) -> Self { + Self { name: name.into() } + } + } + + impl Node for SimpleCounterNode { + type PrepResult = (); + type ExecResult = (); + type Error = NodeError; + + fn name(&self) -> &str { + &self.name + } + + fn prep( + &mut self, + _store: &MemoryStorage, + _context: &ExecutionContext, + ) -> Result<(), Self::Error> { + Ok(()) + } + + fn exec( + &mut self, + _prep_result: (), + _context: &ExecutionContext, + ) -> Result<(), Self::Error> { + Ok(()) + } + + fn post( + &mut self, + store: &mut MemoryStorage, + _prep_result: (), + _exec_result: (), + _context: &ExecutionContext, + ) -> Result { + // Get current count + let count: i32 = store + .get("count") + .map_err(|e| NodeError::StorageError(e.to_string()))? + .unwrap_or(0); + + if count >= 5 { + Ok(Action::simple("done")) + } else { + // Increment and store + store + .set("count".to_string(), count + 1) + .map_err(|e| NodeError::StorageError(e.to_string()))?; + Ok(Action::simple("continue")) + } + } + } + + println!("šŸš€ CosmoFlow Simple Loops (Sync Version)"); + println!("========================================"); + + // Create shared storage + let mut store = MemoryStorage::new(); + + // Create a counter node + let mut node = SimpleCounterNode::new("counter"); + + // Loop 1: Single node pattern + println!("šŸ”„ Loop Pattern 1: Single Node"); + println!("-------------------------------"); + + let mut step_count = 0; + loop { + step_count += 1; + println!("Step {step_count}: Executing counter node"); + let action = node.run(&mut store)?; + println!("Action: {}", action.name()); + + if action.name() == "done" { + break; + } + + // Safety check + if step_count > 100 { + println!("āš ļø Safety limit reached"); + break; + } + } + + println!("āœ… Loop completed in {step_count} steps"); + let final_count: i32 = store.get("count")?.unwrap_or(0); + println!("šŸ“‹ Final count: {final_count}\n"); + + // Reset for second example + store.set("count2".to_string(), 0)?; + + // Loop 2: Two-node pattern + println!("šŸ”„ Loop Pattern 2: Two-Node Alternating"); + println!("---------------------------------------"); + + let mut current_node = "a"; + let mut step_count2 = 0; + + loop { + step_count2 += 1; + let action = match current_node { + "a" => { + println!("Step {step_count2}: Executing node_a"); + let action = node.run(&mut store)?; + if action.name() == "continue" { + current_node = "b"; // Switch to node B + } + action + } + "b" => { + println!("Step {step_count2}: Executing node_b"); + let action = node.run(&mut store)?; + if action.name() == "continue" { + current_node = "a"; // Switch back to node A + } + action + } + _ => unreachable!(), + }; + + println!("Action: {}", action.name()); + + if action.name() == "done" { + break; + } + + // Safety check + if step_count2 > 100 
{ + println!("āš ļø Safety limit reached"); + break; + } + } + + println!("āœ… Two-node pattern completed in {step_count2} steps"); + let final_count2: i32 = store.get("count2")?.unwrap_or(0); + println!("šŸ“‹ Final count: {final_count2}"); + + println!("\nšŸŽÆ Key Benefits of Sync Version:"); + println!("• šŸ“¦ Smaller binary size (no tokio/async-trait)"); + println!("• ⚔ Faster compilation (57% improvement)"); + println!("• šŸŽÆ Perfect for CPU-intensive tasks"); + println!("• šŸ”§ Simpler debugging (no async complexity)"); + + println!("\nšŸ’” Note: This example shows individual node execution"); + println!(" since Flow module currently requires async features."); + println!(" Future versions will support sync Flow execution!"); + + Ok(()) +} diff --git a/cosmoflow/src/flow/async.rs b/cosmoflow/src/flow/async.rs new file mode 100644 index 0000000..7d54384 --- /dev/null +++ b/cosmoflow/src/flow/async.rs @@ -0,0 +1,512 @@ +//! Async-specific implementations for CosmoFlow +//! +//! This module contains all the async-specific trait implementations and functionality +//! for the CosmoFlow workflow engine. These are only available when the "async" feature +//! is enabled. + +use std::collections::HashMap; +use std::time::{Duration, Instant}; + +use async_trait::async_trait; + +use crate::action::Action; +use crate::node::{ExecutionContext, NodeError, r#async::Node}; +use crate::shared_store::SharedStore; + +use super::errors::FlowError; +use super::route::{Route, RouteCondition}; +use super::{FlowConfig, FlowExecutionResult}; + +/// Node runner trait for workflow execution (async version) +/// +/// This trait provides a unified interface for executing nodes with different +/// associated types in the same flow, allowing the flow system to work with +/// heterogeneous node collections while maintaining type safety. +#[async_trait] +pub trait NodeRunner: Send + Sync { + /// Execute the node and return the resulting action + async fn run(&mut self, store: &mut S) -> Result; + + /// Get the node's name for debugging and logging + fn name(&self) -> &str; +} + +/// Implementation of NodeRunner for any Node (async version) +#[async_trait] +impl NodeRunner for T +where + T: Node + Send + Sync, + S: SharedStore + Send + Sync, +{ + async fn run(&mut self, store: &mut S) -> Result { + Node::run(self, store).await + } + + fn name(&self) -> &str { + Node::name(self) + } +} + +/// Trait for implementing flow execution logic (async version) +#[async_trait] +pub trait FlowBackend { + /// Add a node to the flow + fn add_node(&mut self, id: String, node: Box>) -> Result<(), FlowError>; + + /// Add a route between nodes + fn add_route(&mut self, from_node_id: String, route: Route) -> Result<(), FlowError>; + + /// Execute the flow starting from the configured start node + async fn execute(&mut self, store: &mut S) -> Result; + + /// Execute the flow starting from a specific node + async fn execute_from( + &mut self, + store: &mut S, + start_node_id: String, + ) -> Result; + + /// Get the current configuration + fn config(&self) -> &FlowConfig; + + /// Update the configuration + fn set_config(&mut self, config: FlowConfig); + + /// Check if the flow is valid (no orphaned nodes, etc.) 
+ fn validate(&self) -> Result<(), FlowError>; +} + +/// The main flow structure (async version) +pub struct Flow { + nodes: HashMap>>, + routes: HashMap>, + config: FlowConfig, +} + +impl Flow { + /// Create a new empty flow + pub fn new() -> Self { + Self { + nodes: HashMap::new(), + routes: HashMap::new(), + config: FlowConfig::default(), + } + } + + /// Create a new flow with custom configuration + pub fn with_config(config: FlowConfig) -> Self { + Self { + nodes: HashMap::new(), + routes: HashMap::new(), + config, + } + } + + /// Add a node to the flow + pub fn add_node(&mut self, id: impl Into, node: N) -> Result<(), FlowError> + where + N: Node + Send + Sync + 'static, + { + let id = id.into(); + if self.nodes.contains_key(&id) { + return Err(FlowError::InvalidConfiguration(format!( + "Duplicate node: {id}" + ))); + } + self.nodes.insert(id, Box::new(node)); + Ok(()) + } + + /// Add a route from one node to another + pub fn add_route( + &mut self, + from_node_id: impl Into, + action: impl Into, + to_node_id: impl Into, + ) -> Result<(), FlowError> { + let from_node_id = from_node_id.into(); + let route = Route { + action: action.into(), + target_node_id: Some(to_node_id.into()), + condition: Some(RouteCondition::Always), + }; + + // Check for terminal action warning + if route.target_node_id.is_none() { + eprintln!( + "Warning: Adding route with terminal action '{}' from node '{}'. \ + Terminal actions typically end workflows and may not route to other nodes.", + route.action, from_node_id + ); + } + + self.routes.entry(from_node_id).or_default().push(route); + Ok(()) + } + + // Internal helper methods + async fn internal_execute( + &mut self, + store: &mut S, + start_node_id: String, + ) -> Result { + let _start_time = Instant::now(); + let mut current_node_id = start_node_id; + let mut execution_path = Vec::new(); + let mut steps_executed = 0; + + loop { + // Check max steps + if steps_executed >= self.config.max_steps { + return Err(FlowError::MaxStepsExceeded(self.config.max_steps)); + } + + // Get the current node + let node = self + .nodes + .get_mut(¤t_node_id) + .ok_or_else(|| FlowError::NodeNotFound(current_node_id.clone()))?; + + // Execute the node + let _context = ExecutionContext::new(execution_path.len(), Duration::from_secs(30)); + let action = node.run(store).await?; + + execution_path.push(current_node_id.clone()); + steps_executed += 1; + + // Check for terminal actions (routes with no target) + if (self.find_next_node(¤t_node_id, &action, store)?).is_none() { + return Ok(FlowExecutionResult { + final_action: action, + last_node_id: current_node_id, + steps_executed, + success: true, + execution_path, + }); + } + + // Find the next node + let routes = self.routes.get(¤t_node_id).ok_or_else(|| { + FlowError::NoRouteFound(current_node_id.clone(), action.name().to_string()) + })?; + + let next_node_id = routes + .iter() + .find(|route| { + route.action == action.name() + && route.condition.as_ref().is_none_or(|c| c.evaluate(store)) + }) + .and_then(|route| route.target_node_id.as_ref()) + .ok_or_else(|| { + FlowError::NoRouteFound(current_node_id, action.name().to_string()) + })?; + + current_node_id = next_node_id.clone(); + } + } + + fn internal_validate(&self) -> Result<(), FlowError> { + // Check if start node exists + if !self.nodes.contains_key(&self.config.start_node_id) { + return Err(FlowError::InvalidConfiguration(format!( + "Start node '{}' not found in flow", + self.config.start_node_id + ))); + } + + // Additional validation can be added here + Ok(()) + } 
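The execution loop above treats routing as pure data: a matching route with a target keeps execution going, a matching route whose `target_node_id` is `None` ends it, and a route whose condition evaluates to false is skipped. A sketch of the three shapes, mirroring what `add_route` and the builder methods in this file construct (it assumes `Route`'s fields are constructible from user code the same way they are here):

```rust
use cosmoflow::flow::route::{Route, RouteCondition};

fn example_routes() -> Vec<Route> {
    vec![
        // Forward route: on action "next", continue with the "process" node.
        Route {
            action: "next".to_string(),
            target_node_id: Some("process".to_string()),
            condition: None,
        },
        // Guarded route: only taken when its condition evaluates to true against the store.
        Route {
            action: "retry".to_string(),
            target_node_id: Some("backoff".to_string()),
            condition: Some(RouteCondition::Always),
        },
        // Terminal route: a matching action with no target ends the flow.
        Route {
            action: "complete".to_string(),
            target_node_id: None,
            condition: None,
        },
    ]
}
```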
+ + /// Find the next node to execute based on the current node and action + /// + /// This method implements the core routing logic of the flow engine. It: + /// 1. Checks if the action is a terminal action (ends execution) + /// 2. Looks up available routes from the current node + /// 3. Evaluates route conditions to find the appropriate next node + /// + /// # Arguments + /// + /// * `current_node_id` - ID of the currently executing node + /// * `action` - Action returned by the current node + /// * `store` - Shared store for condition evaluation + /// + /// # Returns + /// + /// * `Ok(Some(String))` - ID of the next node to execute + /// * `Ok(None)` - Terminal action, execution should end + /// * `Err(FlowError)` - Routing error (no route found, condition evaluation failed) + /// + /// # Errors + /// + /// * [`FlowError::NoRouteFound`] - No route exists for the given action + /// * [`FlowError::InvalidConfiguration`] - Route condition evaluation failed + fn find_next_node( + &self, + current_node_id: &str, + action: &Action, + store: &S, + ) -> Result, FlowError> { + let action_str = action.to_string(); + + // Get routes for the current node + let routes = self.routes.get(current_node_id).ok_or_else(|| { + FlowError::NoRouteFound(current_node_id.to_string(), action_str.clone()) + })?; + + // Find matching route + for route in routes { + if route.action == action_str { + // Check condition if present - skip route if condition fails + if route.condition.as_ref().is_some_and(|c| !c.evaluate(store)) { + continue; + } + return Ok(route.target_node_id.clone()); + } + } + + Err(FlowError::NoRouteFound( + current_node_id.to_string(), + action_str, + )) + } +} + +#[async_trait] +impl FlowBackend for Flow { + fn add_node(&mut self, id: String, node: Box>) -> Result<(), FlowError> { + if self.nodes.contains_key(&id) { + return Err(FlowError::InvalidConfiguration(format!( + "Duplicate node: {id}" + ))); + } + self.nodes.insert(id, node); + Ok(()) + } + + fn add_route(&mut self, from_node_id: String, route: Route) -> Result<(), FlowError> { + self.routes.entry(from_node_id).or_default().push(route); + Ok(()) + } + + async fn execute(&mut self, store: &mut S) -> Result { + self.validate()?; + self.internal_execute(store, self.config.start_node_id.clone()) + .await + } + + async fn execute_from( + &mut self, + store: &mut S, + start_node_id: String, + ) -> Result { + self.internal_execute(store, start_node_id).await + } + + fn config(&self) -> &FlowConfig { + &self.config + } + + fn set_config(&mut self, config: FlowConfig) { + self.config = config; + } + + fn validate(&self) -> Result<(), FlowError> { + self.internal_validate() + } +} + +impl Default for Flow { + fn default() -> Self { + Self::new() + } +} + +// Flow as Node implementation (async version) +#[async_trait] +impl Node for Flow { + type PrepResult = (); + type ExecResult = FlowExecutionResult; + type Error = NodeError; + + async fn prep(&mut self, _store: &S, _context: &ExecutionContext) -> Result<(), NodeError> { + self.validate() + .map_err(|e| NodeError::PreparationError(e.to_string()))?; + Ok(()) + } + + async fn exec( + &mut self, + _prep_result: (), + _context: &ExecutionContext, + ) -> Result { + // For now, we can't execute a flow as a node without a store + // This would need to be implemented differently + Err(NodeError::ExecutionError( + "Flow as Node execution not yet implemented".to_string(), + )) + } + + async fn post( + &mut self, + _store: &mut S, + _prep_result: (), + exec_result: FlowExecutionResult, + _context: 
&ExecutionContext, + ) -> Result { + // Return the final action from the flow execution + Ok(exec_result.final_action) + } + + fn name(&self) -> &'static str { + "Flow" + } +} + +/// Builder for creating async flows easily +pub struct FlowBuilder { + nodes: HashMap>>, + routes: HashMap>, + config: FlowConfig, +} + +impl Default for FlowBuilder { + fn default() -> Self { + Self::new() + } +} + +impl FlowBuilder { + /// Create a new flow builder + pub fn new() -> Self { + Self { + nodes: HashMap::new(), + routes: HashMap::new(), + config: FlowConfig::default(), + } + } + + /// Set the starting node ID + pub fn start_node(mut self, node_id: impl Into) -> Self { + self.config.start_node_id = node_id.into(); + self + } + + /// Set maximum execution steps + pub fn max_steps(mut self, max_steps: usize) -> Self { + self.config.max_steps = max_steps; + self + } + + /// Add a node to the flow + pub fn node(mut self, id: impl Into, node: T) -> Self + where + T: Node + Send + Sync + 'static, + { + self.nodes.insert(id.into(), Box::new(node)); + self + } + + /// Convenience method: add a node and set it as the starting node + pub fn start_with(mut self, id: impl Into, node: T) -> Self + where + T: Node + Send + Sync + 'static, + { + let id = id.into(); + self.config.start_node_id = id.clone(); + self.node(id, node) + } + + /// Add a simple route (action -> target node) + pub fn route( + mut self, + from: impl Into, + action: impl Into, + to: impl Into, + ) -> Self { + let from_id = from.into(); + let action_str = action.into(); + let to_id = to.into(); + + let route = Route { + action: action_str, + target_node_id: Some(to_id), + condition: None, + }; + + self.routes.entry(from_id).or_default().push(route); + self + } + + /// Add a conditional route + pub fn conditional_route( + mut self, + from: impl Into, + action: impl Into, + to: impl Into, + condition: RouteCondition, + ) -> Self { + let from_id = from.into(); + let action_str = action.into(); + let to_id = to.into(); + + let route = Route { + action: action_str, + target_node_id: Some(to_id), + condition: Some(condition), + }; + + self.routes.entry(from_id).or_default().push(route); + self + } + + /// Add an explicit terminal route that does not target any node + pub fn terminal_route(mut self, from: impl Into, action: impl Into) -> Self { + let from_id = from.into(); + let action_str = action.into(); + + let route = Route { + action: action_str, + target_node_id: None, // None indicates termination + condition: None, + }; + + self.routes.entry(from_id).or_default().push(route); + self + } + + /// Add an explicit conditional terminal route + pub fn conditional_terminal_route( + mut self, + from: impl Into, + action: impl Into, + condition: RouteCondition, + ) -> Self { + let from_id = from.into(); + let action_str = action.into(); + + let route = Route { + action: action_str, + target_node_id: None, // None indicates termination + condition: Some(condition), + }; + + self.routes.entry(from_id).or_default().push(route); + self + } + + /// Build the flow with the configured settings + pub fn build(self) -> Flow { + Flow { + nodes: self.nodes, + routes: self.routes, + config: self.config, + } + } + + /// Convenience method to create a self-routing loop + pub fn self_route(self, node_id: impl Into, action: impl Into) -> Self { + let node_id_str = node_id.into(); + self.route(node_id_str.clone(), action, node_id_str) + } +} diff --git a/cosmoflow/src/flow/macros.rs b/cosmoflow/src/flow/macros.rs index 37fc6aa..22dbdc3 100644 --- 
a/cosmoflow/src/flow/macros.rs +++ b/cosmoflow/src/flow/macros.rs @@ -216,22 +216,45 @@ macro_rules! flow { } $(,)? ) => { { - let mut builder = $crate::flow::FlowBuilder::<$storage>::new() - .start_node($start); + #[cfg(not(feature = "async"))] + { + let mut builder = $crate::flow::FlowBuilder::<$storage>::new() + .start_node($start); - $( - builder = builder.node($id, $backend); - )* + $( + builder = builder.node($id, $backend); + )* - $( - builder = builder.route($from, $action, $to); - )* + $( + builder = builder.route($from, $action, $to); + )* - $( - builder = builder.terminal_route($term_from, $term_action); - )* + $( + builder = builder.terminal_route($term_from, $term_action); + )* + + builder.build() + } + + #[cfg(feature = "async")] + { + let mut builder = $crate::flow::r#async::FlowBuilder::<$storage>::new() + .start_node($start); + + $( + builder = builder.node($id, $backend); + )* - builder.build() + $( + builder = builder.route($from, $action, $to); + )* + + $( + builder = builder.terminal_route($term_from, $term_action); + )* + + builder.build() + } } }; @@ -251,18 +274,37 @@ macro_rules! flow { } $(,)? ) => { { - let mut builder = $crate::flow::FlowBuilder::<$storage>::new() - .start_node($start); + #[cfg(not(feature = "async"))] + { + let mut builder = $crate::flow::FlowBuilder::<$storage>::new() + .start_node($start); - $( - builder = builder.node($id, $backend); - )* + $( + builder = builder.node($id, $backend); + )* - $( - builder = builder.route($from, $action, $to); - )* + $( + builder = builder.route($from, $action, $to); + )* + + builder.build() + } - builder.build() + #[cfg(feature = "async")] + { + let mut builder = $crate::flow::r#async::FlowBuilder::<$storage>::new() + .start_node($start); + + $( + builder = builder.node($id, $backend); + )* + + $( + builder = builder.route($from, $action, $to); + )* + + builder.build() + } } }; } @@ -274,162 +316,336 @@ pub use flow; mod tests { use super::*; use crate::action::Action; - use crate::flow::{FlowBackend, FlowBuilder}; - use crate::node::Node; use crate::shared_store::SharedStore; use crate::shared_store::backends::MemoryStorage; - // Test node implementations - struct TestStartNode; - - #[async_trait::async_trait] - impl Node for TestStartNode { - type PrepResult = (); - type ExecResult = (); - type Error = crate::node::NodeError; - - async fn prep( - &mut self, - _: &S, - _: &crate::node::ExecutionContext, - ) -> Result<(), Self::Error> { - Ok(()) - } + // Import the correct Node trait based on features + #[cfg(not(feature = "async"))] + use crate::node::Node; + #[cfg(feature = "async")] + use crate::node::r#async::Node; + + #[cfg(not(feature = "async"))] + use crate::flow::{FlowBackend, FlowBuilder}; + + #[cfg(feature = "async")] + use crate::flow::r#async::{FlowBackend, FlowBuilder}; + + // Test node implementations - sync version + #[cfg(not(feature = "async"))] + mod sync_nodes { + use super::*; + + pub struct TestStartNode; - async fn exec( - &mut self, - _: (), - _: &crate::node::ExecutionContext, - ) -> Result<(), Self::Error> { - Ok(()) + impl Node for TestStartNode { + type PrepResult = (); + type ExecResult = (); + type Error = crate::node::NodeError; + + fn prep( + &mut self, + _: &S, + _: &crate::node::ExecutionContext, + ) -> Result<(), Self::Error> { + Ok(()) + } + + fn exec( + &mut self, + _: (), + _: &crate::node::ExecutionContext, + ) -> Result<(), Self::Error> { + Ok(()) + } + + fn post( + &mut self, + _: &mut S, + _: (), + _: (), + _: &crate::node::ExecutionContext, + ) -> Result { + 
Ok(Action::simple("next")) + } } - async fn post( - &mut self, - _: &mut S, - _: (), - _: (), - _: &crate::node::ExecutionContext, - ) -> Result { - Ok(Action::simple("next")) + pub struct TestProcessNode; + + impl Node for TestProcessNode { + type PrepResult = (); + type ExecResult = (); + type Error = crate::node::NodeError; + + fn prep( + &mut self, + _: &S, + _: &crate::node::ExecutionContext, + ) -> Result<(), Self::Error> { + Ok(()) + } + + fn exec( + &mut self, + _: (), + _: &crate::node::ExecutionContext, + ) -> Result<(), Self::Error> { + Ok(()) + } + + fn post( + &mut self, + _: &mut S, + _: (), + _: (), + _: &crate::node::ExecutionContext, + ) -> Result { + Ok(Action::simple("next")) + } } - } - struct TestProcessNode; + pub struct TestEndNode; + + impl Node for TestEndNode { + type PrepResult = (); + type ExecResult = (); + type Error = crate::node::NodeError; - #[async_trait::async_trait] - impl Node for TestProcessNode { - type PrepResult = (); - type ExecResult = (); - type Error = crate::node::NodeError; + fn prep( + &mut self, + _: &S, + _: &crate::node::ExecutionContext, + ) -> Result<(), Self::Error> { + Ok(()) + } - async fn prep( - &mut self, - _: &S, - _: &crate::node::ExecutionContext, - ) -> Result<(), Self::Error> { - Ok(()) + fn exec( + &mut self, + _: (), + _: &crate::node::ExecutionContext, + ) -> Result<(), Self::Error> { + Ok(()) + } + + fn post( + &mut self, + _: &mut S, + _: (), + _: (), + _: &crate::node::ExecutionContext, + ) -> Result { + Ok(Action::simple("complete")) + } } - async fn exec( - &mut self, - _: (), - _: &crate::node::ExecutionContext, - ) -> Result<(), Self::Error> { - Ok(()) + pub struct TestCustomNode { + pub action: String, } - async fn post( - &mut self, - _: &mut S, - _: (), - _: (), - _: &crate::node::ExecutionContext, - ) -> Result { - Ok(Action::simple("next")) + impl TestCustomNode { + pub fn new(action: impl Into) -> Self { + Self { + action: action.into(), + } + } } - } - struct TestEndNode; + impl Node for TestCustomNode { + type PrepResult = (); + type ExecResult = (); + type Error = crate::node::NodeError; + + fn prep( + &mut self, + _: &S, + _: &crate::node::ExecutionContext, + ) -> Result<(), Self::Error> { + Ok(()) + } - #[async_trait::async_trait] - impl Node for TestEndNode { - type PrepResult = (); - type ExecResult = (); - type Error = crate::node::NodeError; + fn exec( + &mut self, + _: (), + _: &crate::node::ExecutionContext, + ) -> Result<(), Self::Error> { + Ok(()) + } - async fn prep( - &mut self, - _: &S, - _: &crate::node::ExecutionContext, - ) -> Result<(), Self::Error> { - Ok(()) + fn post( + &mut self, + _: &mut S, + _: (), + _: (), + _: &crate::node::ExecutionContext, + ) -> Result { + Ok(Action::simple(&self.action)) + } } + } + + // Test node implementations - async version + #[cfg(feature = "async")] + mod async_nodes { + use super::*; + use async_trait::async_trait; + + pub struct TestStartNode; + + #[async_trait] + impl Node for TestStartNode { + type PrepResult = (); + type ExecResult = (); + type Error = crate::node::NodeError; + + async fn prep( + &mut self, + _: &S, + _: &crate::node::ExecutionContext, + ) -> Result<(), Self::Error> { + Ok(()) + } + + async fn exec( + &mut self, + _: (), + _: &crate::node::ExecutionContext, + ) -> Result<(), Self::Error> { + Ok(()) + } - async fn exec( - &mut self, - _: (), - _: &crate::node::ExecutionContext, - ) -> Result<(), Self::Error> { - Ok(()) + async fn post( + &mut self, + _: &mut S, + _: (), + _: (), + _: &crate::node::ExecutionContext, + ) -> Result { + 
Ok(Action::simple("next")) + } } - async fn post( - &mut self, - _: &mut S, - _: (), - _: (), - _: &crate::node::ExecutionContext, - ) -> Result { - Ok(Action::simple("complete")) + pub struct TestProcessNode; + + #[async_trait] + impl Node for TestProcessNode { + type PrepResult = (); + type ExecResult = (); + type Error = crate::node::NodeError; + + async fn prep( + &mut self, + _: &S, + _: &crate::node::ExecutionContext, + ) -> Result<(), Self::Error> { + Ok(()) + } + + async fn exec( + &mut self, + _: (), + _: &crate::node::ExecutionContext, + ) -> Result<(), Self::Error> { + Ok(()) + } + + async fn post( + &mut self, + _: &mut S, + _: (), + _: (), + _: &crate::node::ExecutionContext, + ) -> Result { + Ok(Action::simple("next")) + } } - } - struct TestCustomNode { - action: String, - } + pub struct TestEndNode; + + #[async_trait] + impl Node for TestEndNode { + type PrepResult = (); + type ExecResult = (); + type Error = crate::node::NodeError; - impl TestCustomNode { - fn new(action: impl Into) -> Self { - Self { - action: action.into(), + async fn prep( + &mut self, + _: &S, + _: &crate::node::ExecutionContext, + ) -> Result<(), Self::Error> { + Ok(()) + } + + async fn exec( + &mut self, + _: (), + _: &crate::node::ExecutionContext, + ) -> Result<(), Self::Error> { + Ok(()) + } + + async fn post( + &mut self, + _: &mut S, + _: (), + _: (), + _: &crate::node::ExecutionContext, + ) -> Result { + Ok(Action::simple("complete")) } } - } - #[async_trait::async_trait] - impl Node for TestCustomNode { - type PrepResult = (); - type ExecResult = (); - type Error = crate::node::NodeError; - - async fn prep( - &mut self, - _: &S, - _: &crate::node::ExecutionContext, - ) -> Result<(), Self::Error> { - Ok(()) + pub struct TestCustomNode { + pub action: String, } - async fn exec( - &mut self, - _: (), - _: &crate::node::ExecutionContext, - ) -> Result<(), Self::Error> { - Ok(()) + impl TestCustomNode { + pub fn new(action: impl Into) -> Self { + Self { + action: action.into(), + } + } } - async fn post( - &mut self, - _: &mut S, - _: (), - _: (), - _: &crate::node::ExecutionContext, - ) -> Result { - Ok(Action::simple(&self.action)) + #[async_trait] + impl Node for TestCustomNode { + type PrepResult = (); + type ExecResult = (); + type Error = crate::node::NodeError; + + async fn prep( + &mut self, + _: &S, + _: &crate::node::ExecutionContext, + ) -> Result<(), Self::Error> { + Ok(()) + } + + async fn exec( + &mut self, + _: (), + _: &crate::node::ExecutionContext, + ) -> Result<(), Self::Error> { + Ok(()) + } + + async fn post( + &mut self, + _: &mut S, + _: (), + _: (), + _: &crate::node::ExecutionContext, + ) -> Result { + Ok(Action::simple(&self.action)) + } } } + // Use appropriate node implementations based on feature + #[cfg(feature = "async")] + use async_nodes::*; + #[cfg(not(feature = "async"))] + use sync_nodes::*; + // Test: Structured syntax with explicit terminal routes #[test] fn test_flow_macro_with_terminal_routes() { @@ -476,98 +692,199 @@ mod tests { // Test legacy syntax for backward compatibility } - // Test: New macro with terminal routes execution - #[tokio::test] - async fn test_flow_macro_with_terminal_routes_execution() { - let mut workflow = flow! 
{ - storage: MemoryStorage, - start: "entry", - nodes: { - "entry": TestCustomNode::new("default"), - "process": TestCustomNode::new("continue"), - "end": TestEndNode, - }, - routes: { - "entry" - "default" => "process", - "process" - "continue" => "end", - }, - terminals: { - "end" - "complete", - } - }; + // Sync execution tests + #[cfg(not(feature = "async"))] + mod sync_tests { + use super::*; + + #[test] + fn test_flow_macro_with_terminal_routes_execution() { + let mut workflow = flow! { + storage: MemoryStorage, + start: "entry", + nodes: { + "entry": TestCustomNode::new("default"), + "process": TestCustomNode::new("continue"), + "end": TestEndNode, + }, + routes: { + "entry" - "default" => "process", + "process" - "continue" => "end", + }, + terminals: { + "end" - "complete", + } + }; + + let mut store = MemoryStorage::new(); + let result = workflow.execute(&mut store); + + assert!(result.is_ok(), "Flow macro execution should succeed"); + let execution_result = result.unwrap(); + assert!(execution_result.success); + assert_eq!(execution_result.steps_executed, 3); + assert_eq!( + execution_result.execution_path, + vec!["entry", "process", "end"] + ); + } - let mut store = MemoryStorage::new(); - let result = workflow.execute(&mut store).await; - - assert!(result.is_ok(), "Flow macro execution should succeed"); - let execution_result = result.unwrap(); - assert!(execution_result.success); - assert_eq!(execution_result.steps_executed, 3); - assert_eq!( - execution_result.execution_path, - vec!["entry", "process", "end"] - ); - } + #[test] + fn test_flow_macro_legacy_execution() { + let mut workflow = FlowBuilder::new() + .start_node("entry") + .node("entry", TestCustomNode::new("default")) + .node("process", TestCustomNode::new("continue")) + .node("end", TestEndNode) + .route("entry", "default", "process") + .route("process", "continue", "end") + .terminal_route("end", "complete") + .build(); + + let mut store = MemoryStorage::new(); + let result = workflow.execute(&mut store); + + assert!(result.is_ok(), "Legacy flow execution should succeed"); + let execution_result = result.unwrap(); + assert!(execution_result.success); + assert_eq!(execution_result.steps_executed, 3); + assert_eq!( + execution_result.execution_path, + vec!["entry", "process", "end"] + ); + } - // Test: Legacy execution test (updated to use explicit terminal routes) - #[tokio::test] - async fn test_flow_macro_legacy_execution() { - let mut workflow = FlowBuilder::new() - .start_node("entry") - .node("entry", TestCustomNode::new("default")) - .node("process", TestCustomNode::new("continue")) - .node("end", TestEndNode) - .route("entry", "default", "process") - .route("process", "continue", "end") - .terminal_route("end", "complete") - .build(); - - let mut store = MemoryStorage::new(); - let result = workflow.execute(&mut store).await; - - assert!(result.is_ok(), "Legacy flow execution should succeed"); - let execution_result = result.unwrap(); - assert!(execution_result.success); - assert_eq!(execution_result.steps_executed, 3); - assert_eq!( - execution_result.execution_path, - vec!["entry", "process", "end"] - ); + #[test] + fn test_flow_macro_multiple_terminal_routes() { + let mut success_workflow = flow! 
{ + storage: MemoryStorage, + start: "check", + nodes: { + "check": TestCustomNode::new("success"), + "success_handler": TestEndNode, + "error_handler": TestEndNode, + }, + routes: { + "check" - "success" => "success_handler", + "check" - "error" => "error_handler", + }, + terminals: { + "success_handler" - "complete", + "error_handler" - "failed", + } + }; + + let mut store = MemoryStorage::new(); + let result = success_workflow.execute(&mut store); + + assert!( + result.is_ok(), + "Multiple terminal routes workflow should succeed" + ); + let execution_result = result.unwrap(); + assert!(execution_result.success); + assert_eq!( + execution_result.execution_path, + vec!["check", "success_handler"] + ); + } } - // Test: Multiple terminal routes - #[tokio::test] - async fn test_flow_macro_multiple_terminal_routes() { - let mut success_workflow = flow! { - storage: MemoryStorage, - start: "check", - nodes: { - "check": TestCustomNode::new("success"), - "success_handler": TestEndNode, - "error_handler": TestEndNode, - }, - routes: { - "check" - "success" => "success_handler", - "check" - "error" => "error_handler", - }, - terminals: { - "success_handler" - "complete", - "error_handler" - "failed", - } - }; + // Async execution tests + #[cfg(feature = "async")] + mod async_tests { + use super::*; + + #[tokio::test] + async fn test_flow_macro_with_terminal_routes_execution() { + let mut workflow = flow! { + storage: MemoryStorage, + start: "entry", + nodes: { + "entry": TestCustomNode::new("default"), + "process": TestCustomNode::new("continue"), + "end": TestEndNode, + }, + routes: { + "entry" - "default" => "process", + "process" - "continue" => "end", + }, + terminals: { + "end" - "complete", + } + }; + + let mut store = MemoryStorage::new(); + let result = workflow.execute(&mut store).await; + + assert!(result.is_ok(), "Flow macro execution should succeed"); + let execution_result = result.unwrap(); + assert!(execution_result.success); + assert_eq!(execution_result.steps_executed, 3); + assert_eq!( + execution_result.execution_path, + vec!["entry", "process", "end"] + ); + } - let mut store = MemoryStorage::new(); - let result = success_workflow.execute(&mut store).await; - - assert!( - result.is_ok(), - "Multiple terminal routes workflow should succeed" - ); - let execution_result = result.unwrap(); - assert!(execution_result.success); - assert_eq!( - execution_result.execution_path, - vec!["check", "success_handler"] - ); + #[tokio::test] + async fn test_flow_macro_legacy_execution() { + let mut workflow = FlowBuilder::new() + .start_node("entry") + .node("entry", TestCustomNode::new("default")) + .node("process", TestCustomNode::new("continue")) + .node("end", TestEndNode) + .route("entry", "default", "process") + .route("process", "continue", "end") + .terminal_route("end", "complete") + .build(); + + let mut store = MemoryStorage::new(); + let result = workflow.execute(&mut store).await; + + assert!(result.is_ok(), "Legacy flow execution should succeed"); + let execution_result = result.unwrap(); + assert!(execution_result.success); + assert_eq!(execution_result.steps_executed, 3); + assert_eq!( + execution_result.execution_path, + vec!["entry", "process", "end"] + ); + } + + #[tokio::test] + async fn test_flow_macro_multiple_terminal_routes() { + let mut success_workflow = flow! 
{ + storage: MemoryStorage, + start: "check", + nodes: { + "check": TestCustomNode::new("success"), + "success_handler": TestEndNode, + "error_handler": TestEndNode, + }, + routes: { + "check" - "success" => "success_handler", + "check" - "error" => "error_handler", + }, + terminals: { + "success_handler" - "complete", + "error_handler" - "failed", + } + }; + + let mut store = MemoryStorage::new(); + let result = success_workflow.execute(&mut store).await; + + assert!( + result.is_ok(), + "Multiple terminal routes workflow should succeed" + ); + let execution_result = result.unwrap(); + assert!(execution_result.success); + assert_eq!( + execution_result.execution_path, + vec!["check", "success_handler"] + ); + } } } diff --git a/cosmoflow/src/flow/mod.rs b/cosmoflow/src/flow/mod.rs index 7a08933..a3db872 100644 --- a/cosmoflow/src/flow/mod.rs +++ b/cosmoflow/src/flow/mod.rs @@ -16,14 +16,13 @@ //! //! ## Quick Start //! -//! ```rust -//! # #[cfg(feature = "storage-memory")] -//! # { -//! use cosmoflow::flow::{Flow, FlowBuilder, FlowBackend}; +//! ```rust,no_run +//! # #[cfg(all(feature = "async", feature = "storage-memory"))] +//! # async fn example() -> Result<(), Box> { +//! use cosmoflow::{Flow, FlowBuilder, FlowBackend}; //! use cosmoflow::shared_store::SharedStore; //! use cosmoflow::shared_store::backends::MemoryStorage; //! -//! # async fn example() -> Result<(), Box> { //! // Create a shared store //! let mut store = MemoryStorage::new(); //! @@ -37,7 +36,6 @@ //! println!("Flow completed with {} steps", result.steps_executed); //! # Ok(()) //! # } -//! # } //! ``` //! //! ## Core Types @@ -52,12 +50,12 @@ //! //! The flow crate provides comprehensive error handling through [`FlowError`]: //! -//! ```rust -//! # #[cfg(feature = "storage-memory")] -//! # { -//! use cosmoflow::flow::{Flow, errors::FlowError, FlowBackend}; +//! ```rust,no_run +//! # #[cfg(all(feature = "async", feature = "storage-memory"))] +//! # async fn example() -> Result<(), Box> { +//! use cosmoflow::{Flow, FlowBackend}; +//! use cosmoflow::flow::errors::FlowError; //! -//! # async fn example() -> Result<(), FlowError> { //! # let mut flow = Flow::new(); //! # let mut store = cosmoflow::shared_store::backends::MemoryStorage::new(); //! match flow.execute(&mut store).await { @@ -70,7 +68,6 @@ //! } //! # Ok(()) //! # } -//! # } //! ``` /// The errors module contains the error types for the flow crate. @@ -80,50 +77,51 @@ pub mod macros; /// The route module contains the `Route` struct and `RouteCondition` enum. pub mod route; +/// Async-specific implementations (only available with "async" feature) +#[cfg(feature = "async")] +pub mod r#async; + +#[cfg(feature = "async")] +pub use r#async::NodeRunner; + +#[cfg(not(feature = "async"))] use std::collections::HashMap; +#[cfg(not(feature = "async"))] use std::time::Duration; use crate::action::Action; +#[cfg(not(feature = "async"))] use crate::node::{ExecutionContext, Node, NodeError}; use crate::shared_store::SharedStore; -use async_trait::async_trait; use errors::FlowError; -use route::{Route, RouteCondition}; +use route::Route; +#[cfg(not(feature = "async"))] +use route::RouteCondition; -/// Node runner trait for workflow execution +/// Node runner trait for workflow execution (sync version only) /// /// This trait provides a unified interface for executing nodes with different /// associated types in the same flow, allowing the flow system to work with /// heterogeneous node collections while maintaining type safety. 
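Concretely, the blanket implementation below is what enables the type erasure: two nodes with different `PrepResult`/`ExecResult` associated types can live in the same map once boxed behind the runner trait. A sketch, assuming the trait is generic over the storage type as it is used throughout this module (the two parameters stand in for any sync `Node` implementations):

```rust
use std::collections::HashMap;
use cosmoflow::flow::NodeRunner;
use cosmoflow::shared_store::backends::MemoryStorage;

// `greet` and `count` may be completely different Node types; the NodeRunner
// blanket impl erases their differing associated types so one map holds both.
fn collect_nodes(
    greet: impl NodeRunner<MemoryStorage> + 'static,
    count: impl NodeRunner<MemoryStorage> + 'static,
) -> HashMap<String, Box<dyn NodeRunner<MemoryStorage>>> {
    let mut nodes: HashMap<String, Box<dyn NodeRunner<MemoryStorage>>> = HashMap::new();
    nodes.insert("greet".to_string(), Box::new(greet));
    nodes.insert("count".to_string(), Box::new(count));
    nodes
}
```

This is exactly the shape the `Flow` struct's internal node map uses, so anything accepted by `FlowBuilder::node` can be stored alongside unrelated node types.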
-/// -/// ## Purpose -/// -/// The NodeRunner trait provides a simplified interface that: -/// - Enables different node types to work together in the same flow -/// - Maintains a consistent execution interface for the flow system -/// - Enables storage of different node types in the same collection -/// - Preserves type safety -#[async_trait] +#[cfg(not(feature = "async"))] pub trait NodeRunner: Send + Sync { /// Execute the node and return the resulting action - async fn run(&mut self, store: &mut S) -> Result; + fn run(&mut self, store: &mut S) -> Result; /// Get the node's name for debugging and logging fn name(&self) -> &str; } -/// Implementation of NodeRunner for any Node -/// -/// Any type that implements Node automatically implements NodeRunner. -#[async_trait] +/// Implementation of NodeRunner for any Node (sync version only) +#[cfg(not(feature = "async"))] impl NodeRunner for T where T: Node + Send + Sync, S: SharedStore + Send + Sync, { - async fn run(&mut self, store: &mut S) -> Result { - Node::run(self, store).await + fn run(&mut self, store: &mut S) -> Result { + Node::run(self, store) } fn name(&self) -> &str { @@ -139,9 +137,9 @@ where /// # Examples /// /// ```rust -/// # #[cfg(feature = "storage-memory")] +/// # #[cfg(all(feature = "storage-memory", feature = "async"))] /// # { -/// use cosmoflow::flow::{Flow, FlowExecutionResult, FlowBackend}; +/// use cosmoflow::{Flow, FlowExecutionResult, FlowBackend}; /// use cosmoflow::action::Action; /// /// # async fn example() -> Result<(), Box> { @@ -208,7 +206,6 @@ impl Default for FlowConfig { } /// Trait for implementing flow execution logic -#[async_trait] pub trait FlowBackend { /// Add a node to the flow fn add_node(&mut self, id: String, node: Box>) -> Result<(), FlowError>; @@ -217,10 +214,10 @@ pub trait FlowBackend { fn add_route(&mut self, from_node_id: String, route: Route) -> Result<(), FlowError>; /// Execute the flow starting from the configured start node - async fn execute(&mut self, store: &mut S) -> Result; + fn execute(&mut self, store: &mut S) -> Result; /// Execute the flow starting from a specific node - async fn execute_from( + fn execute_from( &mut self, store: &mut S, start_node_id: String, @@ -237,18 +234,21 @@ pub trait FlowBackend { } /// Builder for creating flows easily +#[cfg(not(feature = "async"))] pub struct FlowBuilder { nodes: HashMap>>, routes: HashMap>, config: FlowConfig, } +#[cfg(not(feature = "async"))] impl Default for FlowBuilder { fn default() -> Self { Self::new() } } +#[cfg(not(feature = "async"))] impl FlowBuilder { /// Create a new flow builder pub fn new() -> Self { @@ -376,7 +376,8 @@ impl FlowBuilder { /// ```rust /// # #[cfg(feature = "storage-memory")] /// # { - /// use cosmoflow::flow::{FlowBuilder, route::RouteCondition}; + /// use cosmoflow::FlowBuilder; + /// use cosmoflow::flow::route::RouteCondition; /// use cosmoflow::shared_store::backends::MemoryStorage; /// /// let flow = FlowBuilder::::new() @@ -461,7 +462,7 @@ impl FlowBuilder { /// ```rust /// # #[cfg(feature = "storage-memory")] /// # { -/// use cosmoflow::flow::{Flow, FlowConfig}; +/// use cosmoflow::{Flow, FlowConfig}; /// use cosmoflow::shared_store::backends::MemoryStorage; /// /// // Create a flow with default configuration @@ -481,12 +482,14 @@ impl FlowBuilder { /// The `Flow` struct is designed to be used in single-threaded contexts within /// CosmoFlow's execution model. For concurrent execution of multiple workflows, /// create separate `Flow` instances for each workflow. 
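+/// # Configuration
+///
+/// A minimal sketch of overriding the configuration on a sync flow (values are
+/// illustrative; the `..Default::default()` spread assumes no fields beyond
+/// those shown in this module need to be set):
+///
+/// ```rust,no_run
+/// # #[cfg(all(not(feature = "async"), feature = "storage-memory"))]
+/// # {
+/// use cosmoflow::{Flow, FlowBackend, FlowConfig};
+/// use cosmoflow::shared_store::backends::MemoryStorage;
+///
+/// let config = FlowConfig {
+///     start_node_id: "ingest".to_string(),
+///     max_steps: 250, // guard against runaway routing loops
+///     ..FlowConfig::default()
+/// };
+/// let mut flow: Flow<MemoryStorage> = Flow::new();
+/// flow.set_config(config);
+/// assert_eq!(flow.config().max_steps, 250);
+/// # }
+/// ```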
+#[cfg(not(feature = "async"))] pub struct Flow { nodes: HashMap>>, routes: HashMap>, config: FlowConfig, } +#[cfg(not(feature = "async"))] impl Flow { /// Create a new basic flow with default configuration /// @@ -526,7 +529,7 @@ impl Flow { /// ```rust /// # #[cfg(feature = "storage-memory")] /// # { - /// use cosmoflow::flow::{Flow, FlowConfig}; + /// use cosmoflow::{Flow, FlowConfig}; /// use cosmoflow::shared_store::backends::MemoryStorage; /// /// let config = FlowConfig { @@ -599,7 +602,8 @@ impl Flow { } } -#[async_trait] +// Implementation of FlowBackend for Flow +#[cfg(not(feature = "async"))] impl FlowBackend for Flow where S::Error: Send + Sync + 'static, @@ -699,29 +703,25 @@ where /// /// # Examples /// - /// ```rust - /// # #[cfg(feature = "storage-memory")] + /// ```rust,no_run + /// # #[cfg(all(not(feature = "async"), feature = "storage-memory"))] /// # { /// use cosmoflow::flow::{Flow, FlowBackend, FlowConfig}; /// use cosmoflow::shared_store::SharedStore; /// use cosmoflow::shared_store::backends::MemoryStorage; /// - /// # async { /// let mut flow: Flow = Flow::new(); /// let mut store = MemoryStorage::new(); /// /// // Add nodes and routes to flow... /// - /// let result = flow.execute(&mut store).await.unwrap(); - /// # }; - /// # } - /// ``` + /// let result = flow.execute(&mut store).unwrap(); /// println!("Execution completed in {} steps", result.steps_executed); - /// # }; + /// # } /// ``` - async fn execute(&mut self, store: &mut S) -> Result { + fn execute(&mut self, store: &mut S) -> Result { let start_node_id = self.config.start_node_id.clone(); - self.execute_from(store, start_node_id).await + self.execute_from(store, start_node_id) } /// Execute the flow from a specific node @@ -756,24 +756,21 @@ where /// /// # Examples /// - /// ```rust - /// # #[cfg(feature = "storage-memory")] + /// ```rust,no_run + /// # #[cfg(all(not(feature = "async"), feature = "storage-memory"))] /// # { /// use cosmoflow::flow::{Flow, FlowBackend}; /// use cosmoflow::shared_store::SharedStore; /// use cosmoflow::shared_store::backends::MemoryStorage; /// - /// # async { /// let mut flow: Flow = Flow::new(); /// let mut store = MemoryStorage::new(); /// /// // Execute from a specific node (useful for testing) - /// let result = flow.execute_from(&mut store, "validation_step".to_string()).await.unwrap(); - /// # }; + /// let result = flow.execute_from(&mut store, "validation_step".to_string()).unwrap(); /// # } /// ``` - /// ``` - async fn execute_from( + fn execute_from( &mut self, store: &mut S, start_node_id: String, @@ -798,7 +795,7 @@ where .ok_or_else(|| FlowError::NodeNotFound(current_node_id.clone()))?; // Execute the node - let action = node.run(store).await?; + let action = node.run(store)?; steps_executed += 1; // Find next node @@ -949,13 +946,14 @@ where } } +#[cfg(not(feature = "async"))] impl Default for Flow { fn default() -> Self { Self::new() } } -/// Implementation of Node for Flow, allowing flows to be nested +/// Implementation of Node for Flow, allowing flows to be nested (sync version) /// /// This implementation enables flows to be used as nodes within other flows, /// creating hierarchical workflow structures. 
Key features: @@ -964,7 +962,7 @@ impl Default for Flow { /// - Nesting depth protection to prevent infinite recursion /// - Result storage in shared store for parent flow access /// - Proper error propagation through the flow hierarchy -#[async_trait] +#[cfg(not(feature = "async"))] impl Node for Flow where S::Error: Send + Sync + 'static, @@ -973,7 +971,7 @@ where type ExecResult = FlowExecutionResult; type Error = FlowError; - async fn prep( + fn prep( &mut self, _store: &S, context: &ExecutionContext, @@ -995,7 +993,7 @@ where Ok(()) } - async fn exec( + fn exec( &mut self, _prep_result: Self::PrepResult, context: &ExecutionContext, @@ -1011,7 +1009,7 @@ where }) } - async fn post( + fn post( &mut self, store: &mut S, _prep_result: Self::PrepResult, @@ -1031,7 +1029,7 @@ where .map_err(|e| FlowError::NodeError(format!("Failed to set nesting depth: {e}")))?; // Execute the nested flow - let result = self.execute(store).await?; + let result = self.execute(store)?; // Store the nested flow result in the shared store for parent flow access let result_key = format!("nested_flow_result_{}", context.execution_id()); @@ -1063,177 +1061,427 @@ where } } -#[cfg(all(test, feature = "storage-memory"))] +/// Implementation of Node for Flow, allowing flows to be nested (sync version) +/// +/// This implementation enables flows to be used as nodes within other flows, +/// creating hierarchical workflow structures. Key features: +/// +/// - Automatic flow validation during preparation +/// - Nesting depth protection to prevent infinite recursion +/// - Result storage in shared store for parent flow access +/// - Proper error propagation through the flow hierarchy +#[cfg(all(test, feature = "storage-memory", not(feature = "async")))] mod tests { use super::*; use crate::action::Action; use crate::node::ExecutionContext; use crate::shared_store::backends::MemoryStorage; - use async_trait::async_trait; - // Test helper node - struct TestNode { - action: Action, - should_fail: bool, - } + // Sync test helper node + #[cfg(not(feature = "async"))] + mod sync_test_node { + use super::*; + + pub struct TestNode { + pub action: Action, + pub should_fail: bool, + } - impl TestNode { - fn new(action: Action) -> Self { - Self { - action, - should_fail: false, + impl TestNode { + pub fn new(action: Action) -> Self { + Self { + action, + should_fail: false, + } } } - } - #[async_trait] - impl Node for TestNode { - type PrepResult = (); - type ExecResult = (); - type Error = NodeError; - - async fn prep( - &mut self, - _store: &MemoryStorage, - _context: &ExecutionContext, - ) -> Result { - if self.should_fail { - return Err(NodeError::PrepError("Test failure".to_string())); + impl Node for TestNode { + type PrepResult = (); + type ExecResult = (); + type Error = NodeError; + + fn prep( + &mut self, + _store: &MemoryStorage, + _context: &ExecutionContext, + ) -> Result { + if self.should_fail { + return Err(NodeError::PreparationError("Test failure".to_string())); + } + Ok(()) + } + + fn exec( + &mut self, + _prep_result: Self::PrepResult, + _context: &ExecutionContext, + ) -> Result { + Ok(()) + } + + fn post( + &mut self, + _store: &mut MemoryStorage, + _prep_result: Self::PrepResult, + _exec_result: Self::ExecResult, + _context: &ExecutionContext, + ) -> Result { + Ok(self.action.clone()) } - Ok(()) } + } + + // Async test helper node + #[cfg(feature = "async")] + mod async_test_node { + use super::*; + use async_trait::async_trait; - async fn exec( - &mut self, - _prep_result: Self::PrepResult, - _context: 
&ExecutionContext, - ) -> Result { - Ok(()) + pub struct TestNode { + pub action: Action, + pub should_fail: bool, } - async fn post( - &mut self, - _store: &mut MemoryStorage, - _prep_result: Self::PrepResult, - _exec_result: Self::ExecResult, - _context: &ExecutionContext, - ) -> Result { - Ok(self.action.clone()) + impl TestNode { + pub fn new(action: Action) -> Self { + Self { + action, + should_fail: false, + } + } } - } - #[tokio::test] - async fn test_basic_flow_execution() { - let mut flow = FlowBuilder::new() - .start_node("start") - .node("start", TestNode::new(Action::simple("next"))) - .node("middle", TestNode::new(Action::simple("end"))) - .route("start", "next", "middle") - .terminal_route("middle", "end") - .build(); + #[async_trait] + impl Node for TestNode { + type PrepResult = (); + type ExecResult = (); + type Error = NodeError; + + async fn prep( + &mut self, + _store: &MemoryStorage, + _context: &ExecutionContext, + ) -> Result { + if self.should_fail { + return Err(NodeError::PreparationError("Test failure".to_string())); + } + Ok(()) + } - let mut store = MemoryStorage::new(); - let result = flow.execute(&mut store).await; + async fn exec( + &mut self, + _prep_result: Self::PrepResult, + _context: &ExecutionContext, + ) -> Result { + Ok(()) + } - if let Err(e) = &result { - eprintln!("Flow execution failed: {:?}", e); + async fn post( + &mut self, + _store: &mut MemoryStorage, + _prep_result: Self::PrepResult, + _exec_result: Self::ExecResult, + _context: &ExecutionContext, + ) -> Result { + Ok(self.action.clone()) + } } - assert!(result.is_ok()); - let result = result.unwrap(); - assert!(result.success); - assert_eq!(result.steps_executed, 2); - assert_eq!(result.execution_path, vec!["start", "middle"]); } - #[tokio::test] - async fn test_flow_max_steps_exceeded() { - let mut flow = FlowBuilder::new() - .start_node("start") - .max_steps(2) - .node("start", TestNode::new(Action::simple("next"))) - .node("middle", TestNode::new(Action::simple("continue"))) - .node("end_node", TestNode::new(Action::simple("end"))) - .route("start", "next", "middle") - .route("middle", "continue", "end_node") - .route("end_node", "end", "final") - .build(); + // Use appropriate TestNode implementation + #[cfg(feature = "async")] + use async_test_node::TestNode; + #[cfg(not(feature = "async"))] + use sync_test_node::TestNode; + + // Sync tests + #[cfg(not(feature = "async"))] + mod sync_tests { + use super::*; + + #[test] + fn test_basic_flow_execution() { + let mut flow = FlowBuilder::new() + .start_node("start") + .node("start", TestNode::new(Action::simple("next"))) + .node("middle", TestNode::new(Action::simple("end"))) + .route("start", "next", "middle") + .terminal_route("middle", "end") + .build(); + + let mut store = MemoryStorage::new(); + let result = flow.execute(&mut store); + + if let Err(e) = &result { + eprintln!("Flow execution failed: {:?}", e); + } + assert!(result.is_ok()); + let result = result.unwrap(); + assert!(result.success); + assert_eq!(result.steps_executed, 2); + assert_eq!(result.execution_path, vec!["start", "middle"]); + } - let mut store = MemoryStorage::new(); - let result = flow.execute(&mut store).await; + #[test] + fn test_flow_max_steps_exceeded() { + let mut flow = FlowBuilder::new() + .start_node("start") + .max_steps(2) + .node("start", TestNode::new(Action::simple("next"))) + .node("middle", TestNode::new(Action::simple("continue"))) + .node("end_node", TestNode::new(Action::simple("end"))) + .route("start", "next", "middle") + .route("middle", 
"continue", "end_node") + .route("end_node", "end", "final") + .build(); + + let mut store = MemoryStorage::new(); + let result = flow.execute(&mut store); + + assert!(result.is_err()); + match result.unwrap_err() { + FlowError::MaxStepsExceeded(max) => assert_eq!(max, 2), + _ => panic!("Expected MaxStepsExceeded error"), + } + } - assert!(result.is_err()); - match result.unwrap_err() { - FlowError::MaxStepsExceeded(max) => assert_eq!(max, 2), - _ => panic!("Expected MaxStepsExceeded error"), + #[test] + fn test_flow_node_not_found() { + let mut flow = FlowBuilder::new() + .start_node("start") + .node("start", TestNode::new(Action::simple("next"))) + .route("start", "next", "nonexistent") + .build(); + + let mut store = MemoryStorage::new(); + let result = flow.execute(&mut store); + + assert!(result.is_err()); + match result.unwrap_err() { + FlowError::NodeNotFound(id) => assert_eq!(id, "nonexistent"), + _ => panic!("Expected NodeNotFound error"), + } } - } - #[tokio::test] - async fn test_flow_node_not_found() { - let mut flow = FlowBuilder::new() - .start_node("start") - .node("start", TestNode::new(Action::simple("next"))) - .route("start", "next", "nonexistent") - .build(); + #[test] + fn test_flow_no_route_found() { + let mut flow = FlowBuilder::new() + .start_node("start") + .node("start", TestNode::new(Action::simple("unknown"))) + .build(); + + let mut store = MemoryStorage::new(); + let result = flow.execute(&mut store); + + assert!(result.is_err()); + match result.unwrap_err() { + FlowError::NoRouteFound(node_id, action) => { + assert_eq!(node_id, "start"); + assert_eq!(action, "unknown"); + } + _ => panic!("Expected NoRouteFound error"), + } + } + + #[test] + fn test_flow_validation() { + let flow = FlowBuilder::new() + .start_node("nonexistent") + .node("start", TestNode::new(Action::simple("next"))) + .build(); + + let result = flow.validate(); + assert!(result.is_err()); + match result.unwrap_err() { + FlowError::InvalidConfiguration(msg) => { + assert!(msg.contains("Start node 'nonexistent' not found")); + } + _ => panic!("Expected InvalidConfiguration error"), + } + } - let mut store = MemoryStorage::new(); - let result = flow.execute(&mut store).await; + #[test] + fn test_flow_builder_methods() { + let flow: Flow = FlowBuilder::new() + .start_node("custom_start") + .max_steps(500) + .build(); - assert!(result.is_err()); - match result.unwrap_err() { - FlowError::NodeNotFound(id) => assert_eq!(id, "nonexistent"), - _ => panic!("Expected NodeNotFound error"), + assert_eq!(flow.config().start_node_id, "custom_start"); + assert_eq!(flow.config().max_steps, 500); + } + + #[test] + fn test_conditional_route() { + use crate::flow::route::RouteCondition; + + let mut flow = FlowBuilder::new() + .start_node("start") + .node("start", TestNode::new(Action::simple("check"))) + .node("success", TestNode::new(Action::simple("end"))) + .node("failure", TestNode::new(Action::simple("end"))) + .conditional_route("start", "check", "success", RouteCondition::Always) + .terminal_route("success", "end") + .terminal_route("failure", "end") + .build(); + + let mut store = MemoryStorage::new(); + let result = flow.execute(&mut store); + + if let Err(e) = &result { + eprintln!("Conditional route test failed: {:?}", e); + } + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.execution_path, vec!["start", "success"]); } } - #[tokio::test] - async fn test_flow_no_route_found() { - let mut flow = FlowBuilder::new() - .start_node("start") - .node("start", 
TestNode::new(Action::simple("unknown")))
-            .build();
+    // Async tests
+    #[cfg(feature = "async")]
+    mod async_tests {
+        use super::*;
+
+        #[tokio::test]
+        async fn test_basic_flow_execution() {
+            let mut flow = FlowBuilder::new()
+                .start_node("start")
+                .node("start", TestNode::new(Action::simple("next")))
+                .node("middle", TestNode::new(Action::simple("end")))
+                .route("start", "next", "middle")
+                .terminal_route("middle", "end")
+                .build();
+
+            let mut store = MemoryStorage::new();
+            let result = flow.execute(&mut store).await;
+
+            if let Err(e) = &result {
+                eprintln!("Flow execution failed: {:?}", e);
+            }
+            assert!(result.is_ok());
+            let result = result.unwrap();
+            assert!(result.success);
+            assert_eq!(result.steps_executed, 2);
+            assert_eq!(result.execution_path, vec!["start", "middle"]);
+        }
-        let mut store = MemoryStorage::new();
-        let result = flow.execute(&mut store).await;
+        #[tokio::test]
+        async fn test_flow_max_steps_exceeded() {
+            let mut flow = FlowBuilder::new()
+                .start_node("start")
+                .max_steps(2)
+                .node("start", TestNode::new(Action::simple("next")))
+                .node("middle", TestNode::new(Action::simple("continue")))
+                .node("end_node", TestNode::new(Action::simple("end")))
+                .route("start", "next", "middle")
+                .route("middle", "continue", "end_node")
+                .route("end_node", "end", "final")
+                .build();
+
+            let mut store = MemoryStorage::new();
+            let result = flow.execute(&mut store).await;
+
+            assert!(result.is_err());
+            match result.unwrap_err() {
+                FlowError::MaxStepsExceeded(max) => assert_eq!(max, 2),
+                _ => panic!("Expected MaxStepsExceeded error"),
+            }
+        }
-        assert!(result.is_err());
-        match result.unwrap_err() {
-            FlowError::NoRouteFound(node_id, action) => {
-                assert_eq!(node_id, "start");
-                assert_eq!(action, "unknown");
+        #[tokio::test]
+        async fn test_flow_node_not_found() {
+            let mut flow = FlowBuilder::new()
+                .start_node("start")
+                .node("start", TestNode::new(Action::simple("next")))
+                .route("start", "next", "nonexistent")
+                .build();
+
+            let mut store = MemoryStorage::new();
+            let result = flow.execute(&mut store).await;
+
+            assert!(result.is_err());
+            match result.unwrap_err() {
+                FlowError::NodeNotFound(id) => assert_eq!(id, "nonexistent"),
+                _ => panic!("Expected NodeNotFound error"),
            }
            }
-            _ => panic!("Expected NoRouteFound error"),
        }
        }
-    }
-    #[tokio::test]
-    async fn test_flow_validation() {
-        let flow = FlowBuilder::new()
-            .start_node("nonexistent")
-            .node("start", TestNode::new(Action::simple("next")))
-            .build();
+        #[tokio::test]
+        async fn test_flow_no_route_found() {
+            let mut flow = FlowBuilder::new()
+                .start_node("start")
+                .node("start", TestNode::new(Action::simple("unknown")))
+                .build();
+
+            let mut store = MemoryStorage::new();
+            let result = flow.execute(&mut store).await;
+
+            assert!(result.is_err());
+            match result.unwrap_err() {
+                FlowError::NoRouteFound(node_id, action) => {
+                    assert_eq!(node_id, "start");
+                    assert_eq!(action, "unknown");
+                }
+                _ => panic!("Expected NoRouteFound error"),
+            }
+        }
-        let result = flow.validate();
-        assert!(result.is_err());
-        match result.unwrap_err() {
-            FlowError::InvalidConfiguration(msg) => {
-                assert!(msg.contains("Start node 'nonexistent' not found"));
+        #[tokio::test]
+        async fn test_flow_validation() {
+            let flow = FlowBuilder::new()
+                .start_node("nonexistent")
+                .node("start", TestNode::new(Action::simple("next")))
+                .build();
+
+            let result = flow.validate();
+            assert!(result.is_err());
+            match result.unwrap_err() {
+                FlowError::InvalidConfiguration(msg) => {
+                    assert!(msg.contains("Start node 'nonexistent' not found"));
+                }
+                _ => panic!("Expected InvalidConfiguration error"),
            }
-            _ => panic!("Expected InvalidConfiguration error"),
        }
        }
-    #[tokio::test]
-    async fn test_flow_builder_methods() {
-        let flow: Flow = FlowBuilder::new()
-            .start_node("custom_start")
-            .max_steps(500)
-            .build();
+        #[tokio::test]
+        async fn test_flow_builder_methods() {
+            let flow: Flow = FlowBuilder::new()
+                .start_node("custom_start")
+                .max_steps(500)
+                .build();
-        assert_eq!(flow.config().start_node_id, "custom_start");
-        assert_eq!(flow.config().max_steps, 500);
+            assert_eq!(flow.config().start_node_id, "custom_start");
+            assert_eq!(flow.config().max_steps, 500);
+        }
+
+        #[tokio::test]
+        async fn test_conditional_route() {
+            use crate::flow::route::RouteCondition;
+
+            let mut flow = FlowBuilder::new()
+                .start_node("start")
+                .node("start", TestNode::new(Action::simple("check")))
+                .node("success", TestNode::new(Action::simple("end")))
+                .node("failure", TestNode::new(Action::simple("end")))
+                .conditional_route("start", "check", "success", RouteCondition::Always)
+                .terminal_route("success", "end")
+                .terminal_route("failure", "end")
+                .build();
+
+            let mut store = MemoryStorage::new();
+            let result = flow.execute(&mut store).await;
+
+            if let Err(e) = &result {
+                eprintln!("Conditional route test failed: {:?}", e);
+            }
+            assert!(result.is_ok());
+            let result = result.unwrap();
+            assert_eq!(result.execution_path, vec!["start", "success"]);
+        }
    }
+    // Common tests that don't require flow execution
     #[test]
     fn test_flow_config_default() {
         let config = FlowConfig::default();
@@ -1241,31 +1489,6 @@ mod tests {
         assert_eq!(config.max_steps, 1000);
     }
-    #[tokio::test]
-    async fn test_conditional_route() {
-        use crate::flow::route::RouteCondition;
-
-        let mut flow = FlowBuilder::new()
-            .start_node("start")
-            .node("start", TestNode::new(Action::simple("check")))
-            .node("success", TestNode::new(Action::simple("end")))
-            .node("failure", TestNode::new(Action::simple("end")))
-            .conditional_route("start", "check", "success", RouteCondition::Always)
-            .terminal_route("success", "end")
-            .terminal_route("failure", "end")
-            .build();
-
-        let mut store = MemoryStorage::new();
-        let result = flow.execute(&mut store).await;
-
-        if let Err(e) = &result {
-            eprintln!("Conditional route test failed: {:?}", e);
-        }
-        assert!(result.is_ok());
-        let result = result.unwrap();
-        assert_eq!(result.execution_path, vec!["start", "success"]);
-    }
-
     #[test]
     fn test_terminal_action_warning_in_route() {
         // Test that building a flow with terminal actions in routes doesn't panic
diff --git a/cosmoflow/src/lib.rs b/cosmoflow/src/lib.rs
index 52d3dfc..374ddca 100644
--- a/cosmoflow/src/lib.rs
+++ b/cosmoflow/src/lib.rs
@@ -16,13 +16,53 @@
 //! * **Shared Store**: A key-value store used to share data between nodes.
 //! * **Storage Backend**: A pluggable storage mechanism for the shared store.
 //!
-//! ## Quick Start
+//! # Quick Start
+//!
+//! ## Synchronous Usage (default)
+//!
+//! ```rust,no_run
+//! # #[cfg(all(feature = "storage-memory", not(feature = "async")))]
+//! # fn example() -> Result<(), Box> {
+//! use cosmoflow::prelude::*;
+//!
+//! // Create a shared store with memory backend
+//! let mut store = MemoryStorage::new();
+//!
+//! // Define a simple node
+//! struct MyNode;
+//! impl Node for MyNode {
+//!     type PrepResult = String;
+//!     type ExecResult = ();
+//!     type Error = NodeError;
+//!     fn prep(&mut self, _store: &S, _context: &ExecutionContext) -> Result {
+//!         Ok("prepared".to_string())
+//!     }
+//! 
fn exec(&mut self, _prep_result: String, _context: &ExecutionContext) -> Result<(), Self::Error> { +//! Ok(()) +//! } +//! fn post(&mut self, _store: &mut S, _prep_result: String, _exec_result: (), _context: &ExecutionContext) -> Result { +//! Ok(Action::simple("complete")) +//! } +//! } +//! +//! // Create a flow +//! let mut flow = FlowBuilder::new() +//! .node("start", MyNode) +//! .terminal_route("start", "complete") +//! .build(); +//! +//! // Execute the flow +//! let result = flow.execute(&mut store)?; +//! # Ok(()) +//! # } +//! ``` +//! +//! ## Asynchronous Usage (with async feature) //! //! ```rust,no_run -//! # #[cfg(feature = "storage-memory")] +//! # #[cfg(all(feature = "async", feature = "storage-memory"))] //! # async fn example() -> Result<(), Box> { //! use cosmoflow::prelude::*; -//! use cosmoflow::flow::FlowBackend; //! use async_trait::async_trait; //! //! // Create a shared store with memory backend @@ -71,10 +111,15 @@ //! //! ### Convenience Features //! -//! * `minimal`: Just the core engine (bring your own storage). -//! * `basic`: Core + memory storage (perfect for development). -//! * `standard`: Core + memory storage. -//! * `full`: All storage backends enabled. +//! * `minimal`: Just the core engine (bring your own storage) - sync only. +//! * `basic`: Core + memory storage - sync only. +//! * `standard`: Core + memory storage + async support. +//! * `full`: All storage backends + async support enabled. +//! +//! ### Sync/Async Mode +//! +//! * `async`: Enable async/await support (requires tokio runtime). +//! * Without `async`: Synchronous execution only (lighter weight). // ============================================================================ // CORE EXPORTS @@ -90,15 +135,34 @@ pub use action::Action; /// Flow definition and execution pub mod flow; + +// Sync exports +#[cfg(not(feature = "async"))] pub use flow::{ Flow, FlowBackend, FlowBuilder, FlowConfig, FlowExecutionResult, errors::FlowError, route::Route, }; +// Async exports +#[cfg(feature = "async")] +pub use flow::{ + FlowConfig, FlowExecutionResult, + r#async::{Flow, FlowBackend, FlowBuilder}, + errors::FlowError, + route::Route, +}; + /// Node execution system and traits pub mod node; + +// Sync Node exports +#[cfg(not(feature = "async"))] pub use node::{ExecutionContext, Node, NodeError}; +// Async Node exports +#[cfg(feature = "async")] +pub use node::{ExecutionContext, NodeError, r#async::Node}; + // ============================================================================ // CONVENIENCE TYPE ALIAS // ============================================================================ @@ -119,11 +183,15 @@ pub type Result = std::result::Result; /// use cosmoflow::prelude::*; /// ``` pub mod prelude { - // Core types - pub use crate::{ - Action, ExecutionContext, Flow, FlowBackend, FlowBuilder, FlowConfig, FlowExecutionResult, - Node, NodeError, SharedStore, - }; + // Core types (always available) + pub use crate::{Action, ExecutionContext, Node, NodeError, SharedStore}; + + // Flow types (always available) + pub use crate::{Flow, FlowBackend, FlowBuilder, FlowConfig, FlowExecutionResult}; + + // Re-export async_trait when async feature is enabled + #[cfg(feature = "async")] + pub use async_trait::async_trait; // Storage backends #[cfg(feature = "storage-memory")] diff --git a/cosmoflow/src/node/async.rs b/cosmoflow/src/node/async.rs new file mode 100644 index 0000000..5ed23a3 --- /dev/null +++ b/cosmoflow/src/node/async.rs @@ -0,0 +1,215 @@ +//! # Async Node Implementation +//! +//! 
This module contains the async version of the Node trait and related functionality. +//! It's only available when the `async` feature is enabled. + +use async_trait::async_trait; +use std::time::Duration; + +use super::{ExecutionContext, NodeError}; +use crate::action::Action; +use crate::shared_store::SharedStore; + +/// Node trait that defines the complete interface for nodes in CosmoFlow workflows (async version). +/// +/// This trait combines all functionality needed for node execution in a single, cohesive +/// interface. It incorporates the three-phase execution model (prep/exec/post) with +/// built-in retry logic, error handling, and configuration methods. +/// +/// This is the async version that requires the `async` feature flag (enabled by default). +/// For synchronous execution, disable the `async` feature. +#[async_trait] +pub trait Node: Send + Sync { + /// Result type from the preparation phase. + type PrepResult: Send + Sync + Clone + 'static; + /// Result type from the execution phase. + type ExecResult: Send + Sync + 'static; + /// Error type for all operations in this node. + type Error: std::error::Error + Send + Sync + 'static; + + /// Preparation phase: Read and preprocess data from shared storage (async). + async fn prep( + &mut self, + store: &S, + context: &ExecutionContext, + ) -> Result; + + /// Execution phase: Perform the core computation logic (async). + async fn exec( + &mut self, + prep_result: Self::PrepResult, + context: &ExecutionContext, + ) -> Result; + + /// Post-processing phase: Write results and determine next action (async). + async fn post( + &mut self, + store: &mut S, + prep_result: Self::PrepResult, + exec_result: Self::ExecResult, + context: &ExecutionContext, + ) -> Result; + + /// Maximum number of retries for the exec phase. + fn max_retries(&self) -> usize { + 3 + } + + /// Delay between retry attempts. + fn retry_delay(&self) -> Duration { + Duration::from_millis(100) + } + + /// Get the node's name for debugging and logging. + fn name(&self) -> &str { + std::any::type_name::() + } + + /// Fallback execution when all retries are exhausted (async). + async fn exec_fallback( + &mut self, + _prep_result: Self::PrepResult, + error: Self::Error, + _context: &ExecutionContext, + ) -> Result { + Err(error) + } + + /// Run the complete node execution cycle (async). + async fn run(&mut self, store: &mut S) -> Result { + let context = ExecutionContext::new(self.max_retries(), self.retry_delay()); + + let prep_result = self + .prep(store, &context) + .await + .map_err(|e| NodeError::PreparationError(e.to_string()))?; + + let exec_result = self + .exec_with_retries(prep_result.clone(), context.clone()) + .await + .map_err(|e| NodeError::ExecutionError(e.to_string()))?; + + self.post(store, prep_result, exec_result, &context) + .await + .map_err(|e| NodeError::PostProcessingError(e.to_string())) + } + + /// Execute with retry logic (async). 
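Taken together, the methods above form a retrying execution pipeline: `run` drives prep, exec-with-retries, and post in order. The sketch below exercises that pipeline end to end. It is a minimal sketch assuming the `async` and `storage-memory` features; `FlakyNode` and the `"flaky_result"` key are illustrative, and the storage parameter is spelled `Node<MemoryStorage>` here.

```rust
use std::time::Duration;

use async_trait::async_trait;
use cosmoflow::prelude::*;

// A node whose exec fails twice and then succeeds, so the default retry
// loop in `run` gets exercised. Names here are illustrative.
struct FlakyNode {
    attempts: u32,
}

#[async_trait]
impl Node<MemoryStorage> for FlakyNode {
    type PrepResult = ();
    type ExecResult = String;
    type Error = NodeError;

    async fn prep(
        &mut self,
        _store: &MemoryStorage,
        _ctx: &ExecutionContext,
    ) -> Result<(), Self::Error> {
        Ok(())
    }

    async fn exec(&mut self, _prep: (), _ctx: &ExecutionContext) -> Result<String, Self::Error> {
        self.attempts += 1;
        if self.attempts < 3 {
            Err(NodeError::ExecutionError(format!("transient failure #{}", self.attempts)))
        } else {
            Ok("ok".to_string())
        }
    }

    async fn post(
        &mut self,
        store: &mut MemoryStorage,
        _prep: (),
        exec: String,
        _ctx: &ExecutionContext,
    ) -> Result<Action, Self::Error> {
        // Persist the result before handing control back to the flow.
        store
            .set("flaky_result".to_string(), exec)
            .map_err(|e| NodeError::StorageError(e.to_string()))?;
        Ok(Action::simple("complete"))
    }

    // Give the exec phase a real retry budget instead of the defaults.
    fn max_retries(&self) -> usize {
        5
    }

    fn retry_delay(&self) -> Duration {
        Duration::from_millis(10)
    }

    // If the budget is ever exhausted, fall back to a default value.
    async fn exec_fallback(
        &mut self,
        _prep: (),
        _err: Self::Error,
        _ctx: &ExecutionContext,
    ) -> Result<String, Self::Error> {
        Ok("fallback".to_string())
    }
}

#[tokio::main]
async fn main() -> Result<(), NodeError> {
    let mut store = MemoryStorage::new();
    let mut node = FlakyNode { attempts: 0 };
    let action = node.run(&mut store).await?;
    println!("finished with action: {}", action.name());
    Ok(())
}
```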
+ async fn exec_with_retries( + &mut self, + prep_result: Self::PrepResult, + mut context: ExecutionContext, + ) -> Result { + loop { + match self.exec(prep_result.clone(), &context).await { + Ok(result) => return Ok(result), + Err(error) => { + if context.can_retry() { + context.next_retry(); + tokio::time::sleep(context.retry_delay).await; + } else { + return self.exec_fallback(prep_result, error, &context).await; + } + } + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::shared_store::backends::MemoryStorage; + + #[derive(Debug)] + struct TestError; + impl std::fmt::Display for TestError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "test error") + } + } + impl std::error::Error for TestError {} + + struct TestNode { + prep_success: bool, + exec_success: bool, + post_success: bool, + } + + impl TestNode { + fn new(prep_success: bool, exec_success: bool, post_success: bool) -> Self { + Self { + prep_success, + exec_success, + post_success, + } + } + } + + #[async_trait] + impl Node for TestNode { + type PrepResult = String; + type ExecResult = String; + type Error = TestError; + + async fn prep( + &mut self, + _store: &S, + _context: &ExecutionContext, + ) -> Result { + if self.prep_success { + Ok("prep_result".to_string()) + } else { + Err(TestError) + } + } + + async fn exec( + &mut self, + prep_result: Self::PrepResult, + _context: &ExecutionContext, + ) -> Result { + if self.exec_success { + Ok(format!("exec_{}", prep_result)) + } else { + Err(TestError) + } + } + + async fn post( + &mut self, + _store: &mut S, + _prep_result: Self::PrepResult, + exec_result: Self::ExecResult, + _context: &ExecutionContext, + ) -> Result { + if self.post_success { + Ok(Action::simple(&exec_result)) + } else { + Err(TestError) + } + } + } + + #[tokio::test] + async fn test_node_successful_execution() { + let mut node = TestNode::new(true, true, true); + let mut store = MemoryStorage::new(); + + let result = node.run(&mut store).await; + assert!(result.is_ok()); + } + + #[tokio::test] + async fn test_node_prep_failure() { + let mut node = TestNode::new(false, true, true); + let mut store = MemoryStorage::new(); + + let result = node.run(&mut store).await; + assert!(result.is_err()); + assert!(matches!( + result.unwrap_err(), + NodeError::PreparationError(_) + )); + } +} diff --git a/cosmoflow/src/node/errors.rs b/cosmoflow/src/node/errors.rs index 88f40a2..585c349 100644 --- a/cosmoflow/src/node/errors.rs +++ b/cosmoflow/src/node/errors.rs @@ -3,18 +3,21 @@ use thiserror::Error; /// Simple error type for Node operations #[derive(Debug, Error)] pub enum NodeError { - #[error("Execution error: {0}")] + /// An error that occurs during the preparation phase of a node. + #[error("Preparation error: {0}")] + PreparationError(String), /// An error that occurs during node execution. + #[error("Execution error: {0}")] ExecutionError(String), + /// An error that occurs during the post-processing phase of a node. + #[error("Post-processing error: {0}")] + PostProcessingError(String), /// An error that occurs in the storage backend. #[error("Storage error: {0}")] StorageError(String), /// An error that occurs during input validation. #[error("Validation error: {0}")] ValidationError(String), - /// An error that occurs during the preparation phase of a node. 
- #[error("Preparation error: {0}")] - PrepError(String), } impl From for NodeError { @@ -48,8 +51,11 @@ mod tests { "Validation error: validation failed" ); - let prep_error = NodeError::PrepError("prep failed".to_string()); + let prep_error = NodeError::PreparationError("prep failed".to_string()); assert_eq!(prep_error.to_string(), "Preparation error: prep failed"); + + let post_error = NodeError::PostProcessingError("post failed".to_string()); + assert_eq!(post_error.to_string(), "Post-processing error: post failed"); } #[test] diff --git a/cosmoflow/src/node/mod.rs b/cosmoflow/src/node/mod.rs index 6885d7d..281571a 100644 --- a/cosmoflow/src/node/mod.rs +++ b/cosmoflow/src/node/mod.rs @@ -9,12 +9,17 @@ //! //! - **Unified Node Trait**: Single trait combining all node functionality //! - **Type Safety**: Associated types provide compile-time guarantees -//! - **Async Execution**: Full async/await support for modern Rust applications +//! - **Execution Support**: Both async and sync execution modes (configurable via features) //! - **Retry Logic**: Built-in retry mechanisms with configurable policies //! - **Error Handling**: Comprehensive error types and propagation //! - **Execution Context**: Rich context information for node execution //! - **Automatic Integration**: Seamless integration with the flow system //! +//! ## Feature Flags +//! +//! - `async` (default): Enables async/await support with tokio runtime +//! - Without `async`: Provides synchronous execution for minimal compilation +//! //! ## Node Trait Design //! //! The Node trait provides a comprehensive interface that includes: @@ -25,9 +30,11 @@ //! //! ## Quick Start //! -//! ### Implementing a Custom Node +//! ### Async Implementation (default) //! //! ```rust +//! # #[cfg(feature = "async")] +//! # { //! use cosmoflow::node::{Node, ExecutionContext, NodeError}; //! use cosmoflow::shared_store::SharedStore; //! use cosmoflow::action::Action; @@ -42,7 +49,6 @@ //! where //! S: SharedStore + Send + Sync //! { -//! // Define associated types for type safety //! type PrepResult = String; //! type ExecResult = String; //! type Error = NodeError; @@ -52,7 +58,6 @@ //! _store: &S, //! _context: &ExecutionContext, //! ) -> Result { -//! // Prepare data for execution //! Ok(format!("Preparing: {}", self.name)) //! } //! @@ -61,7 +66,6 @@ //! prep_result: Self::PrepResult, //! _context: &ExecutionContext, //! ) -> Result { -//! // Core business logic //! Ok(format!("Executed: {}", prep_result)) //! } //! @@ -72,68 +76,81 @@ //! exec_result: Self::ExecResult, //! _context: &ExecutionContext, //! ) -> Result { -//! // Handle results and determine next action //! println!("{}", exec_result); //! Ok(Action::simple("complete")) //! } //! } +//! # } //! ``` //! -//! ### Using Execution Context +//! ### Sync Implementation (with --no-default-features) //! //! ```rust -//! use cosmoflow::node::ExecutionContext; -//! use std::time::Duration; -//! -//! let context = ExecutionContext::new(3, Duration::from_millis(500)); -//! -//! println!("Execution ID: {}", context.execution_id); -//! println!("Max retries: {}", context.max_retries); -//! println!("Can retry: {}", context.can_retry()); -//! ``` -//! -//! ## Core Types -//! -//! - [`ExecutionContext`]: Contains execution metadata and retry configuration -//! - [`Node`]: Unified trait for implementing custom node types with associated types -//! - [`NodeError`]: Comprehensive error types for node execution -//! -//! ## Three-Phase Execution -//! -//! 
The Node trait implements a three-phase execution model: +//! # #[cfg(not(feature = "async"))] +//! # { +//! use cosmoflow::node::{Node, ExecutionContext, NodeError}; +//! use cosmoflow::shared_store::SharedStore; +//! use cosmoflow::action::Action; //! -//! 1. **Prep Phase**: Read and validate inputs (fast, side-effect free) -//! 2. **Exec Phase**: Perform core computation (idempotent, retryable) -//! 3. **Post Phase**: Write results and determine next action +//! struct MyCustomNode { +//! name: String, +//! } //! -//! ## Error Handling +//! impl Node for MyCustomNode +//! where +//! S: SharedStore + Send + Sync +//! { +//! type PrepResult = String; +//! type ExecResult = String; +//! type Error = NodeError; //! -//! The node system provides detailed error information: +//! fn prep( +//! &mut self, +//! _store: &S, +//! _context: &ExecutionContext, +//! ) -> Result { +//! Ok(format!("Preparing: {}", self.name)) +//! } //! -//! ```rust -//! use cosmoflow::node::NodeError; +//! fn exec( +//! &mut self, +//! prep_result: Self::PrepResult, +//! _context: &ExecutionContext, +//! ) -> Result { +//! Ok(format!("Executed: {}", prep_result)) +//! } //! -//! # let node_result: Result<(), NodeError> = Err(NodeError::ExecutionError("timeout".to_string())); -//! match node_result { -//! Err(NodeError::ExecutionError(msg)) if msg.contains("timeout") => { -//! println!("Node execution timed out"); -//! }, -//! Err(NodeError::ValidationError(message)) => { -//! println!("Invalid input: {}", message); -//! }, -//! _ => {} +//! fn post( +//! &mut self, +//! _store: &mut S, +//! _prep_result: Self::PrepResult, +//! exec_result: Self::ExecResult, +//! _context: &ExecutionContext, +//! ) -> Result { +//! println!("{}", exec_result); +//! Ok(Action::simple("complete")) +//! } //! } +//! # } //! ``` /// The errors module contains the error types for the node crate. pub mod errors; -use async_trait::async_trait; +/// Async node implementation (available when async feature is enabled) +#[cfg(feature = "async")] +pub mod r#async; + pub use errors::NodeError; +#[cfg(feature = "async")] +pub use r#async::Node; + use std::{collections::HashMap, time::Duration}; +#[cfg(not(feature = "async"))] use crate::action::Action; +#[cfg(not(feature = "async"))] use crate::shared_store::SharedStore; use serde_json::Value; use uuid::Uuid; @@ -202,266 +219,40 @@ impl ExecutionContext { } } -/// Node trait that defines the complete interface for nodes in CosmoFlow workflows. +// Sync version of the Node trait (when async feature is disabled) +#[cfg(not(feature = "async"))] +/// Node trait that defines the complete interface for nodes in CosmoFlow workflows (sync version). /// /// This trait combines all functionality needed for node execution in a single, cohesive /// interface. It incorporates the three-phase execution model (prep/exec/post) with /// built-in retry logic, error handling, and configuration methods. 
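Because the sync and async traits share the same shape, a downstream crate can support both modes by mirroring the `cfg` gating used for the re-exports. A minimal sketch, assuming the generic `Node<S>` spelling used in the doc examples; `EchoNode` is illustrative.

```rust
use cosmoflow::prelude::*;

// One node type, two impls; the `async` feature picks which one compiles,
// matching whichever `Node` trait is re-exported. `EchoNode` is illustrative.
struct EchoNode;

#[cfg(not(feature = "async"))]
impl<S> Node<S> for EchoNode
where
    S: SharedStore + Send + Sync,
{
    type PrepResult = String;
    type ExecResult = String;
    type Error = NodeError;

    fn prep(&mut self, _store: &S, _ctx: &ExecutionContext) -> Result<String, Self::Error> {
        Ok("hello".to_string())
    }

    fn exec(&mut self, prep: String, _ctx: &ExecutionContext) -> Result<String, Self::Error> {
        Ok(prep.to_uppercase())
    }

    fn post(
        &mut self,
        _store: &mut S,
        _prep: String,
        exec: String,
        _ctx: &ExecutionContext,
    ) -> Result<Action, Self::Error> {
        println!("{exec}");
        Ok(Action::simple("complete"))
    }
}

#[cfg(feature = "async")]
#[async_trait]
impl<S> Node<S> for EchoNode
where
    S: SharedStore + Send + Sync,
{
    type PrepResult = String;
    type ExecResult = String;
    type Error = NodeError;

    async fn prep(&mut self, _store: &S, _ctx: &ExecutionContext) -> Result<String, Self::Error> {
        Ok("hello".to_string())
    }

    async fn exec(&mut self, prep: String, _ctx: &ExecutionContext) -> Result<String, Self::Error> {
        Ok(prep.to_uppercase())
    }

    async fn post(
        &mut self,
        _store: &mut S,
        _prep: String,
        exec: String,
        _ctx: &ExecutionContext,
    ) -> Result<Action, Self::Error> {
        println!("{exec}");
        Ok(Action::simple("complete"))
    }
}

fn main() {
    // Construction is identical in both modes; only `run` differs
    // (a plain call vs. `.await`), so callers gate that one line the same way.
    let _node = EchoNode;
}
```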
/// -/// ## Design Philosophy -/// -/// The Node trait provides a comprehensive interface that includes: -/// - Core execution methods (prep/exec/post) with configurable associated types -/// - Configuration methods with sensible defaults -/// - Built-in retry logic and error handling -/// - Seamless integration with the flow system -/// - Type safety through associated types -/// -/// ## Associated Types -/// -/// Each node defines three associated types that provide compile-time type safety: -/// - `PrepResult`: Data type prepared in prep phase and passed to exec/post -/// - `ExecResult`: Data type returned by exec phase and passed to post -/// - `Error`: Error type for this node's operations -/// -/// ## Three-Phase Execution Model -/// -/// 1. **Prep Phase** (`prep`): Read and validate inputs from shared storage -/// - Should be fast and side-effect free -/// - Used for input validation and data preparation -/// - Results are passed to both exec and post phases -/// -/// 2. **Exec Phase** (`exec`): Perform core computation logic -/// - Must be idempotent (safe to retry) -/// - Should not access shared storage directly -/// - Contains the main business logic (API calls, computations, etc.) -/// -/// 3. **Post Phase** (`post`): Write results and determine next action -/// - Handles side effects (storage writes, notifications, etc.) -/// - Determines the next workflow action -/// - Has access to both prep and exec results -/// -/// ## Error Handling and Retries -/// -/// The trait includes built-in retry logic for the exec phase: -/// - Configurable retry count via `max_retries()` -/// - Configurable retry delay via `retry_delay()` -/// - Fallback mechanism via `exec_fallback()` when all retries are exhausted -/// - Automatic error wrapping and propagation -/// -/// ## Type Parameters -/// -/// * `S` - Storage backend type that implements `SharedStore` -#[async_trait] +/// This is the synchronous version that's available when the `async` feature is disabled. +/// For async execution, enable the `async` feature flag. pub trait Node: Send + Sync { /// Result type from the preparation phase. - /// - /// This type must be `Clone` because it's passed to both the exec and post phases. - /// It should contain all the data needed for the execution phase. type PrepResult: Send + Sync + Clone + 'static; - /// Result type from the execution phase. - /// - /// This type contains the output of the core computation and is passed - /// to the post phase for storage and further processing. type ExecResult: Send + Sync + 'static; - /// Error type for all operations in this node. - /// - /// This should be a comprehensive error type that can represent all - /// possible failure modes for this node's operations. type Error: std::error::Error + Send + Sync + 'static; - // === Core Execution Methods (Must be implemented) === - - /// Preparation phase: Read and preprocess data from shared storage. - /// - /// This phase is responsible for: - /// - Reading necessary data from the shared store - /// - Validating inputs and checking preconditions - /// - Preparing data structures for the execution phase - /// - Performing any setup operations that don't have side effects - /// - /// The preparation phase should be: - /// - **Fast**: Avoid expensive operations here - /// - **Side-effect free**: No writes to storage or external systems - /// - **Deterministic**: Same inputs should produce same outputs - /// - /// The returned data will be passed to both `exec()` and `post()` phases. 
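In practice the prep phase usually just pulls what exec will need out of the shared store and validates it, failing fast when a precondition is missing. A small sync sketch against `MemoryStorage`; the helper name and the `"prompt"` key are illustrative.

```rust
use cosmoflow::prelude::*;

// The typical shape of a prep phase: read, validate, and return typed data
// for exec. Fast, deterministic, and free of side effects.
fn prep_like_step(store: &MemoryStorage) -> Result<String, NodeError> {
    let prompt: Option<String> = store
        .get("prompt")
        .map_err(|e| NodeError::StorageError(e.to_string()))?;

    match prompt {
        Some(p) if !p.is_empty() => Ok(p),
        _ => Err(NodeError::PreparationError(
            "missing 'prompt' in shared store".to_string(),
        )),
    }
}

fn main() -> Result<(), NodeError> {
    let mut store = MemoryStorage::new();
    store
        .set("prompt".to_string(), "hello".to_string())
        .map_err(|e| NodeError::StorageError(e.to_string()))?;

    println!("prepared: {}", prep_like_step(&store)?);
    Ok(())
}
```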
- /// - /// # Arguments - /// - /// * `store` - Immutable reference to shared storage for reading data - /// * `context` - Execution context containing retry info and metadata - /// - /// # Returns - /// - /// * `Ok(PrepResult)` - Successfully prepared data for execution - /// * `Err(Self::Error)` - Preparation failed (will abort the entire node execution) - /// - /// # Examples - /// - /// ```rust - /// # use cosmoflow::node::ExecutionContext; - /// # use cosmoflow::shared_store::SharedStore; - /// # - /// # struct MyPrepData { message: String } - /// # #[derive(Debug)] - /// # enum MyError { MissingConfig, InvalidTimeout } - /// # impl std::fmt::Display for MyError { - /// # fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "error") } - /// # } - /// # impl std::error::Error for MyError {} - /// # - /// # struct MyNode; - /// # impl MyNode { - /// async fn prep(&mut self, _store: &S, _context: &ExecutionContext) - /// -> Result { - /// // Read configuration from storage - /// // Validate inputs - /// Ok(MyPrepData { message: "prepared".to_string() }) - /// } - /// # } - /// ``` - async fn prep( + /// Preparation phase: Read and preprocess data from shared storage (sync). + fn prep( &mut self, store: &S, context: &ExecutionContext, ) -> Result; - /// Execution phase: Perform the core computation logic. - /// - /// This phase contains the main business logic of the node: - /// - API calls to external services - /// - Data processing and transformations - /// - Complex computations or algorithms - /// - LLM interactions or other AI operations - /// - /// The execution phase must be: - /// - **Idempotent**: Safe to retry multiple times with same inputs - /// - **Stateless**: Should not modify node state or access shared storage - /// - **Pure**: Given same prep_result, should produce same output - /// - /// This phase is automatically retried according to the node's retry configuration. - /// If all retries fail, the `exec_fallback()` method will be called. 
- /// - /// # Arguments - /// - /// * `prep_result` - Data prepared in the prep phase - /// * `context` - Execution context with current retry count and metadata - /// - /// # Returns - /// - /// * `Ok(ExecResult)` - Successfully computed result - /// * `Err(Self::Error)` - Execution failed (will trigger retry or fallback) - /// - /// # Examples - /// - /// ```rust - /// # use cosmoflow::node::ExecutionContext; - /// # use std::time::Duration; - /// # - /// # struct MyPrepData { config: Config } - /// # struct Config { timeout: u64 } - /// # #[derive(Debug)] - /// # enum MyError { NetworkError } - /// # impl std::fmt::Display for MyError { - /// # fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "error") } - /// # } - /// # impl std::error::Error for MyError {} - /// # - /// # struct HttpClient; - /// # impl HttpClient { - /// # fn new() -> Self { HttpClient } - /// # fn post(&self, _url: &str) -> RequestBuilder { RequestBuilder } - /// # } - /// # struct RequestBuilder; - /// # impl RequestBuilder { - /// # fn json(self, _json: &T) -> Self { self } - /// # fn timeout(self, _duration: Duration) -> Self { self } - /// # async fn send(self) -> Result { Ok(Response) } - /// # } - /// # struct Response; - /// # impl Response { - /// # async fn text(self) -> Result { Ok("result".to_string()) } - /// # } - /// # - /// # struct MyNode; - /// # impl MyNode { - /// async fn exec(&mut self, prep_result: MyPrepData, _context: &ExecutionContext) - /// -> Result { - /// // Perform main computation - /// let client = HttpClient::new(); - /// let response = client.post("https://api.example.com/process") - /// .json(&prep_result.config) - /// .timeout(Duration::from_secs(prep_result.config.timeout)) - /// .send() - /// .await?; - /// - /// let result = response.text().await?; - /// Ok(result) - /// } - /// # } - /// ``` - async fn exec( + /// Execution phase: Perform the core computation logic (sync). + fn exec( &mut self, prep_result: Self::PrepResult, context: &ExecutionContext, ) -> Result; - /// Post-processing phase: Write results and determine the next action. - /// - /// This phase is responsible for: - /// - Writing results to shared storage - /// - Updating external systems or databases - /// - Sending notifications or triggering events - /// - Determining what action the workflow should take next - /// - /// The post phase has access to both the prep and exec results, allowing - /// for comprehensive result processing and decision making. 
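The post phase is the mirror image: persist the exec result, then choose the next action from it, as in the `handle_error`/`continue` routing shown in the surrounding docs. A small sync sketch against `MemoryStorage`; the helper name and storage key are illustrative.

```rust
use cosmoflow::prelude::*;

// The typical shape of a post phase: write the result to the shared store,
// then pick the next workflow action based on its content.
fn post_like_step(store: &mut MemoryStorage, exec_result: &str) -> Result<Action, NodeError> {
    store
        .set("last_result".to_string(), exec_result.to_string())
        .map_err(|e| NodeError::StorageError(e.to_string()))?;

    if exec_result.contains("error") {
        Ok(Action::simple("handle_error"))
    } else {
        Ok(Action::simple("continue"))
    }
}

fn main() -> Result<(), NodeError> {
    let mut store = MemoryStorage::new();
    let next = post_like_step(&mut store, "all good")?;
    println!("next action: {}", next.name());
    Ok(())
}
```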
- /// - /// # Arguments - /// - /// * `store` - Mutable reference to shared storage for writing results - /// * `prep_result` - Data from the preparation phase - /// * `exec_result` - Result from the execution phase - /// * `context` - Execution context with metadata - /// - /// # Returns - /// - /// * `Ok(Action)` - The action that determines the next step in the workflow - /// * `Err(Self::Error)` - Post-processing failed - /// - /// # Examples - /// - /// ```rust - /// # use cosmoflow::node::ExecutionContext; - /// # use cosmoflow::shared_store::SharedStore; - /// # use cosmoflow::action::Action; - /// # - /// # struct MyPrepData; - /// # #[derive(Debug)] - /// # enum MyError { StorageError } - /// # impl std::fmt::Display for MyError { - /// # fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "error") } - /// # } - /// # impl std::error::Error for MyError {} - /// # - /// # struct MyNode; - /// # impl MyNode { - /// async fn post(&mut self, _store: &mut S, _prep_result: MyPrepData, - /// exec_result: String, _context: &ExecutionContext) - /// -> Result { - /// // Store results and determine next action - /// if exec_result.contains("error") { - /// Ok(Action::simple("handle_error")) - /// } else { - /// Ok(Action::simple("continue")) - /// } - /// } - /// # } - /// ``` - async fn post( + /// Post-processing phase: Write results and determine next action (sync). + fn post( &mut self, store: &mut S, prep_result: Self::PrepResult, @@ -469,138 +260,23 @@ pub trait Node: Send + Sync { context: &ExecutionContext, ) -> Result; - // === Configuration Methods (Default implementations provided) === - - /// Returns the node's name/identifier for logging and debugging purposes. - /// - /// This name is used in: - /// - Log messages and error reports - /// - Workflow execution traces - /// - Debugging and monitoring tools - /// - Flow validation and visualization - /// - /// The default implementation returns the Rust type name, but nodes - /// should typically override this to provide a more user-friendly name. - /// - /// # Returns - /// - /// A string slice containing the node's human-readable name. - /// - /// # Examples - /// - /// ```rust - /// # struct MyNode { name: String } - /// # impl MyNode { - /// fn name(&self) -> &str { - /// "UserDataProcessor" // Custom meaningful name - /// } - /// # } - /// ``` - fn name(&self) -> &str { - std::any::type_name::() - } - - /// Returns the maximum number of retry attempts for the execution phase. - /// - /// When the `exec()` method fails, it will be retried up to this many times - /// before calling the `exec_fallback()` method. The retry count does not - /// include the initial attempt. - /// - /// **Default**: 1 (no retries - fail immediately on first error) - /// - /// # Returns - /// - /// The maximum number of retry attempts (0 = no retries, 1+ = retry count) - /// - /// # Examples - /// - /// ```rust - /// # struct MyNode; - /// # impl MyNode { - /// fn max_retries(&self) -> usize { - /// 3 // Retry up to 3 times for network operations - /// } - /// # } - /// ``` + /// Maximum number of retries for the exec phase. fn max_retries(&self) -> usize { - 1 // Default: no retries + 3 } - /// Returns the delay between retry attempts. - /// - /// This delay is applied before each retry attempt (not before the initial attempt). - /// The delay helps prevent overwhelming external services and allows time for - /// transient issues to resolve. 
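These settings surface to the node through the `ExecutionContext` that `run` builds from `max_retries()` and `retry_delay()`. A small sketch of inspecting one directly, which is mostly useful in tests:

```rust
use std::time::Duration;

use cosmoflow::ExecutionContext;

fn main() {
    // Three retries, half a second apart; `run` normally constructs this
    // for you from the node's configuration methods.
    let context = ExecutionContext::new(3, Duration::from_millis(500));

    println!("execution id: {}", context.execution_id);
    println!("max retries:  {}", context.max_retries);
    println!("can retry:    {}", context.can_retry());
}
```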
- /// - /// **Default**: 0 seconds (no delay between retries) - /// - /// # Returns - /// - /// Duration to wait between retry attempts - /// - /// # Examples - /// - /// ```rust - /// # use std::time::Duration; - /// # struct MyNode; - /// # impl MyNode { - /// fn retry_delay(&self) -> Duration { - /// Duration::from_millis(500) // Wait 500ms between retries - /// } - /// # } - /// ``` + /// Delay between retry attempts. fn retry_delay(&self) -> Duration { - Duration::from_secs(0) // Default: no delay + Duration::from_millis(100) } - /// Fallback handler called when exec() fails after all retries are exhausted. - /// - /// This method provides a way to handle execution failures gracefully instead - /// of propagating the error up the call stack. Common use cases include: - /// - Providing default values when external services are unavailable - /// - Implementing circuit breaker patterns - /// - Logging detailed error information before failing - /// - Attempting alternative computation methods - /// - /// **Default behavior**: Re-raises the original error (no fallback) - /// - /// # Arguments - /// - /// * `_prep_result` - The data from the prep phase (available for fallback logic) - /// * `error` - The final error that caused all retries to fail - /// * `_context` - Execution context with retry information - /// - /// # Returns - /// - /// * `Ok(ExecResult)` - Fallback succeeded, continue with this result - /// * `Err(Self::Error)` - Fallback also failed, propagate this error - /// - /// # Examples - /// - /// ```rust - /// # use cosmoflow::node::ExecutionContext; - /// # - /// # #[derive(Debug)] - /// # enum MyError { NetworkError } - /// # impl std::fmt::Display for MyError { - /// # fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "network error") } - /// # } - /// # impl std::error::Error for MyError {} - /// # - /// # struct MyNode; - /// # impl MyNode { - /// async fn exec_fallback(&mut self, _prep_result: String, - /// error: MyError, _context: &ExecutionContext) - /// -> Result { - /// // Log the failure for monitoring - /// eprintln!("Execution failed after retries: {}", error); - /// - /// // Provide a default result instead of failing - /// Ok("default_response".to_string()) - /// } - /// # } - /// ``` - async fn exec_fallback( + /// Get the node's name for debugging and logging. + fn name(&self) -> &str { + std::any::type_name::() + } + + /// Fallback execution when all retries are exhausted (sync). + fn exec_fallback( &mut self, _prep_result: Self::PrepResult, error: Self::Error, @@ -609,141 +285,37 @@ pub trait Node: Send + Sync { Err(error) } - // === Convenience Methods (Default implementations provided) === - - /// Complete execution flow: prep → exec → post. - /// - /// This is the main entry point for node execution and orchestrates the - /// complete three-phase execution process: - /// - /// 1. **Preparation Phase**: Calls `prep()` to read and validate inputs - /// 2. **Execution Phase**: Calls `exec_with_retries()` for core computation with retry logic - /// 3. 
**Post-processing Phase**: Calls `post()` to store results and determine next action - /// - /// This method handles: - /// - Automatic retry configuration setup - /// - Error wrapping and propagation between phases - /// - Execution context management - /// - Phase coordination and data passing - /// - /// # Arguments - /// - /// * `store` - Mutable reference to shared storage (immutable for prep, mutable for post) - /// - /// # Returns - /// - /// * `Ok(Action)` - The action returned by the post phase - /// * `Err(NodeError)` - Wrapped error from any phase that failed - /// - /// # Error Handling - /// - /// Errors from each phase are wrapped in specific `NodeError` variants: - /// - Prep errors → `NodeError::PrepError` - /// - Exec errors → `NodeError::ExecutionError` (after all retries and fallback) - /// - Post errors → `NodeError::ExecutionError` - /// - /// # Examples - /// - /// ```rust - /// # use cosmoflow::node::{ExecutionContext, NodeError}; - /// # use cosmoflow::SharedStore; - /// # use cosmoflow::action::Action; - /// # - /// # struct MyCustomNode; - /// # - /// # async fn example() -> Result<(), NodeError> { - /// # let mut my_node = MyCustomNode; - /// # let action = Action::simple("complete"); - /// # println!("Node completed with action: {}", action.name()); - /// # Ok(()) - /// # } - /// ``` - async fn run(&mut self, store: &mut S) -> Result { + /// Run the complete node execution cycle (sync). + fn run(&mut self, store: &mut S) -> Result { let context = ExecutionContext::new(self.max_retries(), self.retry_delay()); - // Prep phase: read and validate inputs let prep_result = self .prep(store, &context) - .await - .map_err(|e| NodeError::PrepError(format!("Prep failed: {e}")))?; + .map_err(|e| NodeError::PreparationError(e.to_string()))?; - // Exec phase with retries: perform core computation let exec_result = self .exec_with_retries(prep_result.clone(), context.clone()) - .await - .map_err(|e| NodeError::ExecutionError(format!("Exec failed: {e}")))?; + .map_err(|e| NodeError::ExecutionError(e.to_string()))?; - // Post phase: store results and determine next action - let action = self - .post(store, prep_result, exec_result, &context) - .await - .map_err(|e| NodeError::ExecutionError(format!("Post failed: {e}")))?; - - Ok(action) + self.post(store, prep_result, exec_result, &context) + .map_err(|e| NodeError::PostProcessingError(e.to_string())) } - /// Execution phase with built-in retry logic and fallback handling. - /// - /// This method implements the retry mechanism for the execution phase: - /// - /// 1. **Initial Attempt**: Calls `exec()` with the prep result - /// 2. **Retry Loop**: If exec fails and retries are available: - /// - Waits for the configured retry delay - /// - Increments retry count in execution context - /// - Attempts exec again - /// 3. **Fallback**: If all retries are exhausted, calls `exec_fallback()` - /// - /// The retry logic is designed to handle transient failures while avoiding - /// infinite loops or overwhelming external services. 
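Since `run` wraps each phase's failure in a phase-specific `NodeError`, callers can tell where a node broke without parsing messages. A sync sketch, assuming a build with `storage-memory` but without the `async` feature; `BrokenPost` and the `Node<MemoryStorage>` spelling are illustrative.

```rust
use cosmoflow::prelude::*;

// A node whose post phase always fails, to show how `run` re-wraps the
// error by phase before returning it to the caller.
struct BrokenPost;

impl Node<MemoryStorage> for BrokenPost {
    type PrepResult = ();
    type ExecResult = ();
    type Error = NodeError;

    fn prep(&mut self, _store: &MemoryStorage, _ctx: &ExecutionContext) -> Result<(), Self::Error> {
        Ok(())
    }

    fn exec(&mut self, _prep: (), _ctx: &ExecutionContext) -> Result<(), Self::Error> {
        Ok(())
    }

    fn post(
        &mut self,
        _store: &mut MemoryStorage,
        _prep: (),
        _exec: (),
        _ctx: &ExecutionContext,
    ) -> Result<Action, Self::Error> {
        Err(NodeError::ValidationError("nothing to write".to_string()))
    }
}

fn main() {
    let mut store = MemoryStorage::new();
    match BrokenPost.run(&mut store) {
        Ok(action) => println!("next action: {}", action.name()),
        // The failure above surfaces as a post-processing error.
        Err(NodeError::PreparationError(msg)) => eprintln!("prep failed: {msg}"),
        Err(NodeError::ExecutionError(msg)) => eprintln!("exec failed: {msg}"),
        Err(NodeError::PostProcessingError(msg)) => eprintln!("post failed: {msg}"),
        Err(other) => eprintln!("node error: {other}"),
    }
}
```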
- /// - /// # Arguments - /// - /// * `prep_result` - Data from the preparation phase (cloned for each retry) - /// * `context` - Mutable execution context to track retry attempts - /// - /// # Returns - /// - /// * `Ok(ExecResult)` - Execution succeeded (either normally or via fallback) - /// * `Err(Self::Error)` - All attempts failed including fallback - /// - /// # Retry Behavior - /// - /// - **Retry Count**: Controlled by `max_retries()` method - /// - **Retry Delay**: Controlled by `retry_delay()` method - /// - **Fallback**: Controlled by `exec_fallback()` method - /// - **Context Updates**: Retry count and metadata are updated automatically - /// - /// # Examples - /// - /// ```rust - /// # use cosmoflow::node::ExecutionContext; - /// # use std::time::Duration; - /// # - /// # async fn example() { - /// // This method is typically called internally by run(), but can be used directly: - /// let context = ExecutionContext::new(3, Duration::from_millis(500)); - /// println!("Context created with {} max retries", context.max_retries); - /// # } - /// ``` - async fn exec_with_retries( + /// Execute with retry logic (sync). + fn exec_with_retries( &mut self, prep_result: Self::PrepResult, mut context: ExecutionContext, ) -> Result { loop { - match self.exec(prep_result.clone(), &context).await { + match self.exec(prep_result.clone(), &context) { Ok(result) => return Ok(result), Err(error) => { if context.can_retry() { - // Wait before retry if delay is configured - if context.retry_delay > Duration::ZERO { - tokio::time::sleep(context.retry_delay).await; - } context.next_retry(); - continue; + std::thread::sleep(context.retry_delay); } else { - // All retries exhausted, try fallback - return self.exec_fallback(prep_result, error, &context).await; + return self.exec_fallback(prep_result, error, &context); } } } @@ -751,281 +323,100 @@ pub trait Node: Send + Sync { } } -#[cfg(all(test, feature = "storage-memory"))] +// Tests for both sync and async versions +#[cfg(all(test, not(feature = "async")))] mod tests { use super::*; use crate::shared_store::backends::MemoryStorage; - use async_trait::async_trait; - use std::sync::{Arc, Mutex}; + + #[derive(Debug)] + struct TestError; + impl std::fmt::Display for TestError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "test error") + } + } + impl std::error::Error for TestError {} struct TestNode { - name: String, - message: String, - action: Action, - max_retries: usize, - retry_delay: Duration, - fail_count: Arc>, - should_fail_prep: bool, - should_fail_exec: bool, - should_fail_post: bool, + prep_success: bool, + exec_success: bool, + post_success: bool, } impl TestNode { - pub fn new(name: impl Into, message: impl Into, action: Action) -> Self { + fn new(prep_success: bool, exec_success: bool, post_success: bool) -> Self { Self { - name: name.into(), - message: message.into(), - action, - max_retries: 1, - retry_delay: Duration::from_secs(0), - fail_count: Arc::new(Mutex::new(0)), - should_fail_prep: false, - should_fail_exec: false, - should_fail_post: false, + prep_success, + exec_success, + post_success, } } - - pub fn with_retries(mut self, max_retries: usize) -> Self { - self.max_retries = max_retries; - self - } - - pub fn with_delay(mut self, delay: Duration) -> Self { - self.retry_delay = delay; - self - } - - pub fn with_prep_failure(mut self) -> Self { - self.should_fail_prep = true; - self - } - - pub fn with_exec_failure(mut self, fail_times: usize) -> Self { - self.should_fail_exec = true; - 
*self.fail_count.lock().unwrap() = fail_times; - self - } - - pub fn with_post_failure(mut self) -> Self { - self.should_fail_post = true; - self - } } - #[async_trait] - impl Node for TestNode { + impl Node for TestNode { type PrepResult = String; type ExecResult = String; - type Error = NodeError; + type Error = TestError; - async fn prep( + fn prep( &mut self, - _store: &MemoryStorage, - context: &ExecutionContext, + _store: &S, + _context: &ExecutionContext, ) -> Result { - if self.should_fail_prep { - return Err(NodeError::PrepError("Intentional prep failure".to_string())); + if self.prep_success { + Ok("prep_result".to_string()) + } else { + Err(TestError) } - Ok(format!( - "Execution {}: {}", - context.execution_id, self.message - )) } - async fn exec( + fn exec( &mut self, prep_result: Self::PrepResult, _context: &ExecutionContext, ) -> Result { - if self.should_fail_exec { - let mut count = self.fail_count.lock().unwrap(); - if *count > 0 { - *count -= 1; - return Err(NodeError::ExecutionError( - "Intentional exec failure".to_string(), - )); - } + if self.exec_success { + Ok(format!("exec_{}", prep_result)) + } else { + Err(TestError) } - Ok(prep_result) } - async fn post( + fn post( &mut self, - store: &mut MemoryStorage, + _store: &mut S, _prep_result: Self::PrepResult, exec_result: Self::ExecResult, _context: &ExecutionContext, ) -> Result { - if self.should_fail_post { - return Err(NodeError::ExecutionError( - "Intentional post failure".to_string(), - )); + if self.post_success { + Ok(Action::simple(&exec_result)) + } else { + Err(TestError) } - - store - .set("last_result".to_string(), exec_result) - .map_err(|e| NodeError::StorageError(e.to_string()))?; - - Ok(self.action.clone()) - } - - fn name(&self) -> &str { - &self.name - } - - fn max_retries(&self) -> usize { - self.max_retries - } - - fn retry_delay(&self) -> Duration { - self.retry_delay - } - } - - #[tokio::test] - async fn test_node_successful_execution() { - let mut node = TestNode::new("test_node", "test message", Action::simple("continue")); - let mut store = MemoryStorage::new(); - - let result = node.run(&mut store).await; - assert!(result.is_ok()); - assert_eq!(result.unwrap(), Action::simple("continue")); - - let stored_result: Option = store.get("last_result").unwrap(); - assert!(stored_result.is_some()); - assert!(stored_result.unwrap().contains("test message")); - } - - #[tokio::test] - async fn test_node_prep_failure() { - let mut node = TestNode::new("test_node", "test message", Action::simple("continue")) - .with_prep_failure(); - let mut store = MemoryStorage::new(); - - let result = node.run(&mut store).await; - assert!(result.is_err()); - match result.unwrap_err() { - NodeError::PrepError(msg) => assert!(msg.contains("Prep failed")), - _ => panic!("Expected PrepError"), } } - #[tokio::test] - async fn test_node_exec_retry_success() { - let mut node = TestNode::new("test_node", "test message", Action::simple("continue")) - .with_exec_failure(1) - .with_retries(2); + #[test] + fn test_node_successful_execution() { + let mut node = TestNode::new(true, true, true); let mut store = MemoryStorage::new(); - let result = node.run(&mut store).await; + let result = node.run(&mut store); assert!(result.is_ok()); - assert_eq!(result.unwrap(), Action::simple("continue")); - } - - #[tokio::test] - async fn test_node_exec_retry_exhausted() { - let mut node = TestNode::new("test_node", "test message", Action::simple("continue")) - .with_exec_failure(3) - .with_retries(2); - let mut store = MemoryStorage::new(); - 
- let result = node.run(&mut store).await; - assert!(result.is_err()); - match result.unwrap_err() { - NodeError::ExecutionError(msg) => assert!(msg.contains("Exec failed")), - _ => panic!("Expected ExecutionError"), - } - } - - #[tokio::test] - async fn test_node_post_failure() { - let mut node = TestNode::new("test_node", "test message", Action::simple("continue")) - .with_post_failure(); - let mut store = MemoryStorage::new(); - - let result = node.run(&mut store).await; - assert!(result.is_err()); - match result.unwrap_err() { - NodeError::ExecutionError(msg) => assert!(msg.contains("Post failed")), - _ => panic!("Expected ExecutionError"), - } } #[test] - fn test_node_configuration() { - let node = TestNode::new("test_node", "test message", Action::simple("continue")) - .with_retries(5) - .with_delay(Duration::from_secs(2)); - - assert_eq!(node.name(), "test_node"); - assert_eq!(node.max_retries(), 5); - assert_eq!(node.retry_delay(), Duration::from_secs(2)); - } - - #[test] - fn test_node_default_name() { - let node = TestNode::new("custom_name", "test message", Action::simple("continue")); - assert_eq!(node.name(), "custom_name"); - } - - struct CustomFallbackNode { - fallback_result: String, - } - - impl CustomFallbackNode { - fn new(fallback_result: String) -> Self { - Self { fallback_result } - } - } - - #[async_trait] - impl Node for CustomFallbackNode { - type PrepResult = (); - type ExecResult = String; - type Error = NodeError; - - async fn prep( - &mut self, - _store: &MemoryStorage, - _context: &ExecutionContext, - ) -> Result { - Ok(()) - } - - async fn exec( - &mut self, - _prep_result: Self::PrepResult, - _context: &ExecutionContext, - ) -> Result { - Err(NodeError::ExecutionError("Always fails".to_string())) - } - - async fn post( - &mut self, - _store: &mut MemoryStorage, - _prep_result: Self::PrepResult, - _exec_result: Self::ExecResult, - _context: &ExecutionContext, - ) -> Result { - Ok(Action::simple("continue")) - } - - async fn exec_fallback( - &mut self, - _prep_result: Self::PrepResult, - _error: Self::Error, - _context: &ExecutionContext, - ) -> Result { - Ok(self.fallback_result.clone()) - } - } - - #[tokio::test] - async fn test_custom_exec_fallback() { - let mut node = CustomFallbackNode::new("fallback_success".to_string()); + fn test_node_prep_failure() { + let mut node = TestNode::new(false, true, true); let mut store = MemoryStorage::new(); - let result = node.run(&mut store).await; - assert!(result.is_ok()); + let result = node.run(&mut store); + assert!(result.is_err()); + assert!(matches!( + result.unwrap_err(), + NodeError::PreparationError(_) + )); } } diff --git a/examples/Cargo.toml b/examples/Cargo.toml deleted file mode 100644 index 05ce2aa..0000000 --- a/examples/Cargo.toml +++ /dev/null @@ -1,58 +0,0 @@ -[package] -name = "cosmoflow-examples" -version = "0.4.0" -edition = "2024" -publish = false - -[dependencies] -cosmoflow = { path = "../cosmoflow", features = ["storage-memory"] } -tokio = { version = "1.0", features = ["full"] } -async-trait = "0.1" -serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0" -rand = "0.8" -thiserror = "2.0" -reqwest = { version = "0.12", features = ["json"] } - -[[bin]] -name = "simple_loops" -path = "simple_loops.rs" - -[[bin]] -name = "hello_world" -path = "hello_world.rs" - -[[bin]] -name = "custom_node" -path = "custom_node.rs" - -[[bin]] -name = "unified_hello_world" -path = "unified_hello_world.rs" - -[[bin]] -name = "unified_counter" -path = "unified_counter.rs" - -[[bin]] -name = 
"flow_macro" -path = "flow_macro.rs" - -[[bin]] -name = "unified_shared_store" -path = "unified_shared_store.rs" - -[[bin]] -name = "llm_request" -path = "llm_request.rs" - -[[bin]] -name = "chat_loop" -path = "chat_loop.rs" - -[features] -default = [] -minimal = ["cosmoflow/minimal"] -basic = ["cosmoflow/basic"] -standard = ["cosmoflow/standard"] -full = ["cosmoflow/full"] diff --git a/examples/README.md b/examples/README.md deleted file mode 100644 index b37fbac..0000000 --- a/examples/README.md +++ /dev/null @@ -1,154 +0,0 @@ -# CosmoFlow Examples - -This directory contains comprehensive examples demonstrating CosmoFlow's capabilities from basic concepts to advanced patterns. These examples are designed to provide a progressive learning path for understanding CosmoFlow workflows. - -## āœ… Available Examples - -### 1. Hello World (`hello_world.rs`) - -A simple greeting workflow that introduces fundamental CosmoFlow concepts with minimal code. - -```bash -cd examples && cargo run --bin hello_world --features minimal -``` - ---- - -### 2. Custom Node (`custom_node.rs`) - -An sophisticated iterative workflow demonstrating stateful nodes, data analysis, and report generation. - -```bash -cd examples && cargo run --bin custom_node --features minimal -``` - ---- - -### 4. LLM Request (`llm_request.rs`) - -A real-world example demonstrating LLM API integration with environment-based configuration and comprehensive error handling. - -```bash -# Set environment variables first -export LLM_API_KEY="your_api_key" -export LLM_BASE_URL="https://api.openai.com/v1" -export LLM_MODEL="gpt-3.5-turbo" - -# Run the example -cargo run --bin llm_request --features basic -``` - -**Features:** -- Real HTTP API calls to LLM providers (OpenAI, Anthropic, Local APIs) -- Environment variable configuration -- Lightweight HTTP client without heavy SDK dependencies -- Comprehensive error handling and retry logic -- Copyable patterns for production use - -See `lightweight_llm_README.md` for detailed setup instructions. - ---- - -### 5. Flow Macro (`flow_macro.rs`) - -Demonstrates the powerful `flow!` macro for declarative workflow construction with custom action routing. - -```bash -cd examples && cargo run --bin flow_macro --features basic -``` - -**Syntax Example:** - -```rust -let workflow = flow! { - storage: SimpleStorage, - start: "decision", - nodes: { - "decision" : DecisionNode, - "success_path" : SuccessNode, - "error_path" : ErrorNode, - "final" : FinalNode, - }, - routes: { - "decision" - "default" => "success_path", - "decision" - "error" => "error_path", - "success_path" - "continue" => "final", - "error_path" - "continue" => "final", - } -}; -``` - -## šŸ”§ Additional Examples - -```bash -cargo run --bin unified_hello_world --features basic -cargo run --bin unified_counter --features basic -``` - -## šŸ—ļø Feature Levels - -CosmoFlow provides different feature tiers to match various use cases: - -| Feature Level | Description | Storage | Built-ins | Use Case | -|---------------|-------------|---------|-----------|----------| -| **minimal** | Core workflow functionality only | Custom only | None | Learning, custom implementations | -| **basic** | Adds memory storage backend | Custom + Memory | None | Development, testing | -| **standard** | Adds built-in node types | Custom + Memory | Basic nodes | Common workflows | -| **full** | Complete feature set | All backends | All nodes | Production applications | - -## šŸ“‹ Best Practices - -### Node Implementation - -1. 
**Three-Phase Pattern**: - - `prep()`: Read and validate inputs, prepare resources - - `exec()`: Perform core logic (keep stateless and idempotent when possible) - - `post()`: Write outputs, update state, and determine next action - -2. **Error Handling**: - - Use appropriate error types for different failure modes - - Implement retry logic for transient failures - - Provide meaningful error messages and fallback behavior - -3. **State Management**: - - Use SharedStore for inter-node data communication - - Keep node internal state minimal and recoverable - - Consider persistence strategies for long-running workflows - -### Flow Design - -1. **Workflow Structure**: - - Always validate flows before execution - - Use meaningful and consistent node/action naming - - Design and test all possible execution paths - -2. **Performance Considerations**: - - Set appropriate `max_steps` limits to prevent infinite loops - - Simple max_steps protection for iterative workflows - - Monitor execution metrics and optimize bottlenecks - -3. **Maintainability**: - - Document complex routing logic and business rules - - Use consistent naming conventions across the project - - Provide clear error messages and logging - -## šŸŽÆ Next Steps - -After working through these examples, consider: - -1. **Explore Built-in Features**: Try the `standard` and `full` feature sets -2. **Build Custom Workflows**: Combine patterns from multiple examples -3. **Production Deployment**: Implement proper error handling and monitoring -4. **Advanced Storage**: Explore Redis, file-based, or custom storage backends - -## šŸ“š Additional Resources - -- **Architecture**: `/docs/architecture.md` - System design and concepts -- **Features**: `/docs/features.md` - Complete feature documentation -- **Getting Started**: `/docs/getting-started.md` - Detailed setup guide -- **Built-in Nodes**: `/cosmoflow/src/builtin/` - Pre-built node implementations -- **Storage Backends**: `/cosmoflow/src/storage/` - Storage implementation examples - -## šŸ¤ Contributing - -Found an issue or want to improve an example? Check out the main project repository for contribution guidelines. \ No newline at end of file diff --git a/examples/custom_node.rs b/examples/custom_node.rs deleted file mode 100644 index e49ffc6..0000000 --- a/examples/custom_node.rs +++ /dev/null @@ -1,642 +0,0 @@ -//! Custom Node Example - Advanced Iterative Workflow with Data Analysis -//! -//! This example demonstrates sophisticated custom nodes that implement an iterative -//! counter workflow with data analysis and reporting. The workflow features: -//! -//! ## Workflow Behavior -//! - **Coordinator Node**: Orchestrates the iteration logic, deciding which counter to increment -//! - **Counter Nodes**: Two stateful counters (main increments by 5, secondary by 3) -//! - **Iterative Execution**: Counters alternate execution until reaching 6 total iterations -//! - **Statistics Analysis**: Analyzes counter data including averages, min/max, and growth rates -//! - **Report Generation**: Creates a formatted analysis report with all statistics -//! -//! ## Advanced Features Demonstrated -//! - **Stateful Nodes**: Nodes maintain internal state and persist data to shared store -//! - **Complex Business Logic**: Coordinator implements sophisticated iteration control -//! - **Three-Phase Execution**: Proper use of prep, exec, and post phases -//! - **Simple Workflows**: Uses max_steps for loop protection -//! - **Data Persistence**: Stores counter values and execution history -//! 
- **Statistical Analysis**: Calculates metrics and generates formatted reports -//! - **Custom Storage Backend**: Implements a complete storage interface -//! -//! ## Execution Flow -//! 1. Coordinator analyzes current state and decides next action -//! 2. Appropriate counter increments and stores its new value -//! 3. Counter returns control to coordinator for next iteration -//! 4. After 6 iterations, workflow transitions to analysis phase -//! 5. Statistics node calculates metrics from stored data -//! 6. Report node generates and displays final analysis -//! -//! To run this example: -//! ```bash -//! cd examples && cargo run --bin custom_node --features minimal -//! ``` - -use async_trait::async_trait; -use cosmoflow::{ - SharedStore, - action::Action, - flow::{FlowBackend, FlowBuilder}, - node::{ExecutionContext, Node, NodeError}, -}; -use serde::{Serialize, de::DeserializeOwned}; -use serde_json::Value; -use std::collections::HashMap; - -/// A simple in-memory storage implementation -#[derive(Debug, Clone)] -pub struct SimpleStorage { - data: HashMap, -} - -impl SimpleStorage { - pub fn new() -> Self { - Self { - data: HashMap::new(), - } - } -} - -impl Default for SimpleStorage { - fn default() -> Self { - Self::new() - } -} - -impl SharedStore for SimpleStorage { - type Error = SimpleStorageError; - - fn get(&self, key: &str) -> Result, Self::Error> { - match self.data.get(key) { - Some(value) => { - let deserialized = serde_json::from_value(value.clone()) - .map_err(|e| SimpleStorageError::DeserializationError(e.to_string()))?; - Ok(Some(deserialized)) - } - None => Ok(None), - } - } - - fn set(&mut self, key: String, value: T) -> Result<(), Self::Error> { - let json_value = serde_json::to_value(value) - .map_err(|e| SimpleStorageError::SerializationError(e.to_string()))?; - self.data.insert(key, json_value); - Ok(()) - } - - fn remove(&mut self, key: &str) -> Result, Self::Error> { - match self.data.remove(key) { - Some(value) => { - let deserialized = serde_json::from_value(value) - .map_err(|e| SimpleStorageError::DeserializationError(e.to_string()))?; - Ok(Some(deserialized)) - } - None => Ok(None), - } - } - - fn contains_key(&self, key: &str) -> Result { - Ok(self.data.contains_key(key)) - } - - fn keys(&self) -> Result, Self::Error> { - Ok(self.data.keys().cloned().collect()) - } - - fn clear(&mut self) -> Result<(), Self::Error> { - self.data.clear(); - Ok(()) - } - - fn len(&self) -> Result { - Ok(self.data.len()) - } -} - -#[derive(Debug, thiserror::Error)] -pub enum SimpleStorageError { - #[error("Serialization error: {0}")] - SerializationError(String), - #[error("Deserialization error: {0}")] - DeserializationError(String), -} - -/// A coordinator node that manages the counter workflow -struct CoordinatorNode { - main_count: usize, - secondary_count: usize, - max_iterations: usize, -} - -impl CoordinatorNode { - fn new(max_iterations: usize) -> Self { - Self { - main_count: 0, - secondary_count: 0, - max_iterations, - } - } -} - -#[async_trait] -impl Node for CoordinatorNode { - type PrepResult = (usize, usize); // (main_count, secondary_count) - type ExecResult = String; // Next action to take - type Error = NodeError; - - async fn prep( - &mut self, - store: &S, - _context: &ExecutionContext, - ) -> Result { - // Get current counts from store - if let Some(count) = store - .get::("main_counter_count") - .ok() - .flatten() - .and_then(|v| v.as_u64()) - { - self.main_count = count as usize; - } - - if let Some(count) = store - .get::("secondary_counter_count") - 
.ok() - .flatten() - .and_then(|v| v.as_u64()) - { - self.secondary_count = count as usize; - } - - Ok((self.main_count, self.secondary_count)) - } - - async fn exec( - &mut self, - (main_count, secondary_count): Self::PrepResult, - _context: &ExecutionContext, - ) -> Result { - let total_iterations = (main_count / 5) + (secondary_count / 3); // Calculate based on increments - - println!( - "šŸŽ® Coordinator: main={}, secondary={}, iterations={}/{}", - main_count, secondary_count, total_iterations, self.max_iterations - ); - - if total_iterations >= self.max_iterations { - Ok("analysis".to_string()) - } else if main_count <= secondary_count { - Ok("increment_main".to_string()) - } else { - Ok("increment_secondary".to_string()) - } - } - - async fn post( - &mut self, - _store: &mut S, - _prep_result: Self::PrepResult, - exec_result: Self::ExecResult, - _context: &ExecutionContext, - ) -> Result { - Ok(Action::simple(&exec_result)) - } - - fn name(&self) -> &str { - "CoordinatorNode" - } -} - -/// A counter node that tracks how many times it has been executed -#[derive(Debug)] -struct CounterNode { - name: String, - count: usize, - increment_by: usize, - max_count: Option, -} - -impl CounterNode { - fn new(name: impl Into, increment_by: usize) -> Self { - Self { - name: name.into(), - count: 0, - increment_by, - max_count: None, - } - } -} - -#[async_trait] -impl Node for CounterNode { - type PrepResult = usize; // Previous count - type ExecResult = usize; // New count - type Error = NodeError; - - async fn prep( - &mut self, - store: &S, - _context: &ExecutionContext, - ) -> Result { - let previous_count = self.count; - - // Check if we have stored count in the shared store - let store_key = format!("{}_count", self.name); - if let Some(count_value) = store - .get::(&store_key) - .ok() - .flatten() - .and_then(|v| v.as_u64()) - { - self.count = count_value as usize; - println!("šŸ“Š {} restored count from store: {}", self.name, self.count); - } - - Ok(previous_count) - } - - async fn exec( - &mut self, - prep_result: Self::PrepResult, - _context: &ExecutionContext, - ) -> Result { - // Check if we've reached the maximum count - #[allow(clippy::collapsible_if)] - if let Some(max) = self.max_count { - if self.count >= max { - return Err(NodeError::ValidationError(format!( - "Counter {} has reached maximum count: {}", - self.name, max - ))); - } - } - - // Increment the counter - self.count += self.increment_by; - - println!( - "šŸ”¢ {} count: {} -> {} (increment: {})", - self.name, prep_result, self.count, self.increment_by - ); - - Ok(self.count) - } - - async fn post( - &mut self, - store: &mut S, - _prep_result: Self::PrepResult, - exec_result: Self::ExecResult, - _context: &ExecutionContext, - ) -> Result { - // Store the current count - let store_key = format!("{}_count", self.name); - store - .set(store_key, Value::Number(exec_result.into())) - .map_err(|e| NodeError::StorageError(e.to_string()))?; - - // Store count history - let history_key = format!("{}_history", self.name); - let mut history: Vec = match store.get::(&history_key) { - Ok(Some(value)) => { - if let Some(array) = value.as_array() { - array - .iter() - .filter_map(|v| v.as_u64().map(|n| n as usize)) - .collect() - } else { - Vec::new() - } - } - _ => Vec::new(), - }; - - history.push(exec_result); - store - .set( - history_key, - Value::Array( - history - .into_iter() - .map(|n| Value::Number(n.into())) - .collect(), - ), - ) - .map_err(|e| NodeError::StorageError(e.to_string()))?; - - // Determine next action based on 
count - return to coordinator for iteration - if let Some(max) = self.max_count { - if exec_result >= max { - Ok(Action::simple("max_reached")) - } else { - Ok(Action::simple("return_to_coordinator")) - } - } else { - Ok(Action::simple("return_to_coordinator")) - } - } - - fn name(&self) -> &str { - &self.name - } -} - -/// A statistics node that analyzes counter data -struct StatisticsNode; - -#[async_trait] -impl Node for StatisticsNode { - type PrepResult = HashMap>; - type ExecResult = HashMap; - type Error = NodeError; - - async fn prep( - &mut self, - store: &S, - _context: &ExecutionContext, - ) -> Result { - println!("šŸ“ˆ Gathering statistics from all counters..."); - - let mut counter_histories = HashMap::new(); - - // Look for all counter histories in the store - let keys = ["main_counter_history", "secondary_counter_history"]; - - for key in keys { - #[allow(clippy::collapsible_if)] - if let Ok(Some(value)) = store.get::(key) { - if let Some(array) = value.as_array() { - let history: Vec = array - .iter() - .filter_map(|v| v.as_u64().map(|n| n as usize)) - .collect(); - - if !history.is_empty() { - let counter_name = key.replace("_history", ""); - counter_histories.insert(counter_name, history); - } - } - } - } - - Ok(counter_histories) - } - - async fn exec( - &mut self, - prep_result: Self::PrepResult, - _context: &ExecutionContext, - ) -> Result { - let mut statistics = HashMap::new(); - - for (counter_name, history) in prep_result { - if history.is_empty() { - continue; - } - - // Calculate basic statistics - let sum: usize = history.iter().sum(); - let count = history.len(); - let average = sum as f64 / count as f64; - - let min = *history.iter().min().unwrap() as f64; - let max = *history.iter().max().unwrap() as f64; - - // Calculate growth rate - let growth_rate = if history.len() > 1 { - let first = history[0] as f64; - let last = history[history.len() - 1] as f64; - if first > 0.0 { - (last - first) / first * 100.0 - } else { - 0.0 - } - } else { - 0.0 - }; - - println!("šŸ“Š {counter_name} statistics:"); - println!(" Count: {count}"); - println!(" Average: {average:.2}"); - println!(" Min: {min}, Max: {max}"); - println!(" Growth rate: {growth_rate:.1}%"); - - statistics.insert(format!("{counter_name}_average"), average); - statistics.insert(format!("{counter_name}_min"), min); - statistics.insert(format!("{counter_name}_max"), max); - statistics.insert(format!("{counter_name}_growth_rate"), growth_rate); - } - - Ok(statistics) - } - - async fn post( - &mut self, - store: &mut S, - _prep_result: Self::PrepResult, - exec_result: Self::ExecResult, - _context: &ExecutionContext, - ) -> Result { - // Store all statistics - for (key, value) in exec_result { - store - .set( - format!("stats_{key}"), - Value::Number( - serde_json::Number::from_f64(value).unwrap_or(serde_json::Number::from(0)), - ), - ) - .map_err(|e| NodeError::StorageError(e.to_string()))?; - } - - Ok(Action::simple("report")) - } - - fn name(&self) -> &str { - "StatisticsNode" - } -} - -/// A report node that generates a final summary -struct ReportNode; - -#[async_trait] -impl Node for ReportNode { - type PrepResult = HashMap; - type ExecResult = String; - type Error = NodeError; - - async fn prep( - &mut self, - store: &S, - _context: &ExecutionContext, - ) -> Result { - let mut stats = HashMap::new(); - - // Collect all statistics - let stat_keys = [ - "stats_main_counter_average", - "stats_main_counter_growth_rate", - "stats_secondary_counter_average", - "stats_secondary_counter_growth_rate", - ]; - - 
for key in stat_keys { - #[allow(clippy::collapsible_if)] - if let Ok(Some(value)) = store.get::(key) { - if let Some(number) = value.as_f64() { - stats.insert(key.to_string(), number); - } - } - } - - Ok(stats) - } - - async fn exec( - &mut self, - prep_result: Self::PrepResult, - _context: &ExecutionContext, - ) -> Result { - let mut report = String::new(); - report.push_str("šŸ“‹ COUNTER ANALYSIS REPORT\n"); - report.push_str("==========================\n\n"); - - if prep_result.is_empty() { - report.push_str("No statistics available.\n"); - } else { - for (key, value) in prep_result { - let display_key = key - .replace("stats_", "") - .replace("_", " ") - .split_whitespace() - .map(|word| { - let mut chars = word.chars(); - match chars.next() { - None => String::new(), - Some(first) => { - first.to_uppercase().collect::() + chars.as_str() - } - } - }) - .collect::>() - .join(" "); - - if key.contains("growth_rate") { - report.push_str(&format!("{display_key}: {value:.1}%\n")); - } else { - report.push_str(&format!("{display_key}: {value:.2}\n")); - } - } - } - - report.push_str("\nAnalysis completed successfully! šŸŽ‰"); - - println!("{}", report); - Ok(report) - } - - async fn post( - &mut self, - store: &mut S, - _prep_result: Self::PrepResult, - exec_result: Self::ExecResult, - _context: &ExecutionContext, - ) -> Result { - store - .set("final_report".to_string(), Value::String(exec_result)) - .map_err(|e| NodeError::StorageError(e.to_string()))?; - - Ok(Action::simple("complete")) - } - - fn name(&self) -> &str { - "ReportNode" - } -} - -#[tokio::main] -async fn main() -> Result<(), Box> { - println!("šŸš€ Starting Custom Node Example"); - println!("================================"); - - // Create a shared store - let mut store = SimpleStorage::new(); - - // Build a workflow that allows cycles for iterative behavior - let mut flow = FlowBuilder::new() - .start_node("coordinator") - .max_steps(50) - .node("coordinator", CoordinatorNode::new(6)) // 6 total increments - .node("main_counter", CounterNode::new("main_counter", 5)) - .node( - "secondary_counter", - CounterNode::new("secondary_counter", 3), - ) - .node("statistics", StatisticsNode) - .node("report", ReportNode) - // Coordinator routes to appropriate counter or analysis - .route("coordinator", "increment_main", "main_counter") - .route("coordinator", "increment_secondary", "secondary_counter") - .route("coordinator", "analysis", "statistics") - // Counters return to coordinator for iteration - .route("main_counter", "return_to_coordinator", "coordinator") - .route("main_counter", "max_reached", "coordinator") - .route("secondary_counter", "return_to_coordinator", "coordinator") - .route("secondary_counter", "max_reached", "coordinator") - // Analysis flow - .route("statistics", "report", "report") - .terminal_route("report", "complete") // Explicit termination - .build(); - - println!("šŸ“‹ Flow configuration:"); - println!(" Start node: {}", flow.config().start_node_id); - println!(" Max steps: {}", flow.config().max_steps); - println!(); - - // Validate the flow - if let Err(e) = flow.validate() { - eprintln!("āŒ Flow validation failed: {e}"); - return Err(e.into()); - } - println!("āœ… Flow validation passed"); - println!(); - - // Execute the workflow - println!("⚔ Executing custom node workflow..."); - println!("------------------------------------"); - - let start_time = std::time::Instant::now(); - let result = flow.execute(&mut store).await?; - let duration = start_time.elapsed(); - - println!(); - println!("šŸŽÆ 
Workflow completed!"); - println!("======================"); - println!(" Success: {}", result.success); - println!(" Steps executed: {}", result.steps_executed); - println!(" Final action: {}", result.final_action); - println!(" Last node: {}", result.last_node_id); - println!(" Execution path length: {}", result.execution_path.len()); - println!(" Duration: {duration:?}"); - println!(); - - // Show some final statistics from the store - println!("šŸ” Final store contents:"); - if let Ok(Some(main_count)) = store.get::("main_counter_count") { - println!(" Main counter final value: {main_count}"); - } - if let Ok(Some(secondary_count)) = store.get::("secondary_counter_count") { - println!(" Secondary counter final value: {secondary_count}"); - } - - println!("\nšŸ’” Key custom node features demonstrated:"); - println!(" - Stateful nodes that maintain internal state"); - println!(" - Complex preparation logic that reads from shared store"); - println!(" - Business logic validation in exec phase"); - println!(" - Rich post-processing that updates multiple store keys"); - println!(" - Conditional routing based on node state"); - println!(" - Data analysis and reporting capabilities"); - - Ok(()) -} diff --git a/examples/hello_world.rs b/examples/hello_world.rs deleted file mode 100644 index 70bcbad..0000000 --- a/examples/hello_world.rs +++ /dev/null @@ -1,299 +0,0 @@ -//! Hello World Example - CosmoFlow Minimal Features -//! -//! This example demonstrates the simplest possible CosmoFlow workflow using only -//! core features. It implements a basic greeting workflow that shows: -//! -//! ## Workflow Behavior -//! - **Hello Node**: Displays a greeting message and stores it in shared storage -//! - **Response Node**: Reads the greeting from storage and responds to it -//! - **Simple Communication**: Data flows between nodes via the shared store -//! -//! ## Core Features Demonstrated -//! - **Custom Storage Backend**: Complete implementation of SharedStore trait -//! - **Basic Node Implementation**: Simple prep, exec, and post phases -//! - **Minimal Dependencies**: Uses only CosmoFlow's core without built-ins -//! - **Data Communication**: Nodes sharing data via the SharedStore -//! - **Sequential Execution**: Linear workflow with simple routing -//! -//! ## Execution Flow -//! 1. Hello node generates and displays a greeting message -//! 2. Greeting is stored in the shared storage -//! 3. Response node retrieves the greeting from storage -//! 4. Response node generates and displays a response message -//! 5. Workflow completes successfully -//! -//! This is the perfect starting point for understanding CosmoFlow's core concepts. -//! -//! To run this example: -//! ```bash -//! cd examples && cargo run --bin hello_world --features minimal -//! 
``` - -use async_trait::async_trait; -use cosmoflow::{ - action::Action, - flow::{FlowBackend, FlowBuilder}, - node::{ExecutionContext, Node, NodeError}, - shared_store::SharedStore, -}; -use serde::{Serialize, de::DeserializeOwned}; -use std::collections::HashMap; - -/// A simple in-memory storage implementation -#[derive(Debug, Clone)] -pub struct SimpleStorage { - data: HashMap, -} - -impl SimpleStorage { - pub fn new() -> Self { - Self { - data: HashMap::new(), - } - } -} - -impl Default for SimpleStorage { - fn default() -> Self { - Self::new() - } -} - -impl SharedStore for SimpleStorage { - type Error = SimpleStorageError; - - fn get(&self, key: &str) -> Result, Self::Error> { - match self.data.get(key) { - Some(value) => { - let deserialized = serde_json::from_value(value.clone()) - .map_err(|e| SimpleStorageError::DeserializationError(e.to_string()))?; - Ok(Some(deserialized)) - } - None => Ok(None), - } - } - - fn set(&mut self, key: String, value: T) -> Result<(), Self::Error> { - let json_value = serde_json::to_value(value) - .map_err(|e| SimpleStorageError::SerializationError(e.to_string()))?; - self.data.insert(key, json_value); - Ok(()) - } - - fn remove(&mut self, key: &str) -> Result, Self::Error> { - match self.data.remove(key) { - Some(value) => { - let deserialized = serde_json::from_value(value) - .map_err(|e| SimpleStorageError::DeserializationError(e.to_string()))?; - Ok(Some(deserialized)) - } - None => Ok(None), - } - } - - fn contains_key(&self, key: &str) -> Result { - Ok(self.data.contains_key(key)) - } - - fn keys(&self) -> Result, Self::Error> { - Ok(self.data.keys().cloned().collect()) - } - - fn clear(&mut self) -> Result<(), Self::Error> { - self.data.clear(); - Ok(()) - } - - fn len(&self) -> Result { - Ok(self.data.len()) - } -} - -#[derive(Debug, thiserror::Error)] -pub enum SimpleStorageError { - #[error("Serialization error: {0}")] - SerializationError(String), - #[error("Deserialization error: {0}")] - DeserializationError(String), -} - -/// A simple greeting node -struct HelloNode { - message: String, -} - -impl HelloNode { - fn new(message: impl Into) -> Self { - Self { - message: message.into(), - } - } -} - -#[async_trait] -impl Node for HelloNode { - type PrepResult = String; - type ExecResult = String; - type Error = NodeError; - - async fn prep( - &mut self, - _store: &SimpleStorage, - _context: &ExecutionContext, - ) -> Result { - Ok(self.message.clone()) - } - - async fn exec( - &mut self, - prep_result: Self::PrepResult, - _context: &ExecutionContext, - ) -> Result { - let output = format!("🌟 {prep_result}"); - println!("{}", output); - Ok(output) - } - - async fn post( - &mut self, - store: &mut SimpleStorage, - _prep_result: Self::PrepResult, - exec_result: Self::ExecResult, - _context: &ExecutionContext, - ) -> Result { - // Store the greeting - store - .set("greeting".to_string(), exec_result) - .map_err(|e| NodeError::StorageError(e.to_string()))?; - - Ok(Action::simple("next")) - } - - fn name(&self) -> &str { - "HelloNode" - } -} - -/// A simple response node -struct ResponseNode; - -#[async_trait] -impl Node for ResponseNode { - type PrepResult = Option; - type ExecResult = String; - type Error = NodeError; - - async fn prep( - &mut self, - store: &SimpleStorage, - _context: &ExecutionContext, - ) -> Result { - match store.get("greeting") { - Ok(Some(greeting)) => Ok(Some(greeting)), - _ => Ok(None), - } - } - - async fn exec( - &mut self, - prep_result: Self::PrepResult, - _context: &ExecutionContext, - ) -> Result { - let response 
= match prep_result { - Some(greeting) => format!("✨ I received: '{greeting}'"), - None => "āš ļø No greeting received".to_string(), - }; - - println!("{}", response); - Ok(response) - } - - async fn post( - &mut self, - store: &mut SimpleStorage, - _prep_result: Self::PrepResult, - exec_result: Self::ExecResult, - _context: &ExecutionContext, - ) -> Result { - // Store the response - store - .set("response".to_string(), exec_result) - .map_err(|e| NodeError::StorageError(e.to_string()))?; - - Ok(Action::simple("complete")) - } - - fn name(&self) -> &str { - "ResponseNode" - } -} - -#[tokio::main] -async fn main() -> Result<(), Box> { - println!("šŸš€ Starting Hello World Example"); - println!("================================="); - println!("This example uses only CosmoFlow's core features."); - println!(); - - // Create our custom storage - let mut storage = SimpleStorage::new(); - - // Build a simple workflow - let mut flow = FlowBuilder::new() - .start_node("hello") - .node("hello", HelloNode::new("Hello from CosmoFlow minimal!")) - .node("response", ResponseNode) - .route("hello", "next", "response") - .terminal_route("response", "complete") // Explicit termination - .build(); - - println!("šŸ“‹ Flow configuration:"); - println!(" Start node: {}", flow.config().start_node_id); - println!(" Max steps: {}", flow.config().max_steps); - println!(); - - // Validate the flow - if let Err(e) = flow.validate() { - eprintln!("āŒ Flow validation failed: {e}"); - return Err(e.into()); - } - println!("āœ… Flow validation passed"); - println!(); - - // Execute the workflow - println!("⚔ Executing workflow..."); - println!("------------------------"); - - let start_time = std::time::Instant::now(); - let result = flow.execute(&mut storage).await?; - let duration = start_time.elapsed(); - - println!(); - println!("šŸŽÆ Workflow completed!"); - println!("======================"); - println!(" Success: {}", result.success); - println!(" Steps executed: {}", result.steps_executed); - println!(" Final action: {}", result.final_action); - println!(" Last node: {}", result.last_node_id); - println!(" Execution path: {:?}", result.execution_path); - println!(" Duration: {duration:?}"); - println!(); - - // Show the final state of our custom storage - println!("šŸ“Š Final storage state:"); - if let Ok(Some(greeting)) = storage.get::("greeting") { - println!(" greeting: {greeting}"); - } - if let Ok(Some(response)) = storage.get::("response") { - println!(" response: {response}"); - } - - println!(); - println!("šŸ’” Key minimal features demonstrated:"); - println!(" - Custom storage backend implementation"); - println!(" - Basic node backend implementation"); - println!(" - Core workflow execution without built-ins"); - println!(" - Manual storage interface usage"); - - Ok(()) -} diff --git a/examples/llm_request.md b/examples/llm_request.md deleted file mode 100644 index 8c25802..0000000 --- a/examples/llm_request.md +++ /dev/null @@ -1,142 +0,0 @@ -# LLM Request Example - -A CosmoFlow example demonstrating real LLM API integration using direct HTTP calls with minimal dependencies. - -## Quick Start - -### 1. Set Environment Variables - -```bash -export LLM_API_KEY="your_api_key" -export LLM_BASE_URL="https://api.openai.com/v1" -export LLM_MODEL="gpt-3.5-turbo" -``` - -### 2. 
Run the Example - -```bash -cargo run --bin llm_request -p cosmoflow-examples -``` - -## Features - -- 🌐 **Real API Integration** - Makes actual HTTP calls to LLM APIs -- šŸ”§ **Environment Configuration** - Credentials loaded from environment variables -- 🪶 **Lightweight** - Minimal dependencies, no heavy SDK overhead -- šŸŽÆ **Direct Implementation** - Clean code without unnecessary abstractions -- šŸ”„ **Error Handling** - Comprehensive error handling for API failures -- šŸ“‹ **Copyable Patterns** - Reusable code patterns for your own projects - -## Supported Providers - -### OpenAI -```bash -export LLM_API_KEY="sk-your-openai-key" -export LLM_BASE_URL="https://api.openai.com/v1" -export LLM_MODEL="gpt-3.5-turbo" # or gpt-4, gpt-4-turbo, etc. -``` - -### Anthropic Claude -```bash -export LLM_API_KEY="your-anthropic-key" -export LLM_BASE_URL="https://api.anthropic.com/v1" -export LLM_MODEL="claude-3-sonnet-20240229" -``` - -### Local APIs (Ollama, LocalAI) -```bash -export LLM_API_KEY="not-needed" -export LLM_BASE_URL="http://localhost:11434/v1" -export LLM_MODEL="llama2" -``` - -## Architecture - -The example demonstrates a complete CosmoFlow workflow with these components: - -1. **`LlmClient`** - Lightweight HTTP client for API calls -2. **`LlmConfig`** - Configuration management from environment variables -3. **`LlmNode`** - Workflow node that makes actual API calls -4. **Utility Functions** - Request/response handling helpers - -### Workflow Steps - -``` -Start → Setup → LLM API Call → Display Result -``` - -1. **Start**: Initialize workflow with logging -2. **Setup**: Load configuration and prepare prompt -3. **LLM**: Make HTTP request to LLM API -4. **Display**: Show AI response - -## Customization - -### Change the Prompt - -Edit the prompt in `DataSetupNode::post()`: - -```rust -store.set("user_prompt".to_string(), "Your custom prompt").unwrap(); -``` - -### Add Request Parameters - -Extend `create_chat_request()` for additional options: - -```rust -pub fn create_chat_request(model: &str, messages: Vec, temperature: f32) -> Value { - json!({ - "model": model, - "messages": messages, - "temperature": temperature, - "max_tokens": 1000 - }) -} -``` - -### Support New Providers - -1. Adjust request format in `create_chat_request()` -2. Modify response parsing in `extract_content()` -3. Update authentication in `LlmNode` - -## Testing - -```bash -cargo test --bin llm_request -p cosmoflow-examples -``` - -Tests cover: -- Utility function validation -- Configuration loading -- HTTP client construction - -## Troubleshooting - -### Environment Variables Not Set -``` -āŒ Configuration Error: LLM_API_KEY environment variable not set -``` -**Solution**: Export all required environment variables in your shell. - -### API Request Failed -``` -āŒ LLM API request failed: ... -``` -**Solutions**: -- Verify API key is valid and has sufficient credits -- Check base URL is correct for your provider -- Ensure model name is supported - -### Response Parsing Failed -``` -āŒ Failed to extract content from LLM response -``` -**Solution**: The API response format may differ. Check provider documentation and adjust `extract_content()` function. - -## Security - -- āœ… Never commit API keys to version control -- āœ… Use environment variables for credentials -- āœ… API keys are masked in logs (only first 10 chars shown) diff --git a/examples/simple_loops.rs b/examples/simple_loops.rs deleted file mode 100644 index 8eadfaf..0000000 --- a/examples/simple_loops.rs +++ /dev/null @@ -1,142 +0,0 @@ -//! 
# Simple Loop Example -//! -//! This demonstrates how to create loops using just the existing flow design -//! without any special loop constructs - just nodes, routes, and conditions. - -use async_trait::async_trait; -use cosmoflow::prelude::*; - -/// A simple counter node that can loop back to itself -struct SimpleCounterNode { - name: String, - key: String, - increment: i32, - max_count: i32, -} - -impl SimpleCounterNode { - fn new(name: &str, key: &str, increment: i32, max_count: i32) -> Self { - Self { - name: name.to_string(), - key: key.to_string(), - increment, - max_count, - } - } -} - -#[async_trait] -impl Node for SimpleCounterNode { - type PrepResult = (); - type ExecResult = (); - type Error = NodeError; - - fn name(&self) -> &str { - &self.name - } - - async fn prep( - &mut self, - _store: &MemoryStorage, - _context: &ExecutionContext, - ) -> Result<(), Self::Error> { - Ok(()) - } - - async fn exec( - &mut self, - _prep_result: (), - _context: &ExecutionContext, - ) -> Result<(), Self::Error> { - Ok(()) - } - - async fn post( - &mut self, - store: &mut MemoryStorage, - _prep_result: (), - _exec_result: (), - _context: &ExecutionContext, - ) -> Result { - // Get current value - let current: i32 = store - .get(&self.key) - .map_err(|e| NodeError::from(e.to_string()))? - .unwrap_or(0); - let new_value = current + self.increment; - - // Store new value - store - .set(self.key.clone(), new_value) - .map_err(|e| NodeError::from(e.to_string()))?; - - println!("šŸ“Š {}: {} -> {}", self.key, current, new_value); - - // Simple condition: continue if below max, exit if reached - if new_value < self.max_count { - Ok(Action::simple("continue")) // This will route back to self - } else { - Ok(Action::simple("done")) // This will exit the loop - } - } -} - -#[tokio::main] -async fn main() -> Result<(), Box> { - println!("šŸ”„ Simple Loop Example"); - println!("======================\n"); - - let mut store = MemoryStorage::new(); - - // Example 1: Simple count loop - just route back to self! - println!("šŸ”¢ Count Loop (increment by 2 until >= 10)"); - println!("-------------------------------------------"); - - let mut flow = FlowBuilder::new() - .start_node("counter") - .max_steps(100) // Allow intentional loops - .node("counter", SimpleCounterNode::new("counter", "count", 2, 10)) - .self_route("counter", "continue") // Loop back to self - more explicit! 
- .terminal_route("counter", "done") // Exit when done - .build(); - - let result = flow.execute(&mut store).await?; - println!("āœ… Loop completed in {} steps", result.steps_executed); - - let final_count: i32 = store.get("count")?.unwrap_or(0); - println!("šŸ“‹ Final count: {final_count}\n"); - - // Example 2: Two-node loop - println!("šŸ”„ Two-Node Loop"); - println!("----------------"); - - // Reset counter - store.set("count2".to_string(), 0)?; - - let mut flow2 = FlowBuilder::new() - .start_node("node_a") - .max_steps(100) - .node("node_a", SimpleCounterNode::new("node_a", "count2", 1, 5)) - .node("node_b", SimpleCounterNode::new("node_b", "count2", 2, 5)) - .route("node_a", "continue", "node_b") // A -> B - .route("node_b", "continue", "node_a") // B -> A (creates loop) - .terminal_route("node_a", "done") // Exit from A - .terminal_route("node_b", "done") // Exit from B - .build(); - - let result2 = flow2.execute(&mut store).await?; - println!( - "āœ… Two-node loop completed in {} steps", - result2.steps_executed - ); - - let final_count2: i32 = store.get("count2")?.unwrap_or(0); - println!("šŸ“‹ Final count: {final_count2}"); - - println!("\nšŸŽÆ Key Insight: Loops are just routes + conditions!"); - println!("šŸ’” No special loop constructs needed - the existing"); - println!(" flow design already supports all loop patterns."); - println!("✨ New .self_route() method makes intent clearer!"); - - Ok(()) -}
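The hunk above removes `examples/simple_loops.rs`, the one place the self-route loop idiom was written out end to end. For reference, a condensed sketch of that idiom follows. It uses only the API surface visible in the deleted code (`cosmoflow::prelude`, `FlowBuilder`, `Action::simple`, `MemoryStorage`, `NodeError`, `ExecutionContext`, `.self_route()`, `.terminal_route()`); the `Node<MemoryStorage>` storage parameter and the `Result<Action, Self::Error>` / `Box<dyn std::error::Error>` return types are reconstructions, since angle-bracketed generics were dropped in this patch's rendering.

```rust
// Sketch only: condenses the removed simple_loops.rs; generic parameters are assumed.
use async_trait::async_trait;
use cosmoflow::prelude::*;

/// Increments a value in shared storage and loops on itself until a limit.
struct LoopCounter {
    key: String,
    increment: i32,
    max: i32,
}

#[async_trait]
impl Node<MemoryStorage> for LoopCounter {
    type PrepResult = ();
    type ExecResult = ();
    type Error = NodeError;

    fn name(&self) -> &str {
        "loop_counter"
    }

    async fn prep(&mut self, _store: &MemoryStorage, _ctx: &ExecutionContext) -> Result<(), Self::Error> {
        Ok(())
    }

    async fn exec(&mut self, _prep: (), _ctx: &ExecutionContext) -> Result<(), Self::Error> {
        Ok(())
    }

    async fn post(
        &mut self,
        store: &mut MemoryStorage,
        _prep: (),
        _exec: (),
        _ctx: &ExecutionContext,
    ) -> Result<Action, Self::Error> {
        // Read, increment, and write back the shared counter.
        let current: i32 = store
            .get(&self.key)
            .map_err(|e| NodeError::from(e.to_string()))?
            .unwrap_or(0);
        let next = current + self.increment;
        store
            .set(self.key.clone(), next)
            .map_err(|e| NodeError::from(e.to_string()))?;

        // "continue" routes back to this node; "done" hits the terminal route.
        if next < self.max {
            Ok(Action::simple("continue"))
        } else {
            Ok(Action::simple("done"))
        }
    }
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut store = MemoryStorage::new();

    let mut flow = FlowBuilder::new()
        .start_node("counter")
        .max_steps(100) // guard against a runaway loop
        .node("counter", LoopCounter { key: "count".into(), increment: 2, max: 10 })
        .self_route("counter", "continue") // loop back to the same node
        .terminal_route("counter", "done") // exit the loop
        .build();

    let result = flow.execute(&mut store).await?;
    println!("loop finished in {} steps", result.steps_executed);
    Ok(())
}
```

The loop is nothing more than a route whose target is the emitting node itself; `max_steps` remains the only guard against it running forever.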