diff --git a/misc/block-time-analyzer/BlockTimeTrap.sol b/misc/block-time-analyzer/BlockTimeTrap.sol
new file mode 100644
index 0000000..acb9ac1
--- /dev/null
+++ b/misc/block-time-analyzer/BlockTimeTrap.sol
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.20;
+
+import "contracts/interfaces/ITrap.sol";
+
+contract BlockTimeTrap is ITrap {
+    struct CollectOutput {
+        uint256 blockTimestamp;
+    }
+
+    function collect() external view returns (bytes memory) {
+        return abi.encode(CollectOutput({blockTimestamp: block.timestamp}));
+    }
+
+    function shouldRespond(bytes[] calldata data) external pure returns (bool, bytes memory) {
+        if (data.length < 2) {
+            return (false, bytes(""));
+        }
+
+        CollectOutput memory recent = abi.decode(data[0], (CollectOutput));
+        CollectOutput memory older = abi.decode(data[1], (CollectOutput));
+
+        uint256 timeDifference = recent.blockTimestamp - older.blockTimestamp;
+
+        // Trigger if the time between blocks is unusually long (e.g., > 30 seconds)
+        if (timeDifference > 30) {
+            return (true, abi.encode("High latency detected between blocks!"));
+        }
+
+        return (false, bytes(""));
+    }
+}
diff --git a/misc/block-time-analyzer/EventLogger.sol b/misc/block-time-analyzer/EventLogger.sol
new file mode 100644
index 0000000..4b03338
--- /dev/null
+++ b/misc/block-time-analyzer/EventLogger.sol
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.20;
+
+contract EventLogger {
+    address public owner;
+    uint256 public eventCount;
+
+    event Sighting(uint256 indexed eventId, string message, uint256 timestamp);
+
+    constructor() {
+        owner = msg.sender;
+    }
+
+    function recordSighting(string memory message) external {
+        eventCount++;
+        emit Sighting(eventCount, message, block.timestamp);
+    }
+}
diff --git a/misc/block-time-analyzer/README.md b/misc/block-time-analyzer/README.md
new file mode 100644
index 0000000..f181485
--- /dev/null
+++ b/misc/block-time-analyzer/README.md
@@ -0,0 +1,26 @@
+# Block Time Analyzer Trap
+
+This trap provides a real-time monitoring solution for blockchain network health by analyzing the time between consecutive blocks. It serves as a practical and effective example of **Rate of Change Analysis** using Drosera.
+
+## What It Does
+
+The primary function of this trap is to detect potential network-level issues, such as heavy congestion, validator problems, or early signs of a chain liveness failure. It acts as a decentralized watchdog for the blockchain's pulse.
+
+## How It Works
+
+The trap's logic is straightforward and reliable, utilizing the most fundamental data on the blockchain:
+
+- **`BlockTimeTrap.sol`**: The main trap contract.
+  - **Collects:** The `block.timestamp` of every new block as it is produced.
+  - **Analyzes:** It calculates the time difference (delta) between the timestamp of the most recent block and the one immediately preceding it.
+  - **Responds:** If the time difference exceeds a predefined threshold (e.g., 30 seconds), the trap's `shouldRespond` function returns `true`, signaling that an incident has occurred.
+
+- **`EventLogger.sol`**: A simple responder contract. When the trap is triggered, the Drosera network calls the `recordSighting` function on this contract, creating a permanent, on-chain log of the high-latency event.
+
+## Real-World Use Case
+
+This trap is highly useful for any protocol, staking provider, or infrastructure service that depends on the consistent and timely production of blocks.
+
+- **Infrastructure Monitoring:** A DevOps team for a large exchange or dApp could use this trap to get immediate, decentralized alerts about network instability, allowing them to react before their users are impacted.
+- **Staking Services:** A liquid staking protocol could use this trap to monitor for validator downtime or network partitions that could affect staking rewards or network security.
+- **Automated Alerting:** The on-chain event from the `EventLogger` can be easily indexed by services like The Graph or integrated with off-chain tools like Zapier to trigger alerts in Slack, Telegram, or PagerDuty, creating a fully automated monitoring and alerting pipeline.