diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..3b41682 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +/mvnw text eol=lf +*.cmd text eol=crlf diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..b0f0506 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,382 @@ +name: CI/CD Pipeline + +on: + push: + branches: [ main ] + tags: [ 'v*' ] + pull_request: + branches: [ main ] + +permissions: + contents: write + actions: read + checks: write + security-events: write + pull-requests: write + +env: + JAVA_VERSION: '17' + MAVEN_OPTS: > + -Dmaven.repo.local=.m2/repository + -Xmx2048m + -XX:+UseG1GC + -Daether.connector.http.connectionMaxTtl=25 + -Dmaven.artifact.threads=8 + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + validate: + name: Code Quality & Security + runs-on: ubuntu-latest + timeout-minutes: 20 + + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Setup JDK + uses: actions/setup-java@v4 + with: + java-version: ${{ env.JAVA_VERSION }} + distribution: 'temurin' + + - name: Validate Project + uses: nick-fields/retry@v3 + with: + timeout_minutes: 10 + max_attempts: 2 + retry_wait_seconds: 30 + command: mvn clean validate compile -B --no-transfer-progress -DskipTests + + - name: Verify Dependencies + run: | + mvn dependency:analyze -B --no-transfer-progress -Pquality + mvn dependency:tree -B --no-transfer-progress | tee dependency-tree.txt + + - name: Code Style Check (Spotless) + run: mvn spotless:check -B --no-transfer-progress + + - name: Security Analysis (SpotBugs) + run: mvn spotbugs:check -B --no-transfer-progress + + - name: Upload Dependency Tree + uses: actions/upload-artifact@v4 + with: + name: dependency-analysis + path: dependency-tree.txt + retention-days: 7 + + test: + name: Test Suite (Java ${{ matrix.java }}) + runs-on: ubuntu-latest + needs: validate + 
timeout-minutes: 25 + strategy: + fail-fast: false + matrix: + java: [ 17, 21, 22 ] + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup JDK ${{ matrix.java }} + uses: actions/setup-java@v4 + with: + java-version: ${{ matrix.java }} + distribution: 'temurin' + + - name: Run Tests & Coverage Check + uses: nick-fields/retry@v3 + with: + timeout_minutes: 20 + max_attempts: 3 + retry_wait_seconds: 30 + command: mvn clean verify -B --no-transfer-progress -Djava.version=${{ matrix.java }} + + - name: List test report files + if: always() && matrix.java == 17 + run: | + echo "Surefire reports:" + ls -l target/surefire-reports || true + echo "Failsafe reports:" + ls -l target/failsafe-reports || true + echo "JaCoCo reports:" + ls -l target/site/jacoco* || true + + - name: Upload coverage reports to Codecov + if: matrix.java == 17 + uses: codecov/codecov-action@v5 + with: + files: target/site/jacoco-merged/jacoco.xml,target/site/jacoco/jacoco.xml + token: ${{ secrets.CODECOV_TOKEN }} + fail_ci_if_error: false + + - name: Upload Test Results + uses: dorny/test-reporter@v1 + if: always() && matrix.java == 17 + with: + name: Test Results (Java ${{ matrix.java }}) + path: target/surefire-reports/TEST-*.xml,target/failsafe-reports/TEST-*.xml + reporter: java-junit + fail-on-error: false + + build: + name: Build & Package + runs-on: ubuntu-latest + needs: [validate, test] + timeout-minutes: 15 + outputs: + version: ${{ steps.version.outputs.version }} + is-snapshot: ${{ steps.version.outputs.is-snapshot }} + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup JDK + uses: actions/setup-java@v4 + with: + java-version: ${{ env.JAVA_VERSION }} + distribution: 'temurin' + + - name: Extract Version + id: version + run: | + VERSION=$(mvn help:evaluate -Dexpression=project.version -q -DforceStdout) + echo "version=$VERSION" >> $GITHUB_OUTPUT + echo "is-snapshot=$(echo $VERSION | grep -q SNAPSHOT && echo true || echo false)" >> $GITHUB_OUTPUT + 
+ - name: Build Package + uses: nick-fields/retry@v3 + with: + timeout_minutes: 10 + max_attempts: 2 + retry_wait_seconds: 30 + command: mvn clean package -B --no-transfer-progress -DskipTests + + - name: Generate Documentation + run: mvn javadoc:javadoc -B --no-transfer-progress + + - name: Verify JAR Structure + run: | + jar -tf target/logdash-*.jar | head -20 + echo "JAR size: $(du -h target/logdash-*.jar | cut -f1)" + + - name: Upload Build Artifacts + uses: actions/upload-artifact@v4 + with: + name: build-artifacts-${{ steps.version.outputs.version }} + path: | + target/*-${{ steps.version.outputs.version }}.jar + !target/*-tests.jar + !target/original-*.jar + target/site/apidocs/** + retention-days: ${{ steps.version.outputs.is-snapshot == 'true' && 7 || 90 }} + + security-scan: + name: Security Scanning + runs-on: ubuntu-latest + needs: validate + timeout-minutes: 15 + if: > + (github.event_name == 'push' && github.ref == 'refs/heads/main') || + (github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 'security-scan')) || + startsWith(github.ref, 'refs/tags/v') + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup JDK + uses: actions/setup-java@v4 + with: + java-version: ${{ env.JAVA_VERSION }} + distribution: 'temurin' + + - name: Build for Security Scan + run: mvn clean compile -B --no-transfer-progress -DskipTests + + - name: Run Trivy Scan + uses: aquasecurity/trivy-action@master + with: + scan-type: 'fs' + scan-ref: '.' 
+ format: 'sarif' + output: 'trivy-results.sarif' + + - name: Upload SARIF + uses: github/codeql-action/upload-sarif@v3 + if: always() + with: + sarif_file: 'trivy-results.sarif' + continue-on-error: true + + release: + name: Release to GitHub Packages + runs-on: ubuntu-latest + needs: [build, security-scan] + timeout-minutes: 15 + if: startsWith(github.ref, 'refs/tags/v') + + permissions: + contents: write + packages: write + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup JDK + uses: actions/setup-java@v4 + with: + java-version: ${{ env.JAVA_VERSION }} + distribution: 'temurin' + + - name: Configure Maven Settings + uses: whelk-io/maven-settings-xml-action@v22 + with: + servers: | + [ + { + "id": "github", + "username": "${env.GITHUB_ACTOR}", + "password": "${env.GITHUB_TOKEN}" + } + ] + + - name: Extract Version Info + id: version + run: | + VERSION=$(mvn help:evaluate -Dexpression=project.version -q -DforceStdout) + echo "version=$VERSION" >> $GITHUB_OUTPUT + echo "tag=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT + + # Validate version matches tag + if [[ "v$VERSION" != "${GITHUB_REF#refs/tags/}" ]]; then + echo "::error::Version mismatch: POM version ($VERSION) doesn't match tag (${GITHUB_REF#refs/tags/})" + exit 1 + fi + + - name: Build and Deploy to GitHub Packages + run: | + mvn clean deploy -B --no-transfer-progress \ + -Prelease \ + -DperformRelease=true \ + -DskipTests=true + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Download Build Artifacts + uses: actions/download-artifact@v4 + with: + pattern: build-artifacts-* + merge-multiple: true + + - name: Prepare Release Assets + run: | + mkdir -p release-assets + cp target/logdash-${{ steps.version.outputs.version }}.jar release-assets/ + cp target/logdash-${{ steps.version.outputs.version }}-sources.jar release-assets/ + cp target/logdash-${{ steps.version.outputs.version }}-javadoc.jar release-assets/ + + # Create checksums + cd release-assets + sha256sum *.jar > 
checksums.sha256 + cd .. + + - name: Create GitHub Release + uses: softprops/action-gh-release@v2 + with: + files: | + release-assets/*.jar + release-assets/checksums.sha256 + generate_release_notes: true + draft: false + body: | + # 🚀 Logdash Java SDK + + Official Java SDK for [Logdash.io](https://logdash.io) – Zero-configuration observability for developers. + + ## ✨ Features + + - **Zero Configuration**: Start logging and tracking metrics in seconds + - **Real-time Dashboard**: Cloud-hosted interface with live data updates + - **Structured Logging**: Multiple log levels with rich context support + - **Custom Metrics**: Track counters, gauges, and business metrics + - **Async & Fast**: Non-blocking, production-ready, framework-agnostic + - **Java 17+ Compatible**: Works with Java 17, 21, 22+ + + ## 📦 Installation + + **Maven** + ```xml + + io.logdash + logdash + ${{ steps.version.outputs.version }} + + ``` + + **Gradle** + ```gradle + implementation 'io.logdash:logdash:${{ steps.version.outputs.version }}' + ``` + + **Direct JAR Download:** + [Download from GitHub Releases](https://github.com/logdash-io/java-sdk/releases) + + ## 🏁 Quick Start + + ```java + var logdash = Logdash.builder() + .apiKey("your-api-key") + .build(); + + logdash.logger().info("Application started"); + logdash.metrics().mutate("app_starts", 1); + ``` + + ## ⚙️ Requirements + + - Java 17 or higher + - Internet connection + - Logdash API key ([get yours](https://logdash.io)) + + ## 📖 Documentation & Support + + - [Full Documentation](https://logdash.io/docs) + - [Live Demo](https://logdash.io/demo-dashboard) + - [GitHub Issues](https://github.com/logdash-io/java-sdk/issues) + - [Discord Community](https://discord.gg/naftPW4Hxe) + - [Email Support](mailto:contact@logdash.io) + + --- + + _See below for full release notes and change log._ + + - name: Notify Deployment Success + run: | + echo "✅ Successfully deployed logdash:${{ steps.version.outputs.version }} to GitHub Packages" + echo "📦 
Available at: https://github.com/logdash-io/java-sdk/packages" + + notification: + name: Notification + runs-on: ubuntu-latest + needs: [validate, test, build, security-scan] + if: always() && github.ref == 'refs/heads/main' + + steps: + - name: Check Workflow Status + run: | + if [[ "${{ needs.validate.result }}" == "failure" || "${{ needs.test.result }}" == "failure" || "${{ needs.build.result }}" == "failure" ]]; then + echo "❌ Workflow failed on main branch" + echo "::error::Critical job failed on main branch" + else + echo "✅ Workflow completed successfully" + fi diff --git a/.mvn/wrapper/maven-wrapper.properties b/.mvn/wrapper/maven-wrapper.properties new file mode 100644 index 0000000..d58dfb7 --- /dev/null +++ b/.mvn/wrapper/maven-wrapper.properties @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+wrapperVersion=3.3.2 +distributionType=only-script +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.9.9/apache-maven-3.9.9-bin.zip diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..e4be4e1 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 logdash.io + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/README.md b/README.md index 0739af0..d4fa52b 100644 --- a/README.md +++ b/README.md @@ -1 +1,436 @@ # Logdash Java SDK + +[![GitHub Release](https://img.shields.io/github/v/release/logdash-io/java-sdk)](https://github.com/logdash-io/java-sdk/releases) +[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) +[![Java Version](https://img.shields.io/badge/Java-17+-blue.svg)](https://openjdk.java.net/projects/jdk/17/) +[![CI/CD](https://github.com/logdash-io/java-sdk/workflows/CI/CD%20Pipeline/badge.svg)](https://github.com/logdash-io/java-sdk/actions) + +Official Java SDK for [Logdash.io](https://logdash.io/) - Zero-configuration observability platform designed for developers working on side projects and prototypes. + +## Why Logdash? + +Most observability solutions feel overwhelming for hobby projects. Logdash provides instant logging and real-time +metrics without complex configurations. Just add the SDK and start monitoring your application immediately. + +## Features + +- **🚀 Zero Configuration**: Start logging and tracking metrics in seconds +- **📊 Real-time Dashboard**: Cloud-hosted interface with live data updates +- **📝 Structured Logging**: Multiple log levels with rich context support +- **📈 Custom Metrics**: Track counters, gauges, and business metrics effortlessly +- **⚡ Asynchronous**: Non-blocking operations with automatic resource management +- **🛡️ Production Ready**: Built with enterprise-grade patterns and error handling +- **🔧 Framework Agnostic**: Works with Spring Boot, Quarkus, Micronaut, or standalone apps +- **☕ Java 17+ Compatible**: Supports Java 17, 21, and all newer versions + +## Quick Start + +### Installation + +**Option 1: Download from GitHub Releases (Recommended)** + +1. Download the latest JAR from [GitHub Releases](https://github.com/logdash-io/java-sdk/releases) +2. 
Add to your project classpath + +**Option 2: GitHub Packages (Maven/Gradle)** + +Add GitHub Packages repository to your build configuration: + +**Maven:** + +```xml + + + github + https://maven.pkg.github.com/logdash-io/java-sdk + + + + + io.logdash + logdash + 0.1.0 + +``` + +**Gradle:** + +```gradle +repositories { + maven { + url = uri("https://maven.pkg.github.com/logdash-io/java-sdk") + } +} + +dependencies { + implementation 'io.logdash:logdash:0.1.0' +} +``` + +**Option 3: Local Installation** + +```bash +# Clone and install locally +git clone https://github.com/logdash-io/java-sdk.git +cd java-sdk +mvn clean install +``` + +Then use in your project: + +```xml + + io.logdash + logdash + 0.1.0 + +``` + +> **Note:** Maven Central publication is planned for future releases. For now, use GitHub Releases or GitHub Packages. + +### Basic Usage + +```java +import io.logdash.sdk.Logdash; +import java.util.Map; + +public class Application { + public static void main(String[] args) { + // Initialize with your API key from https://logdash.io + var logdash = Logdash.builder() + .apiKey("your-api-key-here") + .build(); + + var logger = logdash.logger(); + var metrics = logdash.metrics(); + + // Start logging and tracking metrics immediately + logger.info("Application started"); + metrics.mutate("app_starts", 1); + + // Your application logic here + processUserRequest(logger, metrics); + + // Graceful shutdown (optional) + logdash.flush(); + logdash.close(); + } + + private static void processUserRequest(var logger, var metrics) { + logger.info("Processing user request", Map.of( + "userId", 12345, + "endpoint", "/api/users", + "method", "GET" + )); + + metrics.set("active_users", 150); + metrics.mutate("api_requests", 1); + metrics.mutate("response_time_ms", -25); + } +} +``` + +### Try-with-resources Pattern + +```java +public class Application { + public static void main(String[] args) { + try (var logdash = Logdash.create("your-api-key")) { + 
logdash.logger().info("Application started"); + logdash.metrics().mutate("app.starts", 1); + + // Your application code here + + // Automatic cleanup on close + } + } +} +``` + +## Framework Integration + +### Spring Boot + +**Configuration:** + +```yaml +# application.yml +logdash: + api-key: ${LOGDASH_API_KEY:your-development-key} + enable-console-output: false +``` + +**Integration:** + +```java +@SpringBootApplication +public class Application { + + @Value("${logdash.api-key}") + private String logdashApiKey; + + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } + + @Bean + @ConditionalOnProperty(name = "logdash.api-key") + public Logdash logdash() { + return Logdash.builder() + .apiKey(logdashApiKey) + .enableConsoleOutput(false) // Use Spring's logging + .build(); + } +} + +@RestController +public class UserController { + + private final LogdashLogger logger; + private final LogdashMetrics metrics; + + public UserController(Logdash logdash) { + this.logger = logdash.logger(); + this.metrics = logdash.metrics(); + } + + @GetMapping("/users/{id}") + public User getUser(@PathVariable Long id) { + logger.info("Fetching user", Map.of("userId", id)); + metrics.mutate("user.fetch.requests", 1); + + var user = userService.findById(id); + + logger.debug("User retrieved", Map.of( + "userId", id, + "username", user.getUsername() + )); + + return user; + } +} +``` + +### Standalone Applications + +```java +public class StandaloneApp { + public static void main(String[] args) { + var logdash = Logdash.builder() + .apiKey(System.getenv("LOGDASH_API_KEY")) + .build(); + + var logger = logdash.logger(); + var metrics = logdash.metrics(); + + // Add shutdown hook for graceful cleanup + Runtime.getRuntime().addShutdownHook(new Thread(() -> { + logger.info("Application shutting down"); + logdash.close(); + })); + + // Your application logic + runApplication(logger, metrics); + } +} +``` + +## Logging + +### Log Levels & Context + 
+```java +var logger = logdash.logger(); + +// Simple messages +logger.error("Database connection failed"); +logger.warn("High memory usage detected"); +logger.info("User session started"); +logger.http("GET /api/health - 200 OK"); +logger.verbose("Loading user preferences"); +logger.debug("Cache hit for key: user_123"); +logger.silly("Detailed trace information"); + +// Structured logging with context +logger.info( + "Payment processed", + Map.of( + "userId", 12345, + "amount", 29.99, + "currency", "USD", + "paymentMethod", "credit_card", + "transactionId", "txn_abc123" + ) +); + +logger.error( + "API call failed", + Map.of( + "endpoint", "https://external-api.com/data", + "statusCode", 503, + "retryAttempt", 3, + "errorMessage", "Service temporarily unavailable" + ) +); +``` + +## Metrics + +Track business and technical metrics to monitor application performance: + +```java +var metrics = logdash.metrics(); + +// Counters - track events and occurrences +metrics.mutate("page_views",1); +metrics.mutate("api_requests",5); +metrics.mutate("response_time_ms",-100); +metrics.mutate("available_licenses",-1); + +// Gauges - track current values +metrics.set("active_users", 1_250); +metrics.set("memory_usage_mb", 512.5); +metrics.set("queue_size", 0); +``` + +## Configuration + +### Development Configuration + +```java +Logdash logdash = Logdash.builder() + .apiKey("your-api-key") + .enableConsoleOutput(true) // See logs locally + .enableVerboseLogging(true) // Debug SDK behavior + .build(); +``` + +### Production Configuration + +```java +Logdash logdash = Logdash.builder() + .apiKey(System.getenv("LOGDASH_API_KEY")) + .enableConsoleOutput(false) // Use your app's logging + .enableVerboseLogging(false) // Disable debug output + .requestTimeoutMs(10000) // 10s timeout + .maxRetries(5) // More retries in prod + .maxConcurrentRequests(20) // Higher concurrency + .build(); +``` + +### All Configuration Options + +| Option | Default | Description | 
+|-------------------------|--------------------------|----------------------------------| +| `apiKey` | `null` | Your Logdash API key (required) | +| `baseUrl` | `https://api.logdash.io` | Logdash API endpoint | +| `enableConsoleOutput` | `true` | Show logs/metrics in console | +| `enableVerboseLogging` | `false` | Enable SDK debug output | +| `requestTimeoutMs` | `15000` | HTTP request timeout | +| `maxRetries` | `3` | Failed request retry attempts | +| `retryDelayMs` | `500` | Base delay between retries | +| `maxConcurrentRequests` | `10` | Maximum concurrent HTTP requests | +| `shutdownTimeoutMs` | `10000` | Graceful shutdown timeout | + +## Performance Considerations + +- **Initialize once** at application startup and reuse the instance +- **Use structured logging** for better searchability and performance +- **Avoid logging sensitive data** (passwords, tokens, PII) +- **The SDK is async** - logging calls return immediately without blocking +- **Memory efficient** - automatic batching and resource management + +## Getting Your API Key + +1. Visit [Logdash.io](https://logdash.io) +2. Create a free account +3. Generate your API key in the dashboard +4. 
View live demo at [demo-dashboard](https://logdash.io/demo-dashboard) + +## Examples + +Check out the [`examples/`](./examples) directory for a complete sample application demonstrating: + +- Basic SDK usage patterns +- Structured logging examples +- Metrics tracking patterns +- Error handling strategies +- Framework integration examples + +## Troubleshooting + +### Common Issues + +**Logs not appearing in dashboard:** + +- Verify your API key is correct and active +- Check network connectivity to `api.logdash.io` +- Enable verbose logging to see HTTP requests: `.enableVerboseLogging(true)` + +**High latency or timeouts:** + +- Increase `requestTimeoutMs` for slower networks +- Reduce `maxConcurrentRequests` to limit resource usage +- Check your network connection stability + +**Missing logs in production:** + +- Ensure SDK is initialized before first log call +- Verify API key environment variable is set +- Check firewall settings for outbound HTTPS (port 443) + +## Requirements + +- **Java 17 or higher** (tested on Java 17, 21, 22) +- **Internet connection** for sending data to Logdash +- **Valid API key** from [Logdash.io](https://logdash.io) + +## Contributing + +We welcome contributions! Here's how to get started: + +### Development Setup + +```bash +git clone https://github.com/logdash-io/java-sdk.git +cd java-sdk +mvn clean compile +mvn test +``` + +### Running Tests + +```bash +# Unit tests +mvn test + +# Integration tests +mvn failsafe:integration-test + +# All tests with coverage +mvn clean verify +``` + +### Code Quality + +- Follow existing code style (checkstyle configuration included) +- Ensure all tests pass +- Add tests for new features +- Update documentation as needed + +## License + +This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. 
+ +## Support & Resources + +- 📖 **Documentation**: [https://logdash.io/docs](https://logdash.io/docs) +- 🚀 **Live Demo**: [https://logdash.io/demo-dashboard](https://logdash.io/demo-dashboard) +- 🐛 **Issues**: [GitHub Issues](https://github.com/logdash-io/java-sdk/issues) +- 💬 **Community**: [Discord Server](https://discord.gg/naftPW4Hxe) +- 📧 **Support**: [contact@logdash.io](mailto:contact@logdash.io) +- 🔗 **Main Repository**: [https://github.com/logdash-io/](https://github.com/logdash-io/) + +--- + +**Made with ❤️ for developers who want simple, effective observability without the complexity.** diff --git a/examples/example-simple-java/pom.xml b/examples/example-simple-java/pom.xml new file mode 100644 index 0000000..ea3d58d --- /dev/null +++ b/examples/example-simple-java/pom.xml @@ -0,0 +1,56 @@ + + + 4.0.0 + + com.example + logdash-example + 0.1.0 + jar + + Logdash Example Application + Example application demonstrating Logdash Java SDK usage + + + 21 + 21 + UTF-8 + 21 + + 0.1.0 + 3.12.1 + 3.1.1 + + + + + io.logdash + logdash + ${logdash-sdk.version} + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + ${maven-compiler-plugin.version} + + ${maven.compiler.release} + + -parameters + + + + + + org.codehaus.mojo + exec-maven-plugin + ${maven-exec-plugin.version} + + com.example.ExampleApp + + + + + diff --git a/examples/example-simple-java/src/main/java/com/example/ExampleApp.java b/examples/example-simple-java/src/main/java/com/example/ExampleApp.java new file mode 100644 index 0000000..8d88965 --- /dev/null +++ b/examples/example-simple-java/src/main/java/com/example/ExampleApp.java @@ -0,0 +1,56 @@ +package com.example; + +import io.logdash.sdk.Logdash; + +import java.util.Map; +import java.util.Random; +import java.util.concurrent.TimeUnit; + +public class ExampleApp { + + public static final String METRIC_ACTIVE_USERS = "active_users"; + public static final String METRIC_API_REQUESTS = "api_requests"; + + public static void main(String[] args) 
{ + var random = new Random(); + Logdash logdash = Logdash.builder() + .apiKey("your-api-key") + .baseUrl("https://api.logdash.io") + .enableConsoleOutput(true) + .enableVerboseLogging(false) + .requestTimeoutMs(10000) + .maxConcurrentRequests(20) + .shutdownTimeoutMs(15000) + .build(); + + var logger = logdash.logger(); + var metrics = logdash.metrics(); + + logger.info("Application started", Map.of("version", "1.0.0")); + + for (int i = 0; i < 2; i++) { + int users = 50 + random.nextInt(51); + int apiCalls = random.nextInt(10) + 1; + + metrics.set(METRIC_ACTIVE_USERS, users); + metrics.mutate(METRIC_ACTIVE_USERS, 100); + metrics.mutate(METRIC_API_REQUESTS, apiCalls); + + logger.debug("Iteration " + i, Map.of("users", users, "apiCalls", apiCalls)); + + try { + TimeUnit.MILLISECONDS.sleep(1000L + random.nextInt(3000)); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + + logger.http("Processed API request"); + logger.warn("Random warning"); + logger.error("Random error"); + logger.verbose("Verbose log"); + logger.silly("Silly log"); + + logger.info("Example completed"); + } +} diff --git a/mvnw b/mvnw new file mode 100755 index 0000000..19529dd --- /dev/null +++ b/mvnw @@ -0,0 +1,259 @@ +#!/bin/sh +# ---------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------- +# Apache Maven Wrapper startup batch script, version 3.3.2 +# +# Optional ENV vars +# ----------------- +# JAVA_HOME - location of a JDK home dir, required when download maven via java source +# MVNW_REPOURL - repo url base for downloading maven distribution +# MVNW_USERNAME/MVNW_PASSWORD - user and password for downloading maven +# MVNW_VERBOSE - true: enable verbose log; debug: trace the mvnw script; others: silence the output +# ---------------------------------------------------------------------------- + +set -euf +[ "${MVNW_VERBOSE-}" != debug ] || set -x + +# OS specific support. +native_path() { printf %s\\n "$1"; } +case "$(uname)" in +CYGWIN* | MINGW*) + [ -z "${JAVA_HOME-}" ] || JAVA_HOME="$(cygpath --unix "$JAVA_HOME")" + native_path() { cygpath --path --windows "$1"; } + ;; +esac + +# set JAVACMD and JAVACCMD +set_java_home() { + # For Cygwin and MinGW, ensure paths are in Unix format before anything is touched + if [ -n "${JAVA_HOME-}" ]; then + if [ -x "$JAVA_HOME/jre/sh/java" ]; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + JAVACCMD="$JAVA_HOME/jre/sh/javac" + else + JAVACMD="$JAVA_HOME/bin/java" + JAVACCMD="$JAVA_HOME/bin/javac" + + if [ ! -x "$JAVACMD" ] || [ ! -x "$JAVACCMD" ]; then + echo "The JAVA_HOME environment variable is not defined correctly, so mvnw cannot run." 
>&2 + echo "JAVA_HOME is set to \"$JAVA_HOME\", but \"\$JAVA_HOME/bin/java\" or \"\$JAVA_HOME/bin/javac\" does not exist." >&2 + return 1 + fi + fi + else + JAVACMD="$( + 'set' +e + 'unset' -f command 2>/dev/null + 'command' -v java + )" || : + JAVACCMD="$( + 'set' +e + 'unset' -f command 2>/dev/null + 'command' -v javac + )" || : + + if [ ! -x "${JAVACMD-}" ] || [ ! -x "${JAVACCMD-}" ]; then + echo "The java/javac command does not exist in PATH nor is JAVA_HOME set, so mvnw cannot run." >&2 + return 1 + fi + fi +} + +# hash string like Java String::hashCode +hash_string() { + str="${1:-}" h=0 + while [ -n "$str" ]; do + char="${str%"${str#?}"}" + h=$(((h * 31 + $(LC_CTYPE=C printf %d "'$char")) % 4294967296)) + str="${str#?}" + done + printf %x\\n $h +} + +verbose() { :; } +[ "${MVNW_VERBOSE-}" != true ] || verbose() { printf %s\\n "${1-}"; } + +die() { + printf %s\\n "$1" >&2 + exit 1 +} + +trim() { + # MWRAPPER-139: + # Trims trailing and leading whitespace, carriage returns, tabs, and linefeeds. + # Needed for removing poorly interpreted newline sequences when running in more + # exotic environments such as mingw bash on Windows. 
+ printf "%s" "${1}" | tr -d '[:space:]' +} + +# parse distributionUrl and optional distributionSha256Sum, requires .mvn/wrapper/maven-wrapper.properties +while IFS="=" read -r key value; do + case "${key-}" in + distributionUrl) distributionUrl=$(trim "${value-}") ;; + distributionSha256Sum) distributionSha256Sum=$(trim "${value-}") ;; + esac +done <"${0%/*}/.mvn/wrapper/maven-wrapper.properties" +[ -n "${distributionUrl-}" ] || die "cannot read distributionUrl property in ${0%/*}/.mvn/wrapper/maven-wrapper.properties" + +case "${distributionUrl##*/}" in +maven-mvnd-*bin.*) + MVN_CMD=mvnd.sh _MVNW_REPO_PATTERN=/maven/mvnd/ + case "${PROCESSOR_ARCHITECTURE-}${PROCESSOR_ARCHITEW6432-}:$(uname -a)" in + *AMD64:CYGWIN* | *AMD64:MINGW*) distributionPlatform=windows-amd64 ;; + :Darwin*x86_64) distributionPlatform=darwin-amd64 ;; + :Darwin*arm64) distributionPlatform=darwin-aarch64 ;; + :Linux*x86_64*) distributionPlatform=linux-amd64 ;; + *) + echo "Cannot detect native platform for mvnd on $(uname)-$(uname -m), use pure java version" >&2 + distributionPlatform=linux-amd64 + ;; + esac + distributionUrl="${distributionUrl%-bin.*}-$distributionPlatform.zip" + ;; +maven-mvnd-*) MVN_CMD=mvnd.sh _MVNW_REPO_PATTERN=/maven/mvnd/ ;; +*) MVN_CMD="mvn${0##*/mvnw}" _MVNW_REPO_PATTERN=/org/apache/maven/ ;; +esac + +# apply MVNW_REPOURL and calculate MAVEN_HOME +# maven home pattern: ~/.m2/wrapper/dists/{apache-maven-,maven-mvnd--}/ +[ -z "${MVNW_REPOURL-}" ] || distributionUrl="$MVNW_REPOURL$_MVNW_REPO_PATTERN${distributionUrl#*"$_MVNW_REPO_PATTERN"}" +distributionUrlName="${distributionUrl##*/}" +distributionUrlNameMain="${distributionUrlName%.*}" +distributionUrlNameMain="${distributionUrlNameMain%-bin}" +MAVEN_USER_HOME="${MAVEN_USER_HOME:-${HOME}/.m2}" +MAVEN_HOME="${MAVEN_USER_HOME}/wrapper/dists/${distributionUrlNameMain-}/$(hash_string "$distributionUrl")" + +exec_maven() { + unset MVNW_VERBOSE MVNW_USERNAME MVNW_PASSWORD MVNW_REPOURL || : + exec "$MAVEN_HOME/bin/$MVN_CMD" 
"$@" || die "cannot exec $MAVEN_HOME/bin/$MVN_CMD" +} + +if [ -d "$MAVEN_HOME" ]; then + verbose "found existing MAVEN_HOME at $MAVEN_HOME" + exec_maven "$@" +fi + +case "${distributionUrl-}" in +*?-bin.zip | *?maven-mvnd-?*-?*.zip) ;; +*) die "distributionUrl is not valid, must match *-bin.zip or maven-mvnd-*.zip, but found '${distributionUrl-}'" ;; +esac + +# prepare tmp dir +if TMP_DOWNLOAD_DIR="$(mktemp -d)" && [ -d "$TMP_DOWNLOAD_DIR" ]; then + clean() { rm -rf -- "$TMP_DOWNLOAD_DIR"; } + trap clean HUP INT TERM EXIT +else + die "cannot create temp dir" +fi + +mkdir -p -- "${MAVEN_HOME%/*}" + +# Download and Install Apache Maven +verbose "Couldn't find MAVEN_HOME, downloading and installing it ..." +verbose "Downloading from: $distributionUrl" +verbose "Downloading to: $TMP_DOWNLOAD_DIR/$distributionUrlName" + +# select .zip or .tar.gz +if ! command -v unzip >/dev/null; then + distributionUrl="${distributionUrl%.zip}.tar.gz" + distributionUrlName="${distributionUrl##*/}" +fi + +# verbose opt +__MVNW_QUIET_WGET=--quiet __MVNW_QUIET_CURL=--silent __MVNW_QUIET_UNZIP=-q __MVNW_QUIET_TAR='' +[ "${MVNW_VERBOSE-}" != true ] || __MVNW_QUIET_WGET='' __MVNW_QUIET_CURL='' __MVNW_QUIET_UNZIP='' __MVNW_QUIET_TAR=v + +# normalize http auth +case "${MVNW_PASSWORD:+has-password}" in +'') MVNW_USERNAME='' MVNW_PASSWORD='' ;; +has-password) [ -n "${MVNW_USERNAME-}" ] || MVNW_USERNAME='' MVNW_PASSWORD='' ;; +esac + +if [ -z "${MVNW_USERNAME-}" ] && command -v wget >/dev/null; then + verbose "Found wget ... using wget" + wget ${__MVNW_QUIET_WGET:+"$__MVNW_QUIET_WGET"} "$distributionUrl" -O "$TMP_DOWNLOAD_DIR/$distributionUrlName" || die "wget: Failed to fetch $distributionUrl" +elif [ -z "${MVNW_USERNAME-}" ] && command -v curl >/dev/null; then + verbose "Found curl ... 
using curl" + curl ${__MVNW_QUIET_CURL:+"$__MVNW_QUIET_CURL"} -f -L -o "$TMP_DOWNLOAD_DIR/$distributionUrlName" "$distributionUrl" || die "curl: Failed to fetch $distributionUrl" +elif set_java_home; then + verbose "Falling back to use Java to download" + javaSource="$TMP_DOWNLOAD_DIR/Downloader.java" + targetZip="$TMP_DOWNLOAD_DIR/$distributionUrlName" + cat >"$javaSource" <<-END + public class Downloader extends java.net.Authenticator + { + protected java.net.PasswordAuthentication getPasswordAuthentication() + { + return new java.net.PasswordAuthentication( System.getenv( "MVNW_USERNAME" ), System.getenv( "MVNW_PASSWORD" ).toCharArray() ); + } + public static void main( String[] args ) throws Exception + { + setDefault( new Downloader() ); + java.nio.file.Files.copy( java.net.URI.create( args[0] ).toURL().openStream(), java.nio.file.Paths.get( args[1] ).toAbsolutePath().normalize() ); + } + } + END + # For Cygwin/MinGW, switch paths to Windows format before running javac and java + verbose " - Compiling Downloader.java ..." + "$(native_path "$JAVACCMD")" "$(native_path "$javaSource")" || die "Failed to compile Downloader.java" + verbose " - Running Downloader.java ..." + "$(native_path "$JAVACMD")" -cp "$(native_path "$TMP_DOWNLOAD_DIR")" Downloader "$distributionUrl" "$(native_path "$targetZip")" +fi + +# If specified, validate the SHA-256 sum of the Maven distribution zip file +if [ -n "${distributionSha256Sum-}" ]; then + distributionSha256Result=false + if [ "$MVN_CMD" = mvnd.sh ]; then + echo "Checksum validation is not supported for maven-mvnd." >&2 + echo "Please disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." 
>&2 + exit 1 + elif command -v sha256sum >/dev/null; then + if echo "$distributionSha256Sum $TMP_DOWNLOAD_DIR/$distributionUrlName" | sha256sum -c >/dev/null 2>&1; then + distributionSha256Result=true + fi + elif command -v shasum >/dev/null; then + if echo "$distributionSha256Sum $TMP_DOWNLOAD_DIR/$distributionUrlName" | shasum -a 256 -c >/dev/null 2>&1; then + distributionSha256Result=true + fi + else + echo "Checksum validation was requested but neither 'sha256sum' or 'shasum' are available." >&2 + echo "Please install either command, or disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." >&2 + exit 1 + fi + if [ $distributionSha256Result = false ]; then + echo "Error: Failed to validate Maven distribution SHA-256, your Maven distribution might be compromised." >&2 + echo "If you updated your Maven version, you need to update the specified distributionSha256Sum property." >&2 + exit 1 + fi +fi + +# unzip and move +if command -v unzip >/dev/null; then + unzip ${__MVNW_QUIET_UNZIP:+"$__MVNW_QUIET_UNZIP"} "$TMP_DOWNLOAD_DIR/$distributionUrlName" -d "$TMP_DOWNLOAD_DIR" || die "failed to unzip" +else + tar xzf${__MVNW_QUIET_TAR:+"$__MVNW_QUIET_TAR"} "$TMP_DOWNLOAD_DIR/$distributionUrlName" -C "$TMP_DOWNLOAD_DIR" || die "failed to untar" +fi +printf %s\\n "$distributionUrl" >"$TMP_DOWNLOAD_DIR/$distributionUrlNameMain/mvnw.url" +mv -- "$TMP_DOWNLOAD_DIR/$distributionUrlNameMain" "$MAVEN_HOME" || [ -d "$MAVEN_HOME" ] || die "fail to move MAVEN_HOME" + +clean || : +exec_maven "$@" diff --git a/mvnw.cmd b/mvnw.cmd new file mode 100644 index 0000000..249bdf3 --- /dev/null +++ b/mvnw.cmd @@ -0,0 +1,149 @@ +<# : batch portion +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. 
The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Apache Maven Wrapper startup batch script, version 3.3.2 +@REM +@REM Optional ENV vars +@REM MVNW_REPOURL - repo url base for downloading maven distribution +@REM MVNW_USERNAME/MVNW_PASSWORD - user and password for downloading maven +@REM MVNW_VERBOSE - true: enable verbose log; others: silence the output +@REM ---------------------------------------------------------------------------- + +@IF "%__MVNW_ARG0_NAME__%"=="" (SET __MVNW_ARG0_NAME__=%~nx0) +@SET __MVNW_CMD__= +@SET __MVNW_ERROR__= +@SET __MVNW_PSMODULEP_SAVE=%PSModulePath% +@SET PSModulePath= +@FOR /F "usebackq tokens=1* delims==" %%A IN (`powershell -noprofile "& {$scriptDir='%~dp0'; $script='%__MVNW_ARG0_NAME__%'; icm -ScriptBlock ([Scriptblock]::Create((Get-Content -Raw '%~f0'))) -NoNewScope}"`) DO @( + IF "%%A"=="MVN_CMD" (set __MVNW_CMD__=%%B) ELSE IF "%%B"=="" (echo %%A) ELSE (echo %%A=%%B) +) +@SET PSModulePath=%__MVNW_PSMODULEP_SAVE% +@SET __MVNW_PSMODULEP_SAVE= +@SET __MVNW_ARG0_NAME__= +@SET MVNW_USERNAME= +@SET MVNW_PASSWORD= +@IF NOT "%__MVNW_CMD__%"=="" (%__MVNW_CMD__% %*) +@echo Cannot start maven from wrapper >&2 && exit /b 1 +@GOTO :EOF +: end batch / begin powershell #> + +$ErrorActionPreference = "Stop" +if 
($env:MVNW_VERBOSE -eq "true") { + $VerbosePreference = "Continue" +} + +# calculate distributionUrl, requires .mvn/wrapper/maven-wrapper.properties +$distributionUrl = (Get-Content -Raw "$scriptDir/.mvn/wrapper/maven-wrapper.properties" | ConvertFrom-StringData).distributionUrl +if (!$distributionUrl) { + Write-Error "cannot read distributionUrl property in $scriptDir/.mvn/wrapper/maven-wrapper.properties" +} + +switch -wildcard -casesensitive ( $($distributionUrl -replace '^.*/','') ) { + "maven-mvnd-*" { + $USE_MVND = $true + $distributionUrl = $distributionUrl -replace '-bin\.[^.]*$',"-windows-amd64.zip" + $MVN_CMD = "mvnd.cmd" + break + } + default { + $USE_MVND = $false + $MVN_CMD = $script -replace '^mvnw','mvn' + break + } +} + +# apply MVNW_REPOURL and calculate MAVEN_HOME +# maven home pattern: ~/.m2/wrapper/dists/{apache-maven-,maven-mvnd--}/ +if ($env:MVNW_REPOURL) { + $MVNW_REPO_PATTERN = if ($USE_MVND) { "/org/apache/maven/" } else { "/maven/mvnd/" } + $distributionUrl = "$env:MVNW_REPOURL$MVNW_REPO_PATTERN$($distributionUrl -replace '^.*'+$MVNW_REPO_PATTERN,'')" +} +$distributionUrlName = $distributionUrl -replace '^.*/','' +$distributionUrlNameMain = $distributionUrlName -replace '\.[^.]*$','' -replace '-bin$','' +$MAVEN_HOME_PARENT = "$HOME/.m2/wrapper/dists/$distributionUrlNameMain" +if ($env:MAVEN_USER_HOME) { + $MAVEN_HOME_PARENT = "$env:MAVEN_USER_HOME/wrapper/dists/$distributionUrlNameMain" +} +$MAVEN_HOME_NAME = ([System.Security.Cryptography.MD5]::Create().ComputeHash([byte[]][char[]]$distributionUrl) | ForEach-Object {$_.ToString("x2")}) -join '' +$MAVEN_HOME = "$MAVEN_HOME_PARENT/$MAVEN_HOME_NAME" + +if (Test-Path -Path "$MAVEN_HOME" -PathType Container) { + Write-Verbose "found existing MAVEN_HOME at $MAVEN_HOME" + Write-Output "MVN_CMD=$MAVEN_HOME/bin/$MVN_CMD" + exit $? +} + +if (! 
$distributionUrlNameMain -or ($distributionUrlName -eq $distributionUrlNameMain)) { + Write-Error "distributionUrl is not valid, must end with *-bin.zip, but found $distributionUrl" +} + +# prepare tmp dir +$TMP_DOWNLOAD_DIR_HOLDER = New-TemporaryFile +$TMP_DOWNLOAD_DIR = New-Item -Itemtype Directory -Path "$TMP_DOWNLOAD_DIR_HOLDER.dir" +$TMP_DOWNLOAD_DIR_HOLDER.Delete() | Out-Null +trap { + if ($TMP_DOWNLOAD_DIR.Exists) { + try { Remove-Item $TMP_DOWNLOAD_DIR -Recurse -Force | Out-Null } + catch { Write-Warning "Cannot remove $TMP_DOWNLOAD_DIR" } + } +} + +New-Item -Itemtype Directory -Path "$MAVEN_HOME_PARENT" -Force | Out-Null + +# Download and Install Apache Maven +Write-Verbose "Couldn't find MAVEN_HOME, downloading and installing it ..." +Write-Verbose "Downloading from: $distributionUrl" +Write-Verbose "Downloading to: $TMP_DOWNLOAD_DIR/$distributionUrlName" + +$webclient = New-Object System.Net.WebClient +if ($env:MVNW_USERNAME -and $env:MVNW_PASSWORD) { + $webclient.Credentials = New-Object System.Net.NetworkCredential($env:MVNW_USERNAME, $env:MVNW_PASSWORD) +} +[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 +$webclient.DownloadFile($distributionUrl, "$TMP_DOWNLOAD_DIR/$distributionUrlName") | Out-Null + +# If specified, validate the SHA-256 sum of the Maven distribution zip file +$distributionSha256Sum = (Get-Content -Raw "$scriptDir/.mvn/wrapper/maven-wrapper.properties" | ConvertFrom-StringData).distributionSha256Sum +if ($distributionSha256Sum) { + if ($USE_MVND) { + Write-Error "Checksum validation is not supported for maven-mvnd. `nPlease disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." 
+ } + Import-Module $PSHOME\Modules\Microsoft.PowerShell.Utility -Function Get-FileHash + if ((Get-FileHash "$TMP_DOWNLOAD_DIR/$distributionUrlName" -Algorithm SHA256).Hash.ToLower() -ne $distributionSha256Sum) { + Write-Error "Error: Failed to validate Maven distribution SHA-256, your Maven distribution might be compromised. If you updated your Maven version, you need to update the specified distributionSha256Sum property." + } +} + +# unzip and move +Expand-Archive "$TMP_DOWNLOAD_DIR/$distributionUrlName" -DestinationPath "$TMP_DOWNLOAD_DIR" | Out-Null +Rename-Item -Path "$TMP_DOWNLOAD_DIR/$distributionUrlNameMain" -NewName $MAVEN_HOME_NAME | Out-Null +try { + Move-Item -Path "$TMP_DOWNLOAD_DIR/$MAVEN_HOME_NAME" -Destination $MAVEN_HOME_PARENT | Out-Null +} catch { + if (! (Test-Path -Path "$MAVEN_HOME" -PathType Container)) { + Write-Error "fail to move MAVEN_HOME" + } +} finally { + try { Remove-Item $TMP_DOWNLOAD_DIR -Recurse -Force | Out-Null } + catch { Write-Warning "Cannot remove $TMP_DOWNLOAD_DIR" } +} + +Write-Output "MVN_CMD=$MAVEN_HOME/bin/$MVN_CMD" diff --git a/pom.xml b/pom.xml new file mode 100644 index 0000000..284f8d4 --- /dev/null +++ b/pom.xml @@ -0,0 +1,538 @@ + + + 4.0.0 + + io.logdash + logdash + 0.1.0 + jar + + Logdash Java SDK + Official Java SDK for Logdash.io observability platform + https://logdash.io + + + + MIT License + https://opensource.org/licenses/MIT + repo + + + + + + logdash-team + Logdash Team + contact@logdash.io + Logdash + https://logdash.io + + + + + scm:git:git://github.com/logdash-io/java-sdk.git + scm:git:ssh://github.com/logdash-io/java-sdk.git + https://github.com/logdash-io/java-sdk + + + + + github + GitHub Packages + https://maven.pkg.github.com/logdash-io/java-sdk + + + github + GitHub Packages + https://maven.pkg.github.com/logdash-io/java-sdk + + + + + 17 + ${java.version} + ${java.version} + ${java.version} + UTF-8 + + + 5.13.0 + 5.18.0 + 3.27.3 + 3.13.0 + 2.0.17 + 2.19.0 + + + 3.14.0 + 3.5.3 + 3.5.3 + 0.8.13 + 
3.3.1 + 3.11.2 + 4.9.3.0 + 2.44.5 + 3.5.0 + 3.1.4 + 3.8.1 + + + + + + org.slf4j + slf4j-api + ${slf4j.version} + + + com.fasterxml.jackson.core + jackson-core + ${jackson.version} + + + com.fasterxml.jackson.core + jackson-databind + ${jackson.version} + + + com.fasterxml.jackson.core + jackson-annotations + ${jackson.version} + + + com.fasterxml.jackson.datatype + jackson-datatype-jsr310 + ${jackson.version} + + + + + org.junit.jupiter + junit-jupiter-api + ${junit.version} + test + + + org.junit.jupiter + junit-jupiter-params + ${junit.version} + test + + + org.junit.jupiter + junit-jupiter-engine + ${junit.version} + test + + + org.mockito + mockito-core + ${mockito.version} + test + + + org.mockito + mockito-junit-jupiter + ${mockito.version} + test + + + org.assertj + assertj-core + ${assertj.version} + test + + + org.wiremock + wiremock + ${wiremock.version} + test + + + + + + + + org.apache.maven.plugins + maven-enforcer-plugin + ${maven-enforcer-plugin.version} + + + enforce-maven + + enforce + + + + + 3.6.0 + + + ${java.version} + + + + commons-logging:commons-logging + log4j:log4j + + + + + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + ${maven-compiler-plugin.version} + + ${java.version} + + -parameters + -Xlint:unchecked + -Xlint:deprecation + -Werror + + + + + + + com.github.spotbugs + spotbugs-maven-plugin + ${spotbugs-maven-plugin.version} + + Max + Low + true + true + + + + + check + + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + ${maven-surefire-plugin.version} + + false + + **/*Test.java + + + **/*IntegrationTest.java + **/*PerformanceTest.java + + + target/jacoco.exec + + @{argLine} -Xmx1024m + + + + + + org.apache.maven.plugins + maven-failsafe-plugin + ${maven-failsafe-plugin.version} + + + **/*IntegrationTest.java + + + target/jacoco-it.exec + + + + + + integration-test + verify + + + + + + + + org.jacoco + jacoco-maven-plugin + ${jacoco-maven-plugin.version} + + + **/exception/* + **/config/*Builder* + 
**/*$*.class + + + + + prepare-agent + + prepare-agent + + + + report + + report + + test + + + prepare-agent-integration + + prepare-agent-integration + + + + report-integration + + report-integration + + post-integration-test + + + merge-results + + merge + + verify + + + + ${project.build.directory} + + jacoco.exec + jacoco-it.exec + + + jacoco-merged.exec + + + + ${project.build.directory}/jacoco-merged.exec + + + + report-merged + + report + + verify + + ${project.build.directory}/jacoco-merged.exec + ${project.build.directory}/site/jacoco-merged + + XML + CSV + HTML + + + + + check-coverage + + check + + verify + + target/jacoco-merged.exec + + + BUNDLE + + + INSTRUCTION + COVEREDRATIO + 0.70 + + + LINE + COVEREDRATIO + 0.70 + + + BRANCH + COVEREDRATIO + 0.50 + + + + + true + + + + + + + + org.apache.maven.plugins + maven-source-plugin + ${maven-source-plugin.version} + + + attach-sources + + jar-no-fork + + + + + + + + org.apache.maven.plugins + maven-javadoc-plugin + ${maven-javadoc-plugin.version} + + ${java.version} + none + true + false + false + + + + attach-javadocs + + jar + + + + + + + + com.diffplug.spotless + spotless-maven-plugin + ${spotless.maven.plugin.version} + + + + + + + true + 4 + + + **/target/** + **/generated/** + + + + + + **/*.md + + + + **/target/** + + + + + + **/*.json + + + + **/target/** + + + + + + pom.xml + **/pom.xml + + + false + false + 4 + + + **/target/** + + + + + + + check + + verify + + + + + + + + + + release + + + performRelease + true + + + + + + + org.apache.maven.plugins + maven-deploy-plugin + ${maven-deploy-plugin.version} + + true + + + + + + + + + quality + + + + org.apache.maven.plugins + maven-dependency-plugin + ${maven-dependency-plugin.version} + + + analyze-dependencies + + analyze-only + + + true + true + + org.slf4j:slf4j-api + org.junit.jupiter:junit-jupiter-engine + + + + + + + + + + diff --git a/src/main/java/io/logdash/sdk/Logdash.java b/src/main/java/io/logdash/sdk/Logdash.java new file mode 100644 
index 0000000..ca0b6fa --- /dev/null +++ b/src/main/java/io/logdash/sdk/Logdash.java @@ -0,0 +1,373 @@ +package io.logdash.sdk; + +import io.logdash.sdk.config.LogdashConfig; +import io.logdash.sdk.log.LogdashLogger; +import io.logdash.sdk.metrics.LogdashMetrics; +import io.logdash.sdk.transport.HttpTransport; +import io.logdash.sdk.transport.LogdashTransport; +import io.logdash.sdk.transport.NoOpTransport; + +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +/** + * Main entry point for the Logdash Java SDK. + * + *

This class provides access to logging and metrics functionality for sending observability data + * to the Logdash.io platform. It manages the underlying transport layer and provides a simple API + * for developers to integrate observability into their applications. + * + *

The SDK automatically handles HTTP transport with retry logic, rate limiting, and graceful + * shutdown. If no API key is provided or HTTP transport fails, it falls back to console-only mode. + * + *

Example usage: + * + *

{@code
+ * // Simple usage with API key
+ * try (var logdash = Logdash.create("your-api-key")) {
+ *     logdash.logger().info("Application started");
+ *     logdash.metrics().mutate("app.starts", 1);
+ * }
+ *
+ * // Advanced configuration
+ * try (var logdash = Logdash.builder()
+ *         .apiKey("your-api-key")
+ *         .enableVerboseLogging(true)
+ *         .maxRetries(5)
+ *         .build()) {
+ *
+ *     logdash.logger().error("Error occurred", Map.of("userId", 123));
+ *     logdash.metrics().set("active_users", 1500);
+ * }
+ * }
+ */ +public final class Logdash implements AutoCloseable { + + private final LogdashConfig config; + private final LogdashTransport transport; + private final LogdashLogger logger; + private final Thread shutdownHook; + private volatile boolean closed = false; + + private Logdash(LogdashConfig config) { + this.config = config; + this.transport = createTransport(config); + this.logger = new LogdashLogger(config, transport); + + this.shutdownHook = new Thread(this::performShutdownHookCleanup, "logdash-shutdown-hook"); + this.shutdownHook.setPriority(Thread.MAX_PRIORITY); + Runtime.getRuntime().addShutdownHook(shutdownHook); + } + + /** + * Creates a new builder for configuring Logdash instances. + * + * @return a new builder instance for customizing SDK configuration + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Creates a Logdash instance with default configuration and the specified API key. + * + * @param apiKey the API key for authenticating with Logdash.io + * @return a new Logdash instance ready for use + * @throws IllegalArgumentException if apiKey is null or blank + */ + public static Logdash create(String apiKey) { + return new Logdash(LogdashConfig.builder().apiKey(apiKey).build()); + } + + /** + * Returns the logger instance for sending structured log messages. + * + * @return the logger instance + * @throws IllegalStateException if this instance has been closed + */ + public LogdashLogger logger() { + checkNotClosed(); + return logger; + } + + /** + * Returns the metrics instance for sending custom metrics and measurements. + * + * @return a new metrics instance (defensive copy) + * @throws IllegalStateException if this instance has been closed + */ + public LogdashMetrics metrics() { + checkNotClosed(); + return new LogdashMetrics(config, transport); + } + + /** + * Returns the configuration used by this instance. 
+ * + * @return the immutable configuration object + */ + public LogdashConfig config() { + return config; + } + + /** + * Flushes all pending logs and metrics synchronously. + * + *

Blocks until all queued requests complete or the configured timeout is reached. This method + * is useful for ensuring all data is sent before application shutdown. + */ + public void flush() { + if (closed || !(transport instanceof HttpTransport httpTransport)) { + return; + } + + try { + if (config.enableVerboseLogging()) { + System.out.println("Flushing pending requests..."); + } + + httpTransport.flush().get(config.shutdownTimeoutMs(), TimeUnit.MILLISECONDS); + + if (config.enableVerboseLogging()) { + System.out.println("Flush completed successfully"); + } + } catch (ExecutionException | TimeoutException e) { + if (config.enableVerboseLogging()) { + System.err.println("Flush failed: " + e.getMessage()); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + if (config.enableVerboseLogging()) { + System.err.println("Flush interrupted: " + e.getMessage()); + } + } + } + + @Override + public void close() { + if (!closed) { + try { + Runtime.getRuntime().removeShutdownHook(shutdownHook); + } catch (IllegalStateException ignored) { + // JVM is already shutting down + } + closeInternal(); + } + } + + private void performShutdownHookCleanup() { + if (closed) { + return; + } + + try { + if (config.enableVerboseLogging()) { + System.out.println("Shutting down Logdash SDK via shutdown hook..."); + } + performShutdown(Math.min(config.shutdownTimeoutMs(), 3000)); + } catch (Exception e) { + if (config.enableVerboseLogging()) { + System.err.println("Error during shutdown hook cleanup: " + e.getMessage()); + } + } + } + + private void closeInternal() { + if (!closed) { + if (config.enableVerboseLogging()) { + System.out.println("Shutting down Logdash SDK..."); + } + performShutdown(config.shutdownTimeoutMs()); + } + } + + private void performShutdown(long timeoutMs) { + if (closed) { + return; + } + + closed = true; + + try { + transport + .shutdown() + .ifPresent( + shutdownFuture -> { + try { + shutdownFuture.get(timeoutMs, TimeUnit.MILLISECONDS); 
+ } catch (Exception e) { + if (config.enableVerboseLogging()) { + System.err.println("Warning: Graceful shutdown timeout: " + e.getMessage()); + } + } + }); + } finally { + transport.close(); + if (config.enableVerboseLogging()) { + System.out.println("Logdash SDK shutdown completed"); + } + } + } + + private void checkNotClosed() { + if (closed) { + throw new IllegalStateException("Logdash instance has been closed"); + } + } + + private LogdashTransport createTransport(LogdashConfig config) { + try { + if (config.apiKey() == null || config.apiKey().isBlank()) { + if (config.enableVerboseLogging()) { + System.out.println("No API key provided, using console-only mode"); + } + return new NoOpTransport(config); + } + return new HttpTransport(config); + } catch (Exception e) { + if (config.enableVerboseLogging()) { + System.err.printf( + "Failed to create HTTP transport, falling back to console-only: %s%n", e.getMessage()); + e.printStackTrace(); + } + return new NoOpTransport(config); + } + } + + public static final class Builder { + private String apiKey; + private String baseUrl = "https://api.logdash.io"; + private boolean enableConsoleOutput = true; + private boolean enableVerboseLogging = false; + private int maxRetries = 3; + private long retryDelayMs = 500L; + private long requestTimeoutMs = 15000L; + private long shutdownTimeoutMs = 10000L; + private int maxConcurrentRequests = 20; + + private Builder() { + } + + public Builder apiKey(String apiKey) { + this.apiKey = apiKey; + return this; + } + + /** + * Sets the base URL for the Logdash API. + * + * @param baseUrl the base URL (defaults to https://api.logdash.io) + * @return this builder + */ + public Builder baseUrl(String baseUrl) { + this.baseUrl = baseUrl; + return this; + } + + /** + * Enables or disables console output for logs and metrics. 
+ * + * @param enableConsoleOutput true to enable console output + * @return this builder + */ + public Builder enableConsoleOutput(boolean enableConsoleOutput) { + this.enableConsoleOutput = enableConsoleOutput; + return this; + } + + /** + * Enables or disables verbose logging for debugging. + * + * @param enableVerboseLogging true to enable verbose logging + * @return this builder + */ + public Builder enableVerboseLogging(boolean enableVerboseLogging) { + this.enableVerboseLogging = enableVerboseLogging; + return this; + } + + /** + * Sets the maximum number of retry attempts for failed requests. + * + * @param maxRetries the maximum retry attempts (defaults to 3) + * @return this builder + */ + public Builder maxRetries(int maxRetries) { + this.maxRetries = maxRetries; + return this; + } + + /** + * Sets the retry delay in milliseconds. + * + * @param retryDelayMs the retry delay (defaults to 500ms) + * @return this builder + */ + public Builder retryDelayMs(long retryDelayMs) { + this.retryDelayMs = retryDelayMs; + return this; + } + + /** + * Sets the request timeout in milliseconds. + * + * @param requestTimeoutMs the request timeout (defaults to 15000ms) + * @return this builder + */ + public Builder requestTimeoutMs(long requestTimeoutMs) { + this.requestTimeoutMs = requestTimeoutMs; + return this; + } + + /** + * Sets the shutdown timeout in milliseconds. + * + * @param shutdownTimeoutMs the shutdown timeout (defaults to 10000ms) + * @return this builder + */ + public Builder shutdownTimeoutMs(long shutdownTimeoutMs) { + this.shutdownTimeoutMs = shutdownTimeoutMs; + return this; + } + + /** + * Sets the maximum number of concurrent requests. 
+ * + * @param maxConcurrentRequests the max concurrent requests (defaults to 20) + * @return this builder + */ + public Builder maxConcurrentRequests(int maxConcurrentRequests) { + this.maxConcurrentRequests = maxConcurrentRequests; + return this; + } + + /** + * Builds the Logdash instance with the configured settings. + * + * @return a new Logdash instance + * @throws IllegalArgumentException if configuration is invalid + */ + public Logdash build() { + if (requestTimeoutMs <= 0) { + throw new IllegalArgumentException("Request timeout must be positive"); + } + if (maxConcurrentRequests <= 0) { + throw new IllegalArgumentException("Max concurrent requests must be positive"); + } + + LogdashConfig config = + new LogdashConfig( + apiKey, + baseUrl, + enableConsoleOutput, + enableVerboseLogging, + maxRetries, + retryDelayMs, + requestTimeoutMs, + shutdownTimeoutMs, + maxConcurrentRequests); + return new Logdash(config); + } + } +} diff --git a/src/main/java/io/logdash/sdk/config/LogdashConfig.java b/src/main/java/io/logdash/sdk/config/LogdashConfig.java new file mode 100644 index 0000000..2988d37 --- /dev/null +++ b/src/main/java/io/logdash/sdk/config/LogdashConfig.java @@ -0,0 +1,191 @@ +package io.logdash.sdk.config; + +import io.logdash.sdk.exception.LogdashException; + +import java.net.URI; +import java.net.URISyntaxException; + +/** + * Configuration record for Logdash SDK client settings. + * + *

This record contains all configuration parameters needed to initialize and operate the Logdash + * SDK, including API credentials, network settings, and behavioral options. + * + * @param apiKey API key for authenticating with Logdash.io + * @param baseUrl Base URL for the Logdash API (defaults to https://api.logdash.io) + * @param enableConsoleOutput Whether to output logs and metrics to console + * @param enableVerboseLogging Whether to enable verbose internal logging + * @param maxRetries Maximum number of retry attempts for failed requests + * @param retryDelayMs Delay between retry attempts in milliseconds + * @param requestTimeoutMs HTTP request timeout in milliseconds + * @param shutdownTimeoutMs Maximum time to wait for graceful shutdown + * @param maxConcurrentRequests Maximum number of concurrent HTTP requests + */ +public record LogdashConfig( + String apiKey, + String baseUrl, + boolean enableConsoleOutput, + boolean enableVerboseLogging, + int maxRetries, + long retryDelayMs, + long requestTimeoutMs, + long shutdownTimeoutMs, + int maxConcurrentRequests) { + private static final int MIN_API_KEY_LENGTH = 3; + private static final int MAX_API_KEY_LENGTH = 256; + + public LogdashConfig { + validateConfiguration( + apiKey, + baseUrl, + maxRetries, + retryDelayMs, + requestTimeoutMs, + shutdownTimeoutMs, + maxConcurrentRequests); + } + + /** + * Creates a new builder for configuring LogdashConfig instances. 
+ * + * @return a new builder instance + */ + public static Builder builder() { + return new Builder(); + } + + private static void validateConfiguration( + String apiKey, + String baseUrl, + int maxRetries, + long retryDelayMs, + long requestTimeoutMs, + long shutdownTimeoutMs, + int maxConcurrentRequests) { + + if (apiKey != null && !apiKey.isBlank()) { + if (apiKey.length() < MIN_API_KEY_LENGTH || apiKey.length() > MAX_API_KEY_LENGTH) { + throw new LogdashException( + String.format( + "API key length must be between %d and %d characters", + MIN_API_KEY_LENGTH, MAX_API_KEY_LENGTH)); + } + } + + if (baseUrl == null || baseUrl.isBlank()) { + throw new LogdashException("Base URL cannot be null or blank"); + } + + try { + var uri = new URI(baseUrl); + var scheme = uri.getScheme(); + if (scheme != null + && !("http".equalsIgnoreCase(scheme) || "https".equalsIgnoreCase(scheme))) { + throw new IllegalArgumentException("Unsupported URL scheme: " + scheme); + } + } catch (URISyntaxException e) { + throw new LogdashException("Invalid base URL: " + baseUrl, e); + } catch (IllegalArgumentException e) { + throw new LogdashException("Invalid base URL: " + baseUrl, e); + } + + if (maxRetries < 0) { + throw new LogdashException("Max retries cannot be negative"); + } + + if (retryDelayMs < 0) { + throw new LogdashException("Retry delay cannot be negative"); + } + + if (requestTimeoutMs <= 0) { + throw new LogdashException("Request timeout must be positive"); + } + + if (shutdownTimeoutMs <= 0) { + throw new LogdashException("Shutdown timeout must be positive"); + } + + if (maxConcurrentRequests <= 0) { + throw new LogdashException("Max concurrent requests must be positive"); + } + } + + public static final class Builder { + private String apiKey = null; + private String baseUrl = "https://api.logdash.io"; + private boolean enableConsoleOutput = true; + private boolean enableVerboseLogging = false; + private int maxRetries = 3; + private long retryDelayMs = 500L; + private long 
requestTimeoutMs = 15000L; + private long shutdownTimeoutMs = 10000L; + private int maxConcurrentRequests = 10; + + private Builder() { + } + + public Builder apiKey(String apiKey) { + this.apiKey = apiKey; + return this; + } + + public Builder baseUrl(String baseUrl) { + this.baseUrl = baseUrl; + return this; + } + + public Builder enableConsoleOutput(boolean enableConsoleOutput) { + this.enableConsoleOutput = enableConsoleOutput; + return this; + } + + public Builder enableVerboseLogging(boolean enableVerboseLogging) { + this.enableVerboseLogging = enableVerboseLogging; + return this; + } + + public Builder maxRetries(int maxRetries) { + this.maxRetries = maxRetries; + return this; + } + + public Builder retryDelayMs(long retryDelayMs) { + this.retryDelayMs = retryDelayMs; + return this; + } + + public Builder requestTimeoutMs(long requestTimeoutMs) { + this.requestTimeoutMs = requestTimeoutMs; + return this; + } + + public Builder shutdownTimeoutMs(long shutdownTimeoutMs) { + this.shutdownTimeoutMs = shutdownTimeoutMs; + return this; + } + + public Builder maxConcurrentRequests(int maxConcurrentRequests) { + this.maxConcurrentRequests = maxConcurrentRequests; + return this; + } + + /** + * Builds the LogdashConfig instance with the configured settings. 
+ * + * @return a new LogdashConfig instance + * @throws LogdashException if configuration is invalid + */ + public LogdashConfig build() { + return new LogdashConfig( + apiKey, + baseUrl, + enableConsoleOutput, + enableVerboseLogging, + maxRetries, + retryDelayMs, + requestTimeoutMs, + shutdownTimeoutMs, + maxConcurrentRequests); + } + } +} diff --git a/src/main/java/io/logdash/sdk/exception/LogdashException.java b/src/main/java/io/logdash/sdk/exception/LogdashException.java new file mode 100644 index 0000000..4242655 --- /dev/null +++ b/src/main/java/io/logdash/sdk/exception/LogdashException.java @@ -0,0 +1,40 @@ +package io.logdash.sdk.exception; + +import java.io.Serial; + +/** + * Runtime exception thrown when Logdash SDK operations fail. + * + *

This exception indicates various error conditions such as: + * + *

    + *
  • Invalid configuration parameters + *
  • Network connectivity issues + *
  • API authentication failures + *
  • Invalid input parameters + *
+ */ +public class LogdashException extends RuntimeException { + + @Serial + private static final long serialVersionUID = 1L; + + /** + * Constructs a new LogdashException with the specified detail message. + * + * @param message the detail message explaining the cause of the exception + */ + public LogdashException(String message) { + super(message); + } + + /** + * Constructs a new LogdashException with the specified detail message and cause. + * + * @param message the detail message explaining the cause of the exception + * @param cause the underlying cause of this exception + */ + public LogdashException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/src/main/java/io/logdash/sdk/log/LogEntry.java b/src/main/java/io/logdash/sdk/log/LogEntry.java new file mode 100644 index 0000000..f0a341b --- /dev/null +++ b/src/main/java/io/logdash/sdk/log/LogEntry.java @@ -0,0 +1,120 @@ +package io.logdash.sdk.log; + +import java.time.Instant; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.Map; + +/** + * Immutable data container representing a log entry to be sent to Logdash. + * + *

A log entry contains: + * + *

    + *
  • The log message text + *
  • Severity level indicating the importance + *
  • Timestamp when the log was created + *
  • Sequence number for maintaining order + *
  • Optional context data for structured logging + *
+ * + * @param message the log message text, must not be null + * @param level the severity level, must not be null + * @param createdAt the timestamp when this log was created + * @param sequenceNumber monotonically increasing number for ordering + * @param context additional structured data as key-value pairs + */ +public record LogEntry( + String message, + LogLevel level, + Instant createdAt, + long sequenceNumber, + Map context) { + private static final int MAX_MESSAGE_LENGTH = 100_000; + private static final int MAX_CONTEXT_ENTRIES = 100; + + /** + * Validates and normalizes the log entry parameters during construction. + * + * @throws IllegalArgumentException if message or level is null + */ + public LogEntry { + if (message == null) { + throw new IllegalArgumentException("Message cannot be null"); + } + if (message.length() > MAX_MESSAGE_LENGTH) { + message = + message.substring(0, MAX_MESSAGE_LENGTH - 50) + + " ... [TRUNCATED: original length was " + + message.length() + + " chars]"; + } + if (level == null) { + throw new IllegalArgumentException("Level cannot be null"); + } + if (createdAt == null) { + createdAt = Instant.now(); + } + if (context == null) { + context = Map.of(); + } else { + context = validateAndLimitContext(context); + } + } + + /** + * Creates a log entry with current timestamp and no context. + * + * @param message the log message + * @param level the log level + * @param sequenceNumber the sequence number for ordering + */ + public LogEntry(String message, LogLevel level, long sequenceNumber) { + this(message, level, Instant.now(), sequenceNumber, Map.of()); + } + + /** + * Creates a log entry with current timestamp and context data. 
+ * + * @param message the log message + * @param level the log level + * @param sequenceNumber the sequence number for ordering + * @param context additional context data + */ + public LogEntry( + String message, LogLevel level, long sequenceNumber, Map context) { + this(message, level, Instant.now(), sequenceNumber, context); + } + + private static Map validateAndLimitContext(Map context) { + if (context.size() > MAX_CONTEXT_ENTRIES) { + var limitedContext = new LinkedHashMap(); + var iterator = context.entrySet().iterator(); + int count = 0; + + while (iterator.hasNext() && count < MAX_CONTEXT_ENTRIES - 1) { + var entry = iterator.next(); + limitedContext.put(entry.getKey(), entry.getValue()); + count++; + } + + limitedContext.put( + "_truncated", + "Context limited to " + MAX_CONTEXT_ENTRIES + " entries from original " + context.size()); + + return Collections.unmodifiableMap(limitedContext); + } else { + return Collections.unmodifiableMap(new HashMap<>(context)); + } + } + + /** + * Returns the context data for this log entry. + * + * @return an immutable copy of the context map + */ + public Map context() { + return Collections.unmodifiableMap(new HashMap<>(context)); + } +} diff --git a/src/main/java/io/logdash/sdk/log/LogLevel.java b/src/main/java/io/logdash/sdk/log/LogLevel.java new file mode 100644 index 0000000..75033fa --- /dev/null +++ b/src/main/java/io/logdash/sdk/log/LogLevel.java @@ -0,0 +1,64 @@ +package io.logdash.sdk.log; + +/** + * Defines the severity levels for log messages in Logdash. + * + *

/**
 * Defines the severity levels for log messages in Logdash.
 *
 * <p>Log levels are ordered from most severe ({@link #ERROR}) to least severe ({@link #SILLY}).
 * Each level maps to the string value expected by the Logdash API.
 */
public enum LogLevel {
    /** Error level for application errors and exceptions. */
    ERROR("error"),

    /** Warning level for potentially harmful situations. */
    WARN("warning"),

    /** Informational level for general application flow. */
    INFO("info"),

    /** HTTP level for HTTP request/response logging. */
    HTTP("http"),

    /** Verbose level for detailed operational information. */
    VERBOSE("verbose"),

    /** Debug level for detailed diagnostic information. */
    DEBUG("debug"),

    /** Silly level for very detailed trace information. */
    SILLY("silly");

    // Wire-format name of the level, as accepted by the Logdash API.
    private final String apiValue;

    LogLevel(String apiValue) {
        this.apiValue = apiValue;
    }

    /**
     * Returns the string value used in the Logdash API.
     *
     * @return the API string representation of this log level
     */
    public String getValue() {
        return apiValue;
    }

    @Override
    public String toString() {
        return apiValue;
    }
}

Provides convenient methods for logging at different severity levels with optional structured + * context data. All log entries are sent asynchronously and are thread-safe for high-throughput + * scenarios. + * + *

Example usage: + * + *

{@code
+ * logger.info("User logged in successfully");
+ * logger.error("Database connection failed", Map.of("userId", 123, "retries", 3));
+ * logger.debug("Processing request", Map.of("requestId", "req-456"));
+ * }
+ */ +public final class LogdashLogger { + + private static final DateTimeFormatter CONSOLE_FORMATTER = DateTimeFormatter.ofPattern( + "yyyy-MM-dd'T'HH:mm:ss.SSSXXX"); + private static final String RESET = "\u001B[0m"; + private static final String RED = "\u001B[31m"; + private static final String YELLOW = "\u001B[33m"; + private static final String BLUE = "\u001B[34m"; + private static final String GREEN = "\u001B[32m"; + private static final String PURPLE = "\u001B[35m"; + private static final String CYAN = "\u001B[36m"; + private static final String GRAY = "\u001B[90m"; + + private final LogdashConfig config; + private final LogdashTransport transport; + private final AtomicLong sequenceNumber = new AtomicLong(0); + + /** + * Creates a new logger instance with the specified configuration and transport. + * + *

Note: This constructor is internal to the SDK. Use {@link + * io.logdash.sdk.Logdash#logger()} to obtain an instance. + * + * @param config the SDK configuration + * @param transport the transport for sending log entries + */ + public LogdashLogger(LogdashConfig config, LogdashTransport transport) { + this.config = config; + this.transport = transport; + } + + /** + * Logs an error message indicating a serious problem. + * + * @param message the error message + */ + public void error(String message) { + log(LogLevel.ERROR, message, Map.of()); + } + + /** + * Logs an error message with additional structured context. + * + * @param message the error message + * @param context additional context data as key-value pairs + */ + public void error(String message, Map context) { + log(LogLevel.ERROR, message, context); + } + + /** + * Logs a warning message indicating a potentially harmful situation. + * + * @param message the warning message + */ + public void warn(String message) { + log(LogLevel.WARN, message, Map.of()); + } + + /** + * Logs a warning message with additional structured context. + * + * @param message the warning message + * @param context additional context data as key-value pairs + */ + public void warn(String message, Map context) { + log(LogLevel.WARN, message, context); + } + + /** + * Logs an informational message about normal application flow. + * + * @param message the informational message + */ + public void info(String message) { + log(LogLevel.INFO, message, Map.of()); + } + + /** + * Logs an informational message with additional structured context. + * + * @param message the informational message + * @param context additional context data as key-value pairs + */ + public void info(String message, Map context) { + log(LogLevel.INFO, message, context); + } + + /** + * Logs an HTTP-related message (requests, responses, etc.). 
+ * + * @param message the HTTP-related message + */ + public void http(String message) { + log(LogLevel.HTTP, message, Map.of()); + } + + /** + * Logs an HTTP-related message with additional structured context. + * + * @param message the HTTP-related message + * @param context additional context data as key-value pairs + */ + public void http(String message, Map context) { + log(LogLevel.HTTP, message, context); + } + + /** + * Logs a verbose message with detailed operational information. + * + * @param message the verbose message + */ + public void verbose(String message) { + log(LogLevel.VERBOSE, message, Map.of()); + } + + /** + * Logs a verbose message with additional structured context. + * + * @param message the verbose message + * @param context additional context data as key-value pairs + */ + public void verbose(String message, Map context) { + log(LogLevel.VERBOSE, message, context); + } + + /** + * Logs a debug message with detailed diagnostic information. + * + * @param message the debug message + */ + public void debug(String message) { + log(LogLevel.DEBUG, message, Map.of()); + } + + /** + * Logs a debug message with additional structured context. + * + * @param message the debug message + * @param context additional context data as key-value pairs + */ + public void debug(String message, Map context) { + log(LogLevel.DEBUG, message, context); + } + + /** + * Logs a silly/trace level message with very detailed information. + * + * @param message the trace-level message + */ + public void silly(String message) { + log(LogLevel.SILLY, message, Map.of()); + } + + /** + * Logs a silly/trace level message with additional structured context. 
+ * + * @param message the trace-level message + * @param context additional context data as key-value pairs + */ + public void silly(String message, Map context) { + log(LogLevel.SILLY, message, context); + } + + private long getNextSequenceNumber() { + long current, next; + do { + current = sequenceNumber.get(); + next = current == Long.MAX_VALUE ? 1 : current + 1; + } while (!sequenceNumber.compareAndSet(current, next)); + return next; + } + + private void log(LogLevel level, String message, Map context) { + var finalMessage = buildFinalMessage(message, context); + var logEntry = new LogEntry(finalMessage, level, getNextSequenceNumber()); + + if (config.enableConsoleOutput()) { + printToConsole(message, level, context); + } + + transport.sendLog(logEntry); + } + + private String buildFinalMessage(String message, Map context) { + if (context.isEmpty()) { + return message; + } + + var contextJson = buildContextJson(context); + return message + " " + contextJson; + } + + private String buildContextJson(Map context) { + if (context.isEmpty()) { + return "{}"; + } + + var sb = new StringBuilder(context.size() * 32); + sb.append("{"); + + var iterator = context.entrySet().iterator(); + while (iterator.hasNext()) { + var entry = iterator.next(); + sb.append("\"").append(escapeJsonString(entry.getKey())).append("\":"); + sb.append(formatJsonValue(entry.getValue())); + + if (iterator.hasNext()) { + sb.append(","); + } + } + + sb.append("}"); + return sb.toString(); + } + + private String formatJsonValue(Object value) { + if (value == null) { + return "null"; + } + if (value instanceof String str) { + return "\"" + escapeJsonString(str) + "\""; + } + if (value instanceof Number || value instanceof Boolean) { + return value.toString(); + } + return "\"" + escapeJsonString(String.valueOf(value)) + "\""; + } + + private String escapeJsonString(String str) { + return str.replace("\\", "\\\\") + .replace("\"", "\\\"") + .replace("\n", "\\n") + .replace("\r", "\\r") + 
.replace("\t", "\\t"); + } + + private void printToConsole(String originalMessage, LogLevel level, Map context) { + var timestamp = OffsetDateTime.now().format(CONSOLE_FORMATTER); + var levelColor = getLevelColor(level); + var levelStr = level.getValue(); + + var contextStr = context.isEmpty() ? "" : " " + formatContext(context); + + System.out.printf( + "%s [%s%s%s] %s%s%n", timestamp, levelColor, levelStr, RESET, originalMessage, contextStr); + } + + private String formatContext(Map context) { + if (context.isEmpty()) { + return ""; + } + + var sb = new StringBuilder("{"); + var first = true; + for (var entry : context.entrySet()) { + if (!first) { + sb.append(", "); + } + sb.append(entry.getKey()).append("=").append(entry.getValue()); + first = false; + } + sb.append("}"); + return sb.toString(); + } + + private String getLevelColor(LogLevel level) { + return switch (level) { + case ERROR -> RED; + case WARN -> YELLOW; + case INFO -> BLUE; + case HTTP -> GREEN; + case VERBOSE -> PURPLE; + case DEBUG -> CYAN; + case SILLY -> GRAY; + }; + } +} diff --git a/src/main/java/io/logdash/sdk/metrics/LogdashMetrics.java b/src/main/java/io/logdash/sdk/metrics/LogdashMetrics.java new file mode 100644 index 0000000..f7c8731 --- /dev/null +++ b/src/main/java/io/logdash/sdk/metrics/LogdashMetrics.java @@ -0,0 +1,73 @@ +package io.logdash.sdk.metrics; + +import io.logdash.sdk.config.LogdashConfig; +import io.logdash.sdk.transport.LogdashTransport; + +/** + * Primary interface for sending custom metrics to Logdash platform. + * + *

Provides convenient methods for common metric operations including counters, gauges, and + * custom measurements. All operations are asynchronous and thread-safe. + * + *

Example usage: + * + *

{@code
+ * metrics.set("active_users", 150);
+ * metrics.mutate("response_time_ms", -50);
+ * }
+ */ +public final class LogdashMetrics { + + private final LogdashConfig config; + private final LogdashTransport transport; + + /** + * Creates a new metrics instance with the specified configuration and transport. + * + *

Note: This constructor is internal to the SDK. Use {@link + * io.logdash.sdk.Logdash#metrics()} to obtain an instance. + * + * @param config the SDK configuration + * @param transport the transport for sending metrics + */ + public LogdashMetrics(LogdashConfig config, LogdashTransport transport) { + this.config = config; + this.transport = transport; + } + + /** + * Sets a metric to a specific absolute value. + * + *

Use this for gauge-type metrics where you want to record the current state or level of + * something. + * + * @param name the metric name (e.g., "memory_usage_mb") + * @param value the absolute value to set + */ + public void set(String name, Number value) { + var metric = new MetricEntry(name, value, MetricType.SET); + sendMetric(metric); + } + + /** + * Changes a metric by the specified delta value. + * + *

Positive values increase the metric, negative values decrease it. This is the most flexible + * method for counter-type metrics. + * + * @param name the metric name + * @param delta the change amount (positive or negative) + */ + public void mutate(String name, Number delta) { + var metric = new MetricEntry(name, delta, MetricType.MUTATE); + sendMetric(metric); + } + + private void sendMetric(MetricEntry metric) { + if (config.enableVerboseLogging()) { + System.out.printf( + "Metric: %s %s %s%n", metric.operation().getValue(), metric.name(), metric.value()); + } + transport.sendMetric(metric); + } +} diff --git a/src/main/java/io/logdash/sdk/metrics/MetricEntry.java b/src/main/java/io/logdash/sdk/metrics/MetricEntry.java new file mode 100644 index 0000000..2edce93 --- /dev/null +++ b/src/main/java/io/logdash/sdk/metrics/MetricEntry.java @@ -0,0 +1,36 @@ +package io.logdash.sdk.metrics; + +/** + * Immutable data container representing a metric entry to be sent to Logdash. + * + *

A metric entry consists of: + * + *

    + *
  • A unique name identifying the metric (e.g., "http_requests_total") + *
  • A numeric value representing the measurement + *
  • An operation type defining how the metric should be processed + *
+ * + * @param name the metric identifier, must not be null or blank + * @param value the numeric value, must not be null + * @param operation the type of operation to perform, must not be null + */ +public record MetricEntry(String name, Number value, MetricType operation) { + + /** + * Validates the metric entry parameters during construction. + * + * @throws IllegalArgumentException if any parameter is null or invalid + */ + public MetricEntry { + if (name == null || name.isBlank()) { + throw new IllegalArgumentException("Metric name cannot be null or blank"); + } + if (value == null) { + throw new IllegalArgumentException("Metric value cannot be null"); + } + if (operation == null) { + throw new IllegalArgumentException("Metric operation cannot be null"); + } + } +} diff --git a/src/main/java/io/logdash/sdk/metrics/MetricType.java b/src/main/java/io/logdash/sdk/metrics/MetricType.java new file mode 100644 index 0000000..6e2ccd9 --- /dev/null +++ b/src/main/java/io/logdash/sdk/metrics/MetricType.java @@ -0,0 +1,45 @@ +package io.logdash.sdk.metrics; + +/** + * Defines the types of operations that can be performed on metrics in Logdash. + * + *

/**
 * Defines the types of operations that can be performed on metrics in Logdash.
 *
 * <p>These operations determine how metric values are processed by the Logdash platform:
 *
 * <ul>
 *   <li>{@link #SET} - Replaces the current metric value with the provided value
 *   <li>{@link #MUTATE} - Modifies the current metric value by adding the provided delta
 * </ul>
 */
public enum MetricType {
    /**
     * Set operation - replaces the current metric value with the provided value. Use for
     * gauge-type metrics where you want to record absolute values.
     */
    SET("set"),

    /**
     * Mutate operation - modifies the current metric value by the provided delta. Use for
     * counter-type metrics where you want to increment/decrement values.
     */
    MUTATE("change");

    // Wire-format name of the operation; note MUTATE is "change" on the API side.
    private final String apiValue;

    MetricType(String apiValue) {
        this.apiValue = apiValue;
    }

    /**
     * Returns the string value used in the Logdash API.
     *
     * @return the API string representation of this metric operation
     */
    public String getValue() {
        return apiValue;
    }

    @Override
    public String toString() {
        return apiValue;
    }
}

/**
 * High-performance HTTP transport implementation for sending observability data to the Logdash
 * platform.
 *
 * <p>This transport is optimized for throughput and low latency with:
 *
 * <ul>
 *   <li>Non-blocking concurrency control using tryAcquire
 *   <li>Request tracking with a CountDownLatch
 *   <li>Optimized thread pool sizing
 *   <li>Retry logic with exponential backoff and jitter
 *   <li>Minimal synchronization overhead
 * </ul>
 */
public final class HttpTransport implements LogdashTransport, AutoCloseable {

    private static final String LOGS_ENDPOINT = "/logs";
    private static final String METRICS_ENDPOINT = "/metrics";

    private static final String API_KEY_HEADER = "project-api-key";
    private static final String USER_AGENT = "logdash-java-sdk/0.1.0";
    private static final String CONTENT_TYPE = "application/json";

    private final LogdashConfig config;
    private final JsonSerializer serializer;
    private final HttpClient httpClient;
    private final ExecutorService executorService;
    // Caps in-flight HTTP requests at config.maxConcurrentRequests(); constructed fair.
    private final Semaphore rateLimiter;
    // closed: transport fully shut down; shutdownInitiated: stop accepting new work.
    private final AtomicBoolean closed = new AtomicBoolean(false);
    private final AtomicBoolean shutdownInitiated = new AtomicBoolean(false);
    // Monotonic id assigned to every submitted request, used only for log messages.
    private final AtomicLong requestCounter = new AtomicLong(0);
    // Number of requests currently inside the worker lambda (post-acquire).
    private final AtomicLong activeRequests = new AtomicLong(0);

    // Latch used by flush()/shutdown() to wait for in-flight requests.
    // NOTE(review): the handoff is racy — a thread seeing activeRequests transition
    // 0->1 installs a fresh latch while other threads may already hold the old one,
    // and the 1->0 countDown can race a concurrent 0->1 replacement. Waiters are
    // time-bounded, so the worst case is waiting out the timeout, not a hang.
    private volatile CountDownLatch pendingRequestsLatch = new CountDownLatch(0);

    /**
     * Creates a new HTTP transport with the specified configuration.
     *
     * @param config the configuration for HTTP operations
     * @throws LogdashException if transport initialization fails
     */
    public HttpTransport(LogdashConfig config) {
        this.config = config;
        this.serializer = new JsonSerializer();
        this.rateLimiter = new Semaphore(config.maxConcurrentRequests(), true);
        this.executorService = createOptimizedExecutorService();
        this.httpClient = createOptimizedHttpClient();

        if (config.enableVerboseLogging()) {
            System.out.println("HTTP transport initialized for " + config.baseUrl());
        }
    }

    /**
     * Serializes and sends a log entry via POST to {@code /logs}.
     *
     * <p>Serialization failures are swallowed (reported only in verbose mode) and
     * yield an already-completed future — logging must never break the host app.
     *
     * @param logEntry the entry to transmit
     * @return a future completing when the send finishes (or immediately if dropped)
     */
    @Override
    public CompletableFuture<Void> sendLog(LogEntry logEntry) {
        if (shutdownInitiated.get() || closed.get()) {
            return CompletableFuture.completedFuture(null);
        }

        try {
            String json = serializer.serialize(logEntry);
            return sendWithRetry(LOGS_ENDPOINT, json, "POST");
        } catch (Exception e) {
            handleTransportError("Failed to serialize log entry", e);
            return CompletableFuture.completedFuture(null);
        }
    }

    /**
     * Serializes and sends a metric entry via PUT to {@code /metrics}.
     *
     * @param metricEntry the metric to transmit
     * @return a future completing when the send finishes (or immediately if dropped)
     */
    @Override
    public CompletableFuture<Void> sendMetric(MetricEntry metricEntry) {
        if (shutdownInitiated.get() || closed.get()) {
            return CompletableFuture.completedFuture(null);
        }

        try {
            String json = serializer.serialize(metricEntry);
            return sendWithRetry(METRICS_ENDPOINT, json, "PUT");
        } catch (Exception e) {
            handleTransportError("Failed to serialize metric entry", e);
            return CompletableFuture.completedFuture(null);
        }
    }

    /**
     * Sends an HTTP request with retry logic and concurrency control. Uses tryAcquire with a
     * 200ms timeout (rather than immediate rejection) so short bursts queue instead of dropping.
     *
     * <p>Retries up to {@code config.maxRetries()} additional attempts with exponential backoff
     * (base delay doubled per attempt, capped at 5s) plus up to 100ms of random jitter.
     *
     * @param endpoint the API endpoint path
     * @param json the JSON payload to send
     * @param method the HTTP method (POST or PUT)
     * @return a CompletableFuture that completes when the request succeeds or all retries are
     *     exhausted
     */
    private CompletableFuture<Void> sendWithRetry(String endpoint, String json, String method) {
        if (shutdownInitiated.get() || closed.get()) {
            return CompletableFuture.completedFuture(null);
        }

        final long sequenceId = requestCounter.incrementAndGet();

        return CompletableFuture.runAsync(
                () -> {
                    boolean acquired = false;
                    try {
                        // Bounded wait for a concurrency slot; on timeout the payload is dropped.
                        acquired = rateLimiter.tryAcquire(200, TimeUnit.MILLISECONDS);
                        if (!acquired) {
                            if (config.enableVerboseLogging()) {
                                System.out.printf(
                                        "Request #%d rejected due to concurrency limit timeout%n", sequenceId);
                            }
                            return;
                        }

                        if (shutdownInitiated.get() || closed.get()) {
                            return;
                        }

                        long currentActive = activeRequests.incrementAndGet();
                        if (currentActive == 1) {
                            // First in-flight request: arm a fresh latch for flush()/shutdown().
                            pendingRequestsLatch = new CountDownLatch(1);
                        }

                        Exception lastException = null;
                        for (int attempt = 1; attempt <= config.maxRetries() + 1; attempt++) {
                            try {
                                sendHttpRequest(endpoint, json, method, attempt, sequenceId);
                                return;
                            } catch (Exception e) {
                                lastException = e;
                                if (config.enableVerboseLogging()) {
                                    System.err.printf(
                                            "Request #%d failed: %s (attempt %d/%d)%n",
                                            sequenceId, e.getMessage(), attempt, config.maxRetries() + 1);
                                }

                                if (attempt <= config.maxRetries() && !shutdownInitiated.get() && !closed.get()) {
                                    try {
                                        // Exponential backoff: base * 2^(attempt-1), capped at 5s, plus jitter.
                                        long baseDelay = config.retryDelayMs();
                                        long exponentialDelay = baseDelay * (1L << (attempt - 1));
                                        long maxDelay = Math.min(exponentialDelay, 5000);
                                        long jitter = (long) (Math.random() * 100);
                                        Thread.sleep(maxDelay + jitter);
                                    } catch (InterruptedException ie) {
                                        Thread.currentThread().interrupt();
                                        return;
                                    }
                                }
                            }
                        }

                        handleTransportError(
                                String.format(
                                        "Request #%d failed permanently after %d attempts",
                                        sequenceId, config.maxRetries() + 1),
                                lastException);

                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    } finally {
                        if (acquired) {
                            rateLimiter.release();
                        }

                        // NOTE(review): this decrement runs even on the early-return paths that
                        // never incremented activeRequests (tryAcquire timeout / shutdown check),
                        // so the counter can drift negative — verify intended.
                        long remaining = activeRequests.decrementAndGet();
                        if (remaining == 0) {
                            pendingRequestsLatch.countDown();
                        }
                    }
                },
                executorService);
    }

    /**
     * Performs a single HTTP attempt: builds the request (JSON body, API key header,
     * per-request timeout), sends it synchronously, and treats any non-2xx status or
     * I/O error as a failure by throwing {@link LogdashException}.
     *
     * @throws InterruptedException if the calling thread is interrupted mid-send
     * @throws LogdashException on non-2xx response, I/O error, or bad endpoint URI
     */
    private void sendHttpRequest(
            String endpoint,
            String json,
            String method,
            int attempt,
            long sequenceId) throws InterruptedException {
        URI uri;
        try {
            uri = createEndpointUri(endpoint);
        } catch (LogdashException e) {
            throw e;
        } catch (Exception e) {
            throw new LogdashException("Failed to create endpoint URI", e);
        }

        if (config.enableVerboseLogging()) {
            logRequestStart(method, endpoint, sequenceId, json.length(), attempt);
        }

        try {
            HttpRequest request = HttpRequest.newBuilder()
                    .uri(uri)
                    .timeout(Duration.ofMillis(config.requestTimeoutMs()))
                    .header("Content-Type", CONTENT_TYPE)
                    .header(API_KEY_HEADER, config.apiKey())
                    .header("User-Agent", USER_AGENT)
                    .method(method, HttpRequest.BodyPublishers.ofString(json))
                    .build();

            HttpResponse<String> response = httpClient.send(request, HttpResponse.BodyHandlers.ofString());

            if (isSuccessResponse(response.statusCode())) {
                if (config.enableVerboseLogging()) {
                    System.out.printf(
                            "Successfully sent request #%d to %s (status=%d)%n",
                            sequenceId, endpoint, response.statusCode());
                }
            } else {
                throw new LogdashException("HTTP " + response.statusCode() + ": " + response.body());
            }
        } catch (IOException e) {
            throw new LogdashException("I/O error during HTTP request", e);
        }
    }

    /**
     * Flushes all pending requests by waiting on the shared latch.
     *
     * <p>Best-effort: waits at most min(shutdownTimeoutMs, 5000) ms and never fails —
     * a timeout is only reported in verbose mode.
     *
     * @return a future that completes when all pending requests finish (or the wait times out)
     */
    public CompletableFuture<Void> flush() {
        return CompletableFuture.runAsync(
                () -> {
                    try {
                        CountDownLatch currentLatch = pendingRequestsLatch;
                        if (currentLatch != null && activeRequests.get() > 0) {
                            long timeoutMs = Math.min(config.shutdownTimeoutMs(), 5000);
                            boolean completed = currentLatch.await(timeoutMs, TimeUnit.MILLISECONDS);

                            if (completed && config.enableVerboseLogging()) {
                                System.out.println("All pending requests flushed successfully");
                            } else if (!completed && config.enableVerboseLogging()) {
                                System.out.printf(
                                        "Flush timeout, %d requests still pending%n", activeRequests.get());
                            }
                        }
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    }
                },
                executorService);
    }

    /**
     * Initiates a graceful shutdown: stops accepting new work, waits up to
     * min(shutdownTimeoutMs, 3000) ms for in-flight requests, then marks the
     * transport closed. Only the first caller performs the drain (guarded by
     * compareAndSet); later callers get an already-completed future.
     *
     * @return a future completing when the drain finishes or times out
     */
    @Override
    public Optional<CompletableFuture<Void>> shutdown() {
        if (shutdownInitiated.compareAndSet(false, true)) {
            return Optional.of(
                    CompletableFuture.runAsync(
                            () -> {
                                try {
                                    if (config.enableVerboseLogging()) {
                                        System.out.println("Initiating HTTP transport shutdown...");
                                    }

                                    CountDownLatch currentLatch = pendingRequestsLatch;
                                    long timeoutMs = Math.min(config.shutdownTimeoutMs(), 3000);

                                    if (currentLatch != null && activeRequests.get() > 0) {
                                        if (config.enableVerboseLogging()) {
                                            System.out.printf(
                                                    "Waiting for %d pending HTTP requests...%n", activeRequests.get());
                                        }

                                        boolean completed = currentLatch.await(timeoutMs, TimeUnit.MILLISECONDS);

                                        if (completed && config.enableVerboseLogging()) {
                                            System.out.println("All HTTP requests completed successfully");
                                        } else if (!completed && config.enableVerboseLogging()) {
                                            System.out.printf(
                                                    "Shutdown timeout exceeded, %d requests may be lost%n",
                                                    activeRequests.get());
                                        }
                                    }

                                } catch (InterruptedException e) {
                                    Thread.currentThread().interrupt();
                                } finally {
                                    closed.set(true);
                                }
                            },
                            executorService));
        }
        return Optional.of(CompletableFuture.completedFuture(null));
    }

    /**
     * Immediately closes the transport: flags it closed, then shuts the executor
     * down with a short 500ms grace period before forcing shutdownNow(). Unlike
     * {@link #shutdown()}, this does not wait for in-flight requests.
     */
    @Override
    public void close() {
        shutdownInitiated.set(true);
        closed.set(true);

        try {
            executorService.shutdown();
            if (!executorService.awaitTermination(500, TimeUnit.MILLISECONDS)) {
                if (config.enableVerboseLogging()) {
                    System.out.println("Forcing HTTP executor shutdown");
                }
                executorService.shutdownNow();
            }

            if (config.enableVerboseLogging()) {
                System.out.println("HTTP transport closed");
            }

        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            executorService.shutdownNow();
        }
    }

    /**
     * Creates the thread pool for HTTP operations: daemon threads, bounded queue
     * (2x the concurrency limit), caller-runs on saturation, and core threads
     * allowed to time out so an idle SDK holds no threads.
     */
    private ExecutorService createOptimizedExecutorService() {
        int corePoolSize = Math.max(4, Runtime.getRuntime().availableProcessors());
        int maxPoolSize = Math.max(config.maxConcurrentRequests(), corePoolSize * 2);

        ThreadPoolExecutor executor =
                new ThreadPoolExecutor(
                        corePoolSize,
                        maxPoolSize,
                        60L,
                        TimeUnit.SECONDS,
                        new LinkedBlockingQueue<>(config.maxConcurrentRequests() * 2),
                        r -> {
                            // NOTE(review): currentTimeMillis-based names can collide for threads
                            // created in the same millisecond; consider an AtomicInteger suffix.
                            Thread t = new Thread(r, "logdash-http-" + System.currentTimeMillis());
                            t.setDaemon(true);
                            return t;
                        },
                        new ThreadPoolExecutor.CallerRunsPolicy());

        executor.allowCoreThreadTimeOut(true);
        return executor;
    }

    /** Builds the shared HttpClient; connect timeout is requestTimeoutMs capped at 10s. */
    private HttpClient createOptimizedHttpClient() {
        return HttpClient.newBuilder()
                .connectTimeout(Duration.ofMillis(Math.min(config.requestTimeoutMs(), 10000)))
                .build();
    }

    /**
     * Joins the configured base URL (with any trailing slash stripped) and the
     * endpoint path into a URI.
     *
     * @throws LogdashException if the combined string is not a valid URI
     */
    private URI createEndpointUri(String endpoint) {
        try {
            String baseUrl =
                    config.baseUrl().endsWith("/")
                            ? config.baseUrl().substring(0, config.baseUrl().length() - 1)
                            : config.baseUrl();
            return new URI(baseUrl + endpoint);
        } catch (URISyntaxException e) {
            throw new LogdashException("Invalid endpoint URI: " + config.baseUrl() + endpoint, e);
        }
    }

    /** Returns true for any 2xx HTTP status. */
    private boolean isSuccessResponse(int statusCode) {
        return statusCode >= 200 && statusCode < 300;
    }

    /** Masks the API key for log output, keeping only the first and last 4 characters. */
    private String maskApiKey(String apiKey) {
        if (apiKey == null || apiKey.length() <= 8) {
            return "***";
        }
        return apiKey.substring(0, 4) + "..." + apiKey.substring(apiKey.length() - 4);
    }

    /** Verbose-mode trace of an outgoing request (API key masked, body size only). */
    private void logRequestStart(
            String method, String endpoint, long requestId, int bodySize, int attempt) {
        System.out.printf(
                "Sending HTTP %s request #%d to %s (attempt %d): headers={Content-Type=[%s], %s=[%s]}, bodySize=%d%n",
                method,
                requestId,
                endpoint,
                attempt,
                CONTENT_TYPE,
                API_KEY_HEADER,
                maskApiKey(config.apiKey()),
                bodySize);
    }

    /** Best-effort error reporting: printed only in verbose mode, never thrown. */
    private void handleTransportError(String message, Throwable cause) {
        if (config.enableVerboseLogging()) {
            System.err.println("Logdash transport error: " + message);
            if (cause != null) {
                System.err.println("Cause: " + cause.getMessage());
            }
        }
    }
}

Implementations handle the actual delivery mechanism for logs and metrics, whether through + * HTTP, console output, or other means. All operations are asynchronous to avoid blocking the + * application. + */ +public interface LogdashTransport { + + /** + * Sends a log entry asynchronously to the transport destination. + * + * @param logEntry the log entry to send + * @return a future that completes when the operation finishes + */ + CompletableFuture sendLog(LogEntry logEntry); + + /** + * Sends a metric entry asynchronously to the transport destination. + * + * @param metricEntry the metric entry to send + * @return a future that completes when the operation finishes + */ + CompletableFuture sendMetric(MetricEntry metricEntry); + + /** + * Initiates graceful shutdown of the transport. + * + *

Implementations should ensure all pending operations complete before the returned future + * completes. + * + * @return an optional future for shutdown completion, empty if no cleanup needed + */ + default Optional> shutdown() { + return Optional.empty(); + } + + /** + * Immediately closes the transport and releases all resources. + * + *

This method should be idempotent and safe to call multiple times. + */ + void close(); +} diff --git a/src/main/java/io/logdash/sdk/transport/NoOpTransport.java b/src/main/java/io/logdash/sdk/transport/NoOpTransport.java new file mode 100644 index 0000000..4f0d8d4 --- /dev/null +++ b/src/main/java/io/logdash/sdk/transport/NoOpTransport.java @@ -0,0 +1,108 @@ +package io.logdash.sdk.transport; + +import io.logdash.sdk.config.LogdashConfig; +import io.logdash.sdk.log.LogEntry; +import io.logdash.sdk.metrics.MetricEntry; + +import java.time.OffsetDateTime; +import java.time.format.DateTimeFormatter; +import java.util.Locale; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; + +/** + * No-operation transport that outputs observability data to console only. + * + *

This transport is used as a fallback when no API key is provided or when HTTP transport + * initialization fails. It provides local visibility into logs and metrics without sending data to + * external services. + * + *

Useful for development, testing, and scenarios where external connectivity is not available or + * desired. + */ +public final class NoOpTransport implements LogdashTransport { + + private static final DateTimeFormatter CONSOLE_FORMATTER = DateTimeFormatter.ofPattern( + "yyyy-MM-dd'T'HH:mm:ss.SSSXXX"); + private static final String RESET = "\u001B[0m"; + private static final String CYAN = "\u001B[36m"; + private static final String YELLOW = "\u001B[33m"; + + private final LogdashConfig config; + private volatile boolean closed = false; + + /** + * Creates a new NoOp transport with the specified configuration. + * + * @param config the configuration for console output behavior + */ + public NoOpTransport(LogdashConfig config) { + this.config = config; + if (config.enableVerboseLogging()) { + System.out.println("Logdash NoOp transport initialized (console-only mode)"); + } + } + + @Override + public CompletableFuture sendLog(LogEntry logEntry) { + if (closed) { + return CompletableFuture.completedFuture(null); + } + + if (config.enableConsoleOutput()) { + printLogToConsole(logEntry); + } + + return CompletableFuture.completedFuture(null); + } + + @Override + public CompletableFuture sendMetric(MetricEntry metricEntry) { + if (closed) { + return CompletableFuture.completedFuture(null); + } + + if (config.enableConsoleOutput()) { + printMetricToConsole(metricEntry); + } + + return CompletableFuture.completedFuture(null); + } + + @Override + public Optional> shutdown() { + return Optional.empty(); + } + + @Override + public void close() { + closed = true; + if (config.enableVerboseLogging()) { + System.out.println("Logdash NoOp transport closed"); + } + } + + private void printLogToConsole(LogEntry logEntry) { + var timestamp = OffsetDateTime.now().format(CONSOLE_FORMATTER); + System.out.printf( + "%s [%sLOG%s] %s: %s (seq=%d)%n", + timestamp, + CYAN, + RESET, + logEntry.level().getValue().toUpperCase(Locale.ENGLISH), + logEntry.message(), + logEntry.sequenceNumber()); 
+ } + + private void printMetricToConsole(MetricEntry metricEntry) { + var timestamp = OffsetDateTime.now().format(CONSOLE_FORMATTER); + System.out.printf( + "%s [%sMETRIC%s] %s %s = %s%n", + timestamp, + YELLOW, + RESET, + metricEntry.operation().getValue().toUpperCase(Locale.ENGLISH), + metricEntry.name(), + metricEntry.value()); + } +} diff --git a/src/main/java/io/logdash/sdk/util/JsonSerializer.java b/src/main/java/io/logdash/sdk/util/JsonSerializer.java new file mode 100644 index 0000000..a65535d --- /dev/null +++ b/src/main/java/io/logdash/sdk/util/JsonSerializer.java @@ -0,0 +1,124 @@ +package io.logdash.sdk.util; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.MapperFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializationFeature; +import com.fasterxml.jackson.databind.json.JsonMapper; +import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; +import io.logdash.sdk.log.LogEntry; +import io.logdash.sdk.metrics.MetricEntry; + +import java.util.Map; + +/** + * Internal JSON serializer for converting SDK objects to JSON format. + * + *

This lightweight, dependency-free serializer is specifically designed for Logdash SDK objects. + * It handles proper escaping and formatting according to JSON specification. + * + *

Note: This class is internal to the SDK and should not be used directly by + * client applications. + */ +public final class JsonSerializer { + private static final ObjectMapper OBJECT_MAPPER = createObjectMapper(); + + private static ObjectMapper createObjectMapper() { + return JsonMapper.builder() + .addModule(new JavaTimeModule()) + .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES) + .disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS) + .enable(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY) + .serializationInclusion(JsonInclude.Include.NON_NULL) + .build(); + } + + /** + * Serializes a log entry to JSON format. + * + * @param logEntry the log entry to serialize, must not be null + * @return JSON string representation + */ + public String serialize(LogEntry logEntry) { + try { + var logData = + Map.of( + "message", sanitizeString(logEntry.message()), + "level", logEntry.level().getValue(), + "createdAt", logEntry.createdAt(), + "sequenceNumber", logEntry.sequenceNumber()); + return OBJECT_MAPPER.writeValueAsString(logData); + } catch (JsonProcessingException e) { + return createFallbackLogJson(logEntry, e); + } + } + + /** + * Serializes a metric entry to JSON format. 
+ * + * @param metricEntry the metric entry to serialize, must not be null + * @return JSON string representation + */ + public String serialize(MetricEntry metricEntry) { + try { + var metricData = + Map.of( + "name", sanitizeString(metricEntry.name()), + "value", sanitizeNumber(metricEntry.value()), + "operation", metricEntry.operation().getValue()); + return OBJECT_MAPPER.writeValueAsString(metricData); + } catch (JsonProcessingException e) { + return createFallbackMetricJson(metricEntry, e); + } + } + + private String sanitizeString(String input) { + if (input == null) return ""; + return input.replaceAll("[\\p{Cntrl}&&[^\t\n\r]]", ""); + } + + private Number sanitizeNumber(Number number) { + if (number instanceof Double d) { + if (Double.isNaN(d) || Double.isInfinite(d)) { + return 0.0; + } + } + if (number instanceof Float f) { + if (Float.isNaN(f) || Float.isInfinite(f)) { + return 0.0f; + } + } + return number; + } + + private String createFallbackLogJson(LogEntry logEntry, Exception error) { + return String.format( + "{\"message\":\"%s\",\"level\":\"%s\",\"createdAt\":\"%s\",\"sequenceNumber\":%d," + + "\"_error\":\"Serialization failed: %s\"}", + escapeJsonString(logEntry.message()), + logEntry.level().getValue(), + logEntry.createdAt(), + logEntry.sequenceNumber(), + escapeJsonString(error.getMessage())); + } + + private String createFallbackMetricJson(MetricEntry metricEntry, Exception error) { + return String.format( + "{\"name\":\"%s\",\"value\":0,\"operation\":\"%s\",\"_error\":\"Serialization failed: %s\"}", + escapeJsonString(metricEntry.name()), + metricEntry.operation().getValue(), + escapeJsonString(error.getMessage())); + } + + private String escapeJsonString(String input) { + if (input == null) return ""; + return input + .replace("\\", "\\\\") + .replace("\"", "\\\"") + .replace("\n", "\\n") + .replace("\r", "\\r") + .replace("\t", "\\t"); + } +} diff --git a/src/test/java/io/logdash/sdk/LogdashTest.java 
b/src/test/java/io/logdash/sdk/LogdashTest.java new file mode 100644 index 0000000..edb7fde --- /dev/null +++ b/src/test/java/io/logdash/sdk/LogdashTest.java @@ -0,0 +1,229 @@ +package io.logdash.sdk; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import java.util.Map; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +import static org.assertj.core.api.Assertions.*; + +class LogdashTest { + + @Test + void should_create_sdk_with_valid_configuration() { + assertThatNoException() + .isThrownBy( + () -> { + try (var sdk = + Logdash.builder() + .apiKey("test-api-key") + .baseUrl("https://api.logdash.io") + .build()) { + + assertThat(sdk.logger()).isNotNull(); + assertThat(sdk.metrics()).isNotNull(); + assertThat(sdk.config().apiKey()).isEqualTo("test-api-key"); + assertThat(sdk.config().baseUrl()).isEqualTo("https://api.logdash.io"); + } + }); + } + + @Test + void should_create_sdk_with_default_configuration() { + // Act + try (var sdk = Logdash.create("test-key")) { + // Assert + assertThat(sdk.config().apiKey()).isEqualTo("test-key"); + assertThat(sdk.config().baseUrl()).isEqualTo("https://api.logdash.io"); + assertThat(sdk.config().enableConsoleOutput()).isTrue(); + assertThat(sdk.config().enableVerboseLogging()).isFalse(); + } + } + + @Test + void should_create_sdk_without_api_key_and_use_noop_transport() { + // Act + try (var sdk = Logdash.builder() + .apiKey("") + .enableVerboseLogging(true) + .build()) { + + // Assert + assertThat(sdk.logger()).isNotNull(); + assertThat(sdk.metrics()).isNotNull(); + } + } + + @Test + void should_throw_exception_when_accessing_closed_sdk() { + // Arrange + var sdk = Logdash.create("test-key"); + sdk.close(); + + // Act & Assert + assertThatThrownBy(sdk::logger) + .isInstanceOf(IllegalStateException.class) + .hasMessageContaining("closed"); + + 
assertThatThrownBy(sdk::metrics) + .isInstanceOf(IllegalStateException.class) + .hasMessageContaining("closed"); + } + + @Test + @Timeout(value = 10, unit = TimeUnit.SECONDS) + void should_handle_flush_operation() { + // Arrange + try (var sdk = Logdash.builder() + .apiKey("test-key") + .enableVerboseLogging(false) + .build()) { + + sdk.logger().info("Test message"); + sdk.metrics().set("test_metric", 42); + + // Act & Assert + assertThatNoException().isThrownBy(sdk::flush); + } + } + + @Test + void should_configure_all_builder_options() { + // Act + try (var sdk = + Logdash.builder() + .apiKey("custom-key") + .baseUrl("https://custom.logdash.io") + .enableConsoleOutput(false) + .enableVerboseLogging(true) + .maxRetries(5) + .retryDelayMs(2000L) + .requestTimeoutMs(20000L) + .shutdownTimeoutMs(15000L) + .maxConcurrentRequests(50) + .build()) { + + // Assert + var config = sdk.config(); + assertThat(config.apiKey()).isEqualTo("custom-key"); + assertThat(config.baseUrl()).isEqualTo("https://custom.logdash.io"); + assertThat(config.enableConsoleOutput()).isFalse(); + assertThat(config.enableVerboseLogging()).isTrue(); + assertThat(config.maxRetries()).isEqualTo(5); + assertThat(config.retryDelayMs()).isEqualTo(2000L); + assertThat(config.requestTimeoutMs()).isEqualTo(20000L); + assertThat(config.shutdownTimeoutMs()).isEqualTo(15000L); + assertThat(config.maxConcurrentRequests()).isEqualTo(50); + } + } + + @Test + void should_allow_multiple_close_calls_safely() { + // Arrange + var sdk = Logdash.create("test-key"); + + // Act & Assert + assertThatNoException() + .isThrownBy( + () -> { + sdk.close(); + sdk.close(); // Second close should be safe + }); + } + + @Test + void should_work_with_try_with_resources() { + // Act & Assert + assertThatNoException() + .isThrownBy( + () -> { + try (var sdk = Logdash.create("test-key")) { + sdk.logger().info("Test message", Map.of("key", "value")); + sdk.metrics().mutate("test_counter", 1); + } + // Auto-close should work without 
issues + }); + } + + @Test + void should_handle_builder_with_all_default_values() { + try (var sdk = Logdash.builder().build()) { + var config = sdk.config(); + + assertThat(config.apiKey()).isNull(); + assertThat(config.baseUrl()).isEqualTo("https://api.logdash.io"); + assertThat(config.enableConsoleOutput()).isTrue(); + assertThat(config.enableVerboseLogging()).isFalse(); + assertThat(config.maxRetries()).isEqualTo(3); + assertThat(config.retryDelayMs()).isEqualTo(500L); + assertThat(config.requestTimeoutMs()).isEqualTo(15000L); + assertThat(config.shutdownTimeoutMs()).isEqualTo(10000L); + assertThat(config.maxConcurrentRequests()).isEqualTo(20); + } + } + + @Test + void should_handle_runtime_shutdown_hook() { + // Test that shutdown hook is properly registered and removed + var sdk = Logdash.create("test-key"); + + // Verify SDK is operational + assertThat(sdk.logger()).isNotNull(); + assertThat(sdk.metrics()).isNotNull(); + + // Close should not throw exception + assertThatNoException().isThrownBy(sdk::close); + } + + @Test + void should_maintain_thread_safety_during_concurrent_access() throws InterruptedException { + try (var sdk = Logdash.builder() + .apiKey("concurrent-test") + .enableConsoleOutput(false) + .build()) { + + var executor = Executors.newFixedThreadPool(10); + var latch = new CountDownLatch(50); + var exceptions = new ConcurrentLinkedQueue(); + + // Submit concurrent operations + for (int i = 0; i < 50; i++) { + final int iteration = i; + executor.submit( + () -> { + try { + sdk.logger().info("Concurrent message " + iteration); + sdk.metrics().mutate("concurrent_counter", 1); + } catch (Exception e) { + exceptions.add(e); + } finally { + latch.countDown(); + } + }); + } + + // Wait for all operations to complete + assertThat(latch.await(10, TimeUnit.SECONDS)).isTrue(); + assertThat(exceptions).isEmpty(); + + executor.shutdown(); + assertThat(executor.awaitTermination(5, TimeUnit.SECONDS)).isTrue(); + } + } + + @Test + void 
should_handle_flush_with_no_pending_operations() { + try (var sdk = Logdash.builder() + .apiKey("test-key") + .enableConsoleOutput(false) + .build()) { + + // Flush when no operations are pending + assertThatNoException().isThrownBy(sdk::flush); + } + } +} diff --git a/src/test/java/io/logdash/sdk/config/LogdashConfigBuilderTest.java b/src/test/java/io/logdash/sdk/config/LogdashConfigBuilderTest.java new file mode 100644 index 0000000..34e4170 --- /dev/null +++ b/src/test/java/io/logdash/sdk/config/LogdashConfigBuilderTest.java @@ -0,0 +1,79 @@ +package io.logdash.sdk.config; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatNoException; + +class LogdashConfigBuilderTest { + + @Test + void should_chain_builder_methods_correctly() { + // Act + var builder = + LogdashConfig.builder() + .apiKey("test") + .baseUrl("https://test.com") + .enableConsoleOutput(false) + .enableVerboseLogging(true) + .maxRetries(5) + .retryDelayMs(1000L) + .requestTimeoutMs(5000L) + .shutdownTimeoutMs(3000L) + .maxConcurrentRequests(20); + + // Assert + assertThat(builder).isNotNull(); + + var config = builder.build(); + assertThat(config.apiKey()).isEqualTo("test"); + assertThat(config.baseUrl()).isEqualTo("https://test.com"); + assertThat(config.enableConsoleOutput()).isFalse(); + assertThat(config.enableVerboseLogging()).isTrue(); + assertThat(config.maxRetries()).isEqualTo(5); + assertThat(config.retryDelayMs()).isEqualTo(1000L); + assertThat(config.requestTimeoutMs()).isEqualTo(5000L); + assertThat(config.shutdownTimeoutMs()).isEqualTo(3000L); + assertThat(config.maxConcurrentRequests()).isEqualTo(20); + } + + @ParameterizedTest + @ValueSource( + strings = { + "https://api.example.com", + "http://localhost:8080", + "https://subdomain.example.org/path", + "relative-path" + }) + 
void should_accept_various_url_formats(String url) { + // Act & Assert + assertThatNoException().isThrownBy(() -> LogdashConfig.builder().baseUrl(url).build()); + } + + @Test + void should_validate_extreme_timeout_values() { + // Very large values should be accepted + assertThatNoException() + .isThrownBy( + () -> + LogdashConfig.builder() + .requestTimeoutMs(Long.MAX_VALUE) + .shutdownTimeoutMs(Long.MAX_VALUE) + .retryDelayMs(Long.MAX_VALUE) + .build()); + + // Boundary values + assertThatNoException() + .isThrownBy( + () -> + LogdashConfig.builder() + .requestTimeoutMs(1L) + .shutdownTimeoutMs(1L) + .retryDelayMs(0L) + .maxRetries(0) + .maxConcurrentRequests(1) + .build()); + } +} diff --git a/src/test/java/io/logdash/sdk/config/LogdashConfigTest.java b/src/test/java/io/logdash/sdk/config/LogdashConfigTest.java new file mode 100644 index 0000000..f1cebd4 --- /dev/null +++ b/src/test/java/io/logdash/sdk/config/LogdashConfigTest.java @@ -0,0 +1,259 @@ +package io.logdash.sdk.config; + +import io.logdash.sdk.exception.LogdashException; +import org.junit.jupiter.api.Test; + +import static org.assertj.core.api.Assertions.*; + +class LogdashConfigTest { + + @Test + void should_create_config_with_valid_parameters() { + // Act + var config = + LogdashConfig.builder() + .apiKey("test-key") + .baseUrl("https://api.logdash.io") + .enableConsoleOutput(true) + .enableVerboseLogging(false) + .maxRetries(3) + .retryDelayMs(1000L) + .requestTimeoutMs(15000L) + .shutdownTimeoutMs(10000L) + .maxConcurrentRequests(10) + .build(); + + // Assert + assertThat(config.apiKey()).isEqualTo("test-key"); + assertThat(config.baseUrl()).isEqualTo("https://api.logdash.io"); + assertThat(config.enableConsoleOutput()).isTrue(); + assertThat(config.enableVerboseLogging()).isFalse(); + assertThat(config.maxRetries()).isEqualTo(3); + assertThat(config.retryDelayMs()).isEqualTo(1000L); + assertThat(config.requestTimeoutMs()).isEqualTo(15000L); + 
assertThat(config.shutdownTimeoutMs()).isEqualTo(10000L); + assertThat(config.maxConcurrentRequests()).isEqualTo(10); + } + + @Test + void should_use_default_values() { + // Act + var config = LogdashConfig.builder().build(); + + // Assert + assertThat(config.apiKey()).isNull(); + assertThat(config.baseUrl()).isEqualTo("https://api.logdash.io"); + assertThat(config.enableConsoleOutput()).isTrue(); + assertThat(config.enableVerboseLogging()).isFalse(); + assertThat(config.maxRetries()).isEqualTo(3); + assertThat(config.retryDelayMs()).isEqualTo(500L); + assertThat(config.requestTimeoutMs()).isEqualTo(15000L); + assertThat(config.shutdownTimeoutMs()).isEqualTo(10000L); + assertThat(config.maxConcurrentRequests()).isEqualTo(10); + } + + @Test + void should_throw_exception_for_null_base_url() { + assertThatThrownBy(() -> + LogdashConfig.builder() + .baseUrl(null) + .build() + ).isInstanceOf(LogdashException.class) + .hasMessageContaining("Base URL cannot be null or blank"); + } + + @Test + void should_throw_exception_for_blank_base_url() { + assertThatThrownBy(() -> + LogdashConfig.builder() + .baseUrl(" ") + .build() + ).isInstanceOf(LogdashException.class) + .hasMessageContaining("Base URL cannot be null or blank"); + } + + @Test + void should_accept_relative_urls_as_valid() { + assertThatNoException().isThrownBy(() -> + LogdashConfig.builder() + .baseUrl("not-a-valid-url") + .build() + ); + } + + @Test + void should_throw_exception_for_invalid_uri_syntax() { + assertThatThrownBy(() -> + LogdashConfig.builder() + .baseUrl("ht tp://invalid url with spaces") + .build() + ).isInstanceOf(LogdashException.class) + .hasMessageContaining("Invalid base URL"); + } + + @Test + void should_throw_exception_for_negative_max_retries() { + assertThatThrownBy(() -> + LogdashConfig.builder() + .maxRetries(-1) + .build() + ).isInstanceOf(LogdashException.class) + .hasMessageContaining("Max retries cannot be negative"); + } + + @Test + void 
should_throw_exception_for_negative_retry_delay() { + assertThatThrownBy(() -> + LogdashConfig.builder() + .retryDelayMs(-1L) + .build() + ).isInstanceOf(LogdashException.class) + .hasMessageContaining("Retry delay cannot be negative"); + } + + @Test + void should_throw_exception_for_zero_or_negative_request_timeout() { + assertThatThrownBy(() -> + LogdashConfig.builder() + .requestTimeoutMs(0L) + .build() + ).isInstanceOf(LogdashException.class) + .hasMessageContaining("Request timeout must be positive"); + + assertThatThrownBy(() -> + LogdashConfig.builder() + .requestTimeoutMs(-1000L) + .build() + ).isInstanceOf(LogdashException.class) + .hasMessageContaining("Request timeout must be positive"); + } + + @Test + void should_throw_exception_for_zero_or_negative_shutdown_timeout() { + assertThatThrownBy(() -> + LogdashConfig.builder() + .shutdownTimeoutMs(0L) + .build() + ).isInstanceOf(LogdashException.class) + .hasMessageContaining("Shutdown timeout must be positive"); + } + + @Test + void should_throw_exception_for_zero_or_negative_max_concurrent_requests() { + assertThatThrownBy(() -> + LogdashConfig.builder() + .maxConcurrentRequests(0) + .build() + ).isInstanceOf(LogdashException.class) + .hasMessageContaining("Max concurrent requests must be positive"); + } + + @Test + void should_accept_zero_max_retries() { + assertThatNoException().isThrownBy(() -> + LogdashConfig.builder() + .maxRetries(0) + .build() + ); + } + + @Test + void should_accept_zero_retry_delay() { + assertThatNoException().isThrownBy(() -> + LogdashConfig.builder() + .retryDelayMs(0L) + .build() + ); + } + + @Test + void should_validate_all_parameters_in_constructor() { + assertThatThrownBy(() -> + new LogdashConfig( + "test-key", + "", + true, + false, + 3, + 1000L, + 15000L, + 10000L, + 10 + ) + ).isInstanceOf(LogdashException.class) + .hasMessageContaining("Base URL cannot be null or blank"); + } + + @Test + void should_create_valid_urls_for_common_protocols() { + assertThatNoException() + 
.isThrownBy( + () -> { + LogdashConfig.builder().baseUrl("https://api.logdash.io").build(); + LogdashConfig.builder().baseUrl("http://localhost:8080").build(); + LogdashConfig.builder().baseUrl("https://custom-domain.com/api/v1").build(); + }); + } + + @Test + void should_validate_url_schemes_correctly() { + // Act & Assert + assertThatNoException() + .isThrownBy( + () -> { + LogdashConfig.builder().baseUrl("https://api.logdash.io").build(); + LogdashConfig.builder().baseUrl("http://localhost:8080").build(); + LogdashConfig.builder().baseUrl("HTTP://EXAMPLE.COM").build(); + LogdashConfig.builder().baseUrl("HTTPS://EXAMPLE.COM").build(); + }); + } + + @Test + void should_handle_edge_case_timeout_values() { + // Act & Assert + assertThatNoException() + .isThrownBy( + () -> { + LogdashConfig.builder() + .requestTimeoutMs(1L) + .shutdownTimeoutMs(1L) + .retryDelayMs(0L) + .maxRetries(0) + .maxConcurrentRequests(1) + .build(); + }); + } + + @Test + void should_validate_concurrent_requests_limits() { + assertThatThrownBy(() -> + LogdashConfig.builder() + .maxConcurrentRequests(-1) + .build() + ).isInstanceOf(LogdashException.class) + .hasMessageContaining("Max concurrent requests must be positive"); + } + + @Test + void should_handle_builder_method_chaining() { + // Act + var config = + LogdashConfig.builder() + .apiKey("key") + .baseUrl("https://test.com") + .enableConsoleOutput(false) + .enableVerboseLogging(true) + .maxRetries(5) + .retryDelayMs(2000L) + .requestTimeoutMs(30000L) + .shutdownTimeoutMs(15000L) + .maxConcurrentRequests(50); + + // Assert + assertThatNoException().isThrownBy(config::build); + + var builtConfig = config.build(); + assertThat(builtConfig.apiKey()).isEqualTo("key"); + assertThat(builtConfig.maxConcurrentRequests()).isEqualTo(50); + } +} diff --git a/src/test/java/io/logdash/sdk/error/ErrorHandlingTest.java b/src/test/java/io/logdash/sdk/error/ErrorHandlingTest.java new file mode 100644 index 0000000..041c549 --- /dev/null +++ 
b/src/test/java/io/logdash/sdk/error/ErrorHandlingTest.java @@ -0,0 +1,141 @@ +package io.logdash.sdk.error; + +import io.logdash.sdk.Logdash; +import io.logdash.sdk.config.LogdashConfig; +import io.logdash.sdk.exception.LogdashException; +import io.logdash.sdk.log.LogEntry; +import io.logdash.sdk.log.LogLevel; +import io.logdash.sdk.metrics.MetricEntry; +import io.logdash.sdk.metrics.MetricType; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.NullAndEmptySource; +import org.junit.jupiter.params.provider.ValueSource; + +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +import static org.assertj.core.api.Assertions.*; + +class ErrorHandlingTest { + + @ParameterizedTest + @NullAndEmptySource + @ValueSource(strings = {" ", "\t", "\n"}) + void should_handle_invalid_api_keys(String apiKey) { + assertThatNoException() + .isThrownBy( + () -> { + try (var logdash = Logdash.builder().apiKey(apiKey).build()) { + + logdash.logger().info("Test"); + logdash.metrics().mutate("counter", 1); + } + }); + } + + @Test + void should_validate_config_parameters() { + assertThatThrownBy(() -> + LogdashConfig.builder().maxRetries(-1).build() + ).isInstanceOf(LogdashException.class) + .hasMessageContaining("Max retries cannot be negative"); + + assertThatThrownBy(() -> + LogdashConfig.builder().requestTimeoutMs(0).build() + ).isInstanceOf(LogdashException.class) + .hasMessageContaining("Request timeout must be positive"); + + assertThatThrownBy(() -> + LogdashConfig.builder().maxConcurrentRequests(0).build() + ).isInstanceOf(LogdashException.class) + .hasMessageContaining("Max concurrent requests must be positive"); + } + + @Test + void should_handle_extreme_values() { + assertThatNoException() + .isThrownBy( + () -> { + try (var logdash = + Logdash.builder() + .apiKey("test") + 
.requestTimeoutMs(Long.MAX_VALUE) + .retryDelayMs(Long.MAX_VALUE) + .maxRetries(Integer.MAX_VALUE) + .build()) { + + logdash.logger().info("Extreme config test"); + } + }); + } + + @Test + void should_handle_resource_exhaustion_scenarios() { + try (var logdash = + Logdash.builder().apiKey("test").maxConcurrentRequests(1).requestTimeoutMs(100L).build()) { + + for (int i = 0; i < 100; i++) { + logdash.logger().info("Stress test " + i); + logdash.metrics().mutate("stress_counter", 1); + } + + assertThatNoException().isThrownBy(logdash::flush); + } + } + + @Test + void should_handle_malformed_data_gracefully() { + assertThatNoException() + .isThrownBy( + () -> { + var entry = new LogEntry("\u0000\u001F\uFFFF", LogLevel.INFO, 1L); + }); + + assertThatNoException() + .isThrownBy( + () -> { + var metric = new MetricEntry("test\n\r\t", Double.NaN, MetricType.SET); + }); + } + + @Test + void should_maintain_thread_safety_under_stress() throws InterruptedException { + try (var logdash = + Logdash.builder().apiKey("thread-safety-test").enableConsoleOutput(false).build()) { + + var executor = Executors.newFixedThreadPool(20); + var latch = new CountDownLatch(200); + var exceptions = new ConcurrentLinkedQueue(); + + for (int i = 0; i < 200; i++) { + final int iteration = i; + executor.submit( + () -> { + try { + logdash.logger().info("Thread safety test " + iteration); + logdash.metrics().mutate("thread_counter", 1); + logdash.metrics().set("thread_id", getThreadId()); + } catch (Exception e) { + exceptions.add(e); + } finally { + latch.countDown(); + } + }); + } + + assertThat(latch.await(30, TimeUnit.SECONDS)).isTrue(); + assertThat(exceptions).isEmpty(); + + executor.shutdown(); + assertThat(executor.awaitTermination(5, TimeUnit.SECONDS)).isTrue(); + } + } + + @SuppressWarnings("deprecation") + private long getThreadId() { + return Thread.currentThread().getId(); + } +} diff --git a/src/test/java/io/logdash/sdk/exception/LogdashExceptionTest.java 
b/src/test/java/io/logdash/sdk/exception/LogdashExceptionTest.java new file mode 100644 index 0000000..6a5f84a --- /dev/null +++ b/src/test/java/io/logdash/sdk/exception/LogdashExceptionTest.java @@ -0,0 +1,41 @@ +package io.logdash.sdk.exception; + +import org.junit.jupiter.api.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +class LogdashExceptionTest { + + @Test + void should_create_exception_with_message() { + // Arrange + var message = "Test error message"; + + // Act + var exception = new LogdashException(message); + + // Assert + assertThat(exception.getMessage()).isEqualTo(message); + assertThat(exception.getCause()).isNull(); + } + + @Test + void should_create_exception_with_message_and_cause() { + // Arrange + var message = "Test error message"; + var cause = new RuntimeException("Root cause"); + + // Act + var exception = new LogdashException(message, cause); + + // Assert + assertThat(exception.getMessage()).isEqualTo(message); + assertThat(exception.getCause()).isEqualTo(cause); + } + + @Test + void should_be_runtime_exception() { + var exception = new LogdashException("Test"); + assertThat(exception).isInstanceOf(RuntimeException.class); + } +} diff --git a/src/test/java/io/logdash/sdk/integration/LogdashIntegrationTest.java b/src/test/java/io/logdash/sdk/integration/LogdashIntegrationTest.java new file mode 100644 index 0000000..462a4e9 --- /dev/null +++ b/src/test/java/io/logdash/sdk/integration/LogdashIntegrationTest.java @@ -0,0 +1,200 @@ +package io.logdash.sdk.integration; + +import io.logdash.sdk.Logdash; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import java.util.ArrayList; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; + +import static org.assertj.core.api.Assertions.assertThatNoException; + +class LogdashIntegrationTest { + + @Test + @Timeout(value = 30, unit = TimeUnit.SECONDS) + void 
should_handle_rapid_successive_operations() { + try (var logdash = + Logdash.builder() + .apiKey("test-key") + .enableVerboseLogging(false) + .enableConsoleOutput(false) + .maxConcurrentRequests(5) + .requestTimeoutMs(1000L) + .retryDelayMs(50L) + .maxRetries(1) + .build()) { + + var logger = logdash.logger(); + var metrics = logdash.metrics(); + + // Rapid operations + for (int i = 0; i < 50; i++) { + logger.info("Message " + i, Map.of("iteration", i)); + metrics.mutate("counter", i % 5); + if (i % 5 == 0) { + metrics.set("batch", i / 5); + } + } + + // Should complete without exceptions + assertThatNoException().isThrownBy(logdash::flush); + } + } + + @Test + void should_handle_all_log_levels_correctly() { + try (var logdash = Logdash.builder().apiKey("test-key").enableConsoleOutput(false).build()) { + var logger = logdash.logger(); + + assertThatNoException() + .isThrownBy( + () -> { + logger.error("Error message"); + logger.warn("Warning message"); + logger.info("Info message"); + logger.http("HTTP message"); + logger.verbose("Verbose message"); + logger.debug("Debug message"); + logger.silly("Silly message"); + }); + } + } + + @Test + void should_handle_metric_operations_correctly() { + try (var logdash = Logdash.builder().apiKey("test-key").enableConsoleOutput(false).build()) { + var metrics = logdash.metrics(); + + assertThatNoException() + .isThrownBy( + () -> { + metrics.set("users", 100); + metrics.mutate("requests", 1); + metrics.mutate("requests", 5); + metrics.mutate("errors", -1); + metrics.mutate("errors", -2); + metrics.mutate("temperature", -5.5); + }); + } + } + + @Test + void should_recover_from_transport_creation_failure() { + // Using invalid URL to force transport creation failure + try (var logdash = + Logdash.builder() + .apiKey("test-key") + .baseUrl("invalid-url") + .enableVerboseLogging(false) + .enableConsoleOutput(false) + .build()) { + + // Should still work with NoOp transport + assertThatNoException() + .isThrownBy( + () -> { + 
logdash.logger().info("Test message"); + logdash.metrics().set("test", 1); + }); + } + } + + @Test + void should_handle_no_api_key_scenario() { + try (var logdash = Logdash.builder().apiKey("").enableConsoleOutput(false).build()) { + + assertThatNoException() + .isThrownBy( + () -> { + logdash.logger().info("Test message"); + logdash.metrics().mutate("counter", 1); + }); + } + } + + @Test + @Timeout(value = 45, unit = TimeUnit.SECONDS) + void should_handle_mixed_logging_and_metrics_under_load() { + try (var logdash = + Logdash.builder() + .apiKey("load-test-key") + .enableVerboseLogging(false) + .enableConsoleOutput(false) + .maxConcurrentRequests(10) + .requestTimeoutMs(2000L) + .retryDelayMs(100L) + .maxRetries(2) + .build()) { + + var logger = logdash.logger(); + var metrics = logdash.metrics(); + var futures = new ArrayList>(); + + // Act + for (int i = 0; i < 100; i++) { + final int iteration = i; + + // Log at different levels + logger.info("Iteration " + iteration, Map.of("step", iteration)); + if (iteration % 10 == 0) { + logger.warn("Checkpoint reached", Map.of("checkpoint", iteration / 10)); + } + if (iteration % 25 == 0) { + logger.error("Quarter milestone", Map.of("quarter", iteration / 25)); + } + + // Various metrics + metrics.mutate("iterations", 1); + metrics.set("current_iteration", iteration); + if (iteration % 5 == 0) { + metrics.mutate("batch_size", iteration % 3 == 0 ? 
1 : -1); + } + } + + // Assert + assertThatNoException().isThrownBy(logdash::flush); + } + } + + @Test + void should_handle_very_long_messages_gracefully() { + try (var logdash = + Logdash.builder().apiKey("long-message-test").enableConsoleOutput(false).build()) { + + var logger = logdash.logger(); + var veryLongMessage = "X".repeat(50000); // 50KB message + + // Act & Assert + assertThatNoException() + .isThrownBy( + () -> { + logger.info(veryLongMessage); + logger.debug(veryLongMessage, Map.of("size", veryLongMessage.length())); + }); + } + } + + @Test + void should_maintain_sequence_numbers_across_different_log_levels() { + try (var logdash = + Logdash.builder().apiKey("sequence-test").enableConsoleOutput(false).build()) { + + var logger = logdash.logger(); + + // Act + logger.error("Error 1"); + logger.info("Info 1"); + logger.debug("Debug 1"); + logger.warn("Warn 1"); + logger.verbose("Verbose 1"); + logger.silly("Silly 1"); + logger.http("HTTP 1"); + + // Assert + assertThatNoException().isThrownBy(logdash::flush); + } + } +} diff --git a/src/test/java/io/logdash/sdk/log/LogEntryTest.java b/src/test/java/io/logdash/sdk/log/LogEntryTest.java new file mode 100644 index 0000000..1676e58 --- /dev/null +++ b/src/test/java/io/logdash/sdk/log/LogEntryTest.java @@ -0,0 +1,159 @@ +package io.logdash.sdk.log; + +import org.junit.jupiter.api.Test; + +import java.time.Instant; +import java.util.HashMap; +import java.util.Map; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +class LogEntryTest { + + @Test + void should_create_log_entry_with_all_parameters() { + // Arrange + var timestamp = Instant.now(); + Map context = Map.of("key", "value"); + + // Act + var logEntry = new LogEntry("Test message", LogLevel.INFO, timestamp, 42L, context); + + // Assert + assertThat(logEntry.message()).isEqualTo("Test message"); + assertThat(logEntry.level()).isEqualTo(LogLevel.INFO); + 
assertThat(logEntry.createdAt()).isEqualTo(timestamp); + assertThat(logEntry.sequenceNumber()).isEqualTo(42L); + assertThat(logEntry.context()).containsEntry("key", "value"); + } + + @Test + void should_create_log_entry_with_minimal_parameters() { + // Act + var logEntry = new LogEntry("Test message", LogLevel.ERROR, 123L); + + // Assert + assertThat(logEntry.message()).isEqualTo("Test message"); + assertThat(logEntry.level()).isEqualTo(LogLevel.ERROR); + assertThat(logEntry.sequenceNumber()).isEqualTo(123L); + assertThat(logEntry.createdAt()).isNotNull(); + assertThat(logEntry.context()).isEmpty(); + } + + @Test + void should_create_log_entry_with_context() { + // Arrange + Map context = Map.of("userId", "123", "action", "login"); + + // Act + var logEntry = new LogEntry("User action", LogLevel.INFO, 1L, context); + + // Assert + assertThat(logEntry.message()).isEqualTo("User action"); + assertThat(logEntry.level()).isEqualTo(LogLevel.INFO); + assertThat(logEntry.sequenceNumber()).isEqualTo(1L); + assertThat(logEntry.context()).containsEntry("userId", "123"); + assertThat(logEntry.context()).containsEntry("action", "login"); + } + + @Test + void should_throw_exception_for_null_message() { + assertThatThrownBy(() -> + new LogEntry(null, LogLevel.INFO, 1L) + ).isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("Message cannot be null"); + } + + @Test + void should_throw_exception_for_null_level() { + assertThatThrownBy(() -> + new LogEntry("Test", null, 1L) + ).isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("Level cannot be null"); + } + + @Test + void should_handle_null_timestamp_by_using_current_time() { + // Act + var logEntry = new LogEntry("Test", LogLevel.INFO, null, 1L, Map.of()); + + // Assert + assertThat(logEntry.createdAt()).isNotNull(); + assertThat(logEntry.createdAt()).isBeforeOrEqualTo(Instant.now()); + } + + @Test + void should_handle_null_context_by_using_empty_map() { + // Act + var logEntry = new 
LogEntry("Test", LogLevel.INFO, Instant.now(), 1L, null); + + // Assert + assertThat(logEntry.context()).isNotNull(); + assertThat(logEntry.context()).isEmpty(); + } + + @Test + void should_create_defensive_copy_of_context() { + // Arrange + Map originalContext = new HashMap<>(); + originalContext.put("key", "value"); + + // Act + var logEntry = new LogEntry("Test", LogLevel.INFO, 1L, originalContext); + + // Modify original map + originalContext.put("newKey", "newValue"); + + // Assert + assertThat(logEntry.context()).containsOnlyKeys("key"); + assertThat(logEntry.context()).doesNotContainKey("newKey"); + assertThat(logEntry.context()).hasSize(1); + } + + @Test + void should_handle_empty_context_map() { + // Act + var logEntry = new LogEntry("Test", LogLevel.INFO, 1L, Map.of()); + + // Assert + assertThat(logEntry.context()).isEmpty(); + assertThat(logEntry.context()).isNotNull(); + } + + @Test + void should_handle_context_with_null_values() { + // Arrange + Map contextWithNull = new HashMap<>(); + contextWithNull.put("validKey", "validValue"); + contextWithNull.put("nullKey", null); + + // Act + var logEntry = new LogEntry("Test", LogLevel.INFO, 1L, contextWithNull); + + // Assert + assertThat(logEntry.context()).hasSize(2); + assertThat(logEntry.context()).containsEntry("validKey", "validValue"); + assertThat(logEntry.context()).containsEntry("nullKey", null); + } + + @Test + void should_handle_different_context_value_types() { + // Arrange + Map context = new HashMap<>(); + context.put("stringValue", "test"); + context.put("intValue", 42); + context.put("doubleValue", 3.14); + context.put("booleanValue", true); + + // Act + var logEntry = new LogEntry("Test", LogLevel.INFO, 1L, context); + + // Assert + assertThat(logEntry.context()).hasSize(4); + assertThat(logEntry.context().get("stringValue")).isEqualTo("test"); + assertThat(logEntry.context().get("intValue")).isEqualTo(42); + assertThat(logEntry.context().get("doubleValue")).isEqualTo(3.14); + 
assertThat(logEntry.context().get("booleanValue")).isEqualTo(true); + } +} diff --git a/src/test/java/io/logdash/sdk/log/LogLevelTest.java b/src/test/java/io/logdash/sdk/log/LogLevelTest.java new file mode 100644 index 0000000..358fbb0 --- /dev/null +++ b/src/test/java/io/logdash/sdk/log/LogLevelTest.java @@ -0,0 +1,41 @@ +package io.logdash.sdk.log; + +import org.junit.jupiter.api.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +class LogLevelTest { + + @Test + void should_have_correct_values() { + assertThat(LogLevel.ERROR.getValue()).isEqualTo("error"); + assertThat(LogLevel.WARN.getValue()).isEqualTo("warning"); + assertThat(LogLevel.INFO.getValue()).isEqualTo("info"); + assertThat(LogLevel.HTTP.getValue()).isEqualTo("http"); + assertThat(LogLevel.VERBOSE.getValue()).isEqualTo("verbose"); + assertThat(LogLevel.DEBUG.getValue()).isEqualTo("debug"); + assertThat(LogLevel.SILLY.getValue()).isEqualTo("silly"); + } + + @Test + void should_return_correct_string_representation() { + assertThat(LogLevel.ERROR.toString()).isEqualTo("error"); + assertThat(LogLevel.INFO.toString()).isEqualTo("info"); + assertThat(LogLevel.DEBUG.toString()).isEqualTo("debug"); + } + + @Test + void should_have_all_expected_levels() { + var levels = LogLevel.values(); + assertThat(levels).hasSize(7); + assertThat(levels) + .contains( + LogLevel.ERROR, + LogLevel.WARN, + LogLevel.INFO, + LogLevel.HTTP, + LogLevel.VERBOSE, + LogLevel.DEBUG, + LogLevel.SILLY); + } +} diff --git a/src/test/java/io/logdash/sdk/log/LogdashLoggerTest.java b/src/test/java/io/logdash/sdk/log/LogdashLoggerTest.java new file mode 100644 index 0000000..7c71cee --- /dev/null +++ b/src/test/java/io/logdash/sdk/log/LogdashLoggerTest.java @@ -0,0 +1,181 @@ +package io.logdash.sdk.log; + +import io.logdash.sdk.config.LogdashConfig; +import io.logdash.sdk.transport.LogdashTransport; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import 
org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.CompletableFuture; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatNoException; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.then; +import static org.mockito.Mockito.times; + +@ExtendWith(MockitoExtension.class) +class LogdashLoggerTest { + + @Mock + private LogdashTransport transport; + + private LogdashConfig config; + private LogdashLogger logger; + + @BeforeEach + void setUp() { + config = + new LogdashConfig( + "test-key", "https://api.logdash.io", false, false, 3, 1000L, 5000L, 10000L, 100); + + given(transport.sendLog(any())).willReturn(CompletableFuture.completedFuture(null)); + logger = new LogdashLogger(config, transport); + } + + @Test + void should_send_info_log_with_correct_level() { + // Act + logger.info("test message"); + + // Assert + ArgumentCaptor captor = ArgumentCaptor.forClass(LogEntry.class); + then(transport).should().sendLog(captor.capture()); + + LogEntry captured = captor.getValue(); + assertThat(captured.level()).isEqualTo(LogLevel.INFO); + assertThat(captured.message()).isEqualTo("test message"); + } + + @Test + void should_send_log_with_context() { + // Arrange + final Map context = Map.of("userId", "123", "action", "login"); + + // Act + logger.info("User logged in", context); + + // Assert + ArgumentCaptor captor = ArgumentCaptor.forClass(LogEntry.class); + then(transport).should().sendLog(captor.capture()); + + LogEntry captured = captor.getValue(); + assertThat(captured.message()).contains("User logged in"); + assertThat(captured.message()).contains("userId"); + assertThat(captured.message()).contains("123"); + } + + @Test + void 
should_handle_all_log_levels() { + // Act + logger.error("error message"); + logger.warn("warn message"); + logger.info("info message"); + logger.http("http message"); + logger.verbose("verbose message"); + logger.debug("debug message"); + logger.silly("silly message"); + + // Assert + then(transport).should(times(7)).sendLog(any(LogEntry.class)); + } + + @Test + void should_handle_large_context_maps() { + // Arrange + Map largeContext = new HashMap<>(); + for (int i = 0; i < 100; i++) { + largeContext.put("key" + i, "value" + i); + } + + // Act & Assert + assertThatNoException() + .isThrownBy( + () -> { + logger.info("Large context test", largeContext); + then(transport).should().sendLog(any(LogEntry.class)); + }); + } + + @Test + void should_handle_null_values_in_context() { + // Arrange + Map contextWithNull = new HashMap<>(); + contextWithNull.put("validKey", "validValue"); + contextWithNull.put("nullKey", null); + + // Act & Assert + assertThatNoException() + .isThrownBy( + () -> { + logger.info("Null context test", contextWithNull); + then(transport).should().sendLog(any(LogEntry.class)); + }); + } + + @Test + void should_increment_sequence_numbers_correctly() { + // Act + logger.info("First message"); + logger.info("Second message"); + logger.info("Third message"); + + // Assert + ArgumentCaptor captor = ArgumentCaptor.forClass(LogEntry.class); + then(transport).should(times(3)).sendLog(captor.capture()); + + var capturedEntries = captor.getAllValues(); + assertThat(capturedEntries.get(0).sequenceNumber()).isEqualTo(1L); + assertThat(capturedEntries.get(1).sequenceNumber()).isEqualTo(2L); + assertThat(capturedEntries.get(2).sequenceNumber()).isEqualTo(3L); + } + + @Test + void should_handle_console_output_when_enabled() { + // Arrange + var configWithConsole = + new LogdashConfig( + "test-key", "https://api.logdash.io", true, false, 3, 1000L, 5000L, 10000L, 100); + var loggerWithConsole = new LogdashLogger(configWithConsole, transport); + + // Act & Assert + 
assertThatNoException() + .isThrownBy( + () -> { + loggerWithConsole.info("Console output test"); + }); + } + + @Test + void should_handle_empty_context() { + // Act + logger.info("Empty context", Map.of()); + + // Assert + ArgumentCaptor captor = ArgumentCaptor.forClass(LogEntry.class); + then(transport).should().sendLog(captor.capture()); + + LogEntry captured = captor.getValue(); + assertThat(captured.message()).isEqualTo("Empty context"); + } + + @Test + void should_handle_special_characters_in_message() { + // Act + logger.info("Message with \"quotes\" and \n newlines"); + + // Assert + ArgumentCaptor captor = ArgumentCaptor.forClass(LogEntry.class); + then(transport).should().sendLog(captor.capture()); + + LogEntry captured = captor.getValue(); + assertThat(captured.message()).contains("quotes"); + assertThat(captured.message()).contains("newlines"); + } +} diff --git a/src/test/java/io/logdash/sdk/metrics/LogdashMetricsTest.java b/src/test/java/io/logdash/sdk/metrics/LogdashMetricsTest.java new file mode 100644 index 0000000..4668aa5 --- /dev/null +++ b/src/test/java/io/logdash/sdk/metrics/LogdashMetricsTest.java @@ -0,0 +1,83 @@ +package io.logdash.sdk.metrics; + +import io.logdash.sdk.config.LogdashConfig; +import io.logdash.sdk.transport.LogdashTransport; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +import java.util.concurrent.CompletableFuture; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.then; +import static org.mockito.Mockito.times; + +@ExtendWith(MockitoExtension.class) +class LogdashMetricsTest { + + @Mock + private LogdashTransport transport; + + private LogdashConfig config; + private LogdashMetrics metrics; + + @BeforeEach + void setUp() { + config = new LogdashConfig( + "test-key", + 
"https://api.logdash.io", + false, + false, + 3, + 1000L, + 5000L, + 10000L, + 100 + ); + + given(transport.sendMetric(any())).willReturn(CompletableFuture.completedFuture(null)); + + metrics = new LogdashMetrics(config, transport); + } + + @Test + void should_send_set_metric() { + // Act + metrics.set("active_users", 100); + + // Assert + then(transport).should().sendMetric(any(MetricEntry.class)); + } + + @Test + void should_send_mutate_metric() { + // Act + metrics.mutate("temperature", -5.5); + + // Assert + then(transport).should().sendMetric(any(MetricEntry.class)); + } + + @Test + void should_handle_decimal_values_correctly() { + // Act + metrics.set("temperature", 23.5); + metrics.mutate("pressure", -1.2); + + // Assert + then(transport).should(times(2)).sendMetric(any(MetricEntry.class)); + } + + @Test + void should_handle_negative_values() { + // Act + metrics.set("deficit", -100); + metrics.mutate("altitude", -50.5); + + // Assert + then(transport).should(times(2)).sendMetric(any(MetricEntry.class)); + } +} diff --git a/src/test/java/io/logdash/sdk/metrics/MetricEntryTest.java b/src/test/java/io/logdash/sdk/metrics/MetricEntryTest.java new file mode 100644 index 0000000..accf8bd --- /dev/null +++ b/src/test/java/io/logdash/sdk/metrics/MetricEntryTest.java @@ -0,0 +1,81 @@ +package io.logdash.sdk.metrics; + +import org.junit.jupiter.api.Test; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +class MetricEntryTest { + + @Test + void should_create_metric_entry_with_valid_parameters() { + // Act + var metricEntry = new MetricEntry("cpu_usage", 75.5, MetricType.SET); + + // Assert + assertThat(metricEntry.name()).isEqualTo("cpu_usage"); + assertThat(metricEntry.value()).isEqualTo(75.5); + assertThat(metricEntry.operation()).isEqualTo(MetricType.SET); + } + + @Test + void should_create_metric_entry_with_integer_value() { + // Act + var metricEntry = new 
MetricEntry("request_count", 42, MetricType.MUTATE); + + // Assert + assertThat(metricEntry.name()).isEqualTo("request_count"); + assertThat(metricEntry.value()).isEqualTo(42); + assertThat(metricEntry.operation()).isEqualTo(MetricType.MUTATE); + } + + @Test + void should_throw_exception_for_null_name() { + assertThatThrownBy(() -> + new MetricEntry(null, 42, MetricType.SET) + ).isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("Metric name cannot be null or blank"); + } + + @Test + void should_throw_exception_for_blank_name() { + assertThatThrownBy(() -> + new MetricEntry(" ", 42, MetricType.SET) + ).isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("Metric name cannot be null or blank"); + } + + @Test + void should_throw_exception_for_null_value() { + assertThatThrownBy(() -> + new MetricEntry("metric", null, MetricType.SET) + ).isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("Metric value cannot be null"); + } + + @Test + void should_throw_exception_for_null_operation() { + assertThatThrownBy(() -> + new MetricEntry("metric", 42, null) + ).isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("Metric operation cannot be null"); + } + + @Test + void should_handle_negative_values() { + // Act + var metricEntry = new MetricEntry("temperature_change", -5.2, MetricType.MUTATE); + + // Assert + assertThat(metricEntry.value()).isEqualTo(-5.2); + } + + @Test + void should_handle_zero_values() { + // Act + var metricEntry = new MetricEntry("reset_counter", 0, MetricType.SET); + + // Assert + assertThat(metricEntry.value()).isEqualTo(0); + } +} diff --git a/src/test/java/io/logdash/sdk/metrics/MetricTypeTest.java b/src/test/java/io/logdash/sdk/metrics/MetricTypeTest.java new file mode 100644 index 0000000..67a46a2 --- /dev/null +++ b/src/test/java/io/logdash/sdk/metrics/MetricTypeTest.java @@ -0,0 +1,28 @@ +package io.logdash.sdk.metrics; + +import org.junit.jupiter.api.Test; + +import static 
org.assertj.core.api.Assertions.assertThat; + +class MetricTypeTest { + + @Test + void should_have_correct_values() { + assertThat(MetricType.SET.getValue()).isEqualTo("set"); + assertThat(MetricType.MUTATE.getValue()).isEqualTo("change"); + } + + @Test + void should_return_correct_string_representation() { + assertThat(MetricType.SET.toString()).hasToString("set"); + assertThat(MetricType.MUTATE.toString()).hasToString("change"); + } + + @Test + void should_have_all_expected_types() { + var types = MetricType.values(); + assertThat(types) + .hasSize(2) + .contains(MetricType.SET, MetricType.MUTATE); + } +} diff --git a/src/test/java/io/logdash/sdk/performance/PerformanceTest.java b/src/test/java/io/logdash/sdk/performance/PerformanceTest.java new file mode 100644 index 0000000..37daf2d --- /dev/null +++ b/src/test/java/io/logdash/sdk/performance/PerformanceTest.java @@ -0,0 +1,82 @@ +package io.logdash.sdk.performance; + +import io.logdash.sdk.Logdash; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import java.util.ArrayList; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatNoException; + +class PerformanceTest { + + @Test + void should_handle_large_message_payloads() { + try (var logdash = Logdash.builder() + .apiKey("large-payload-test") + .enableConsoleOutput(false) + .build()) { + + var largeMessage = "A".repeat(100_000); // 100KB message + + assertThatNoException() + .isThrownBy( + () -> { + logdash.logger().info(largeMessage); + logdash.flush(); + }); + } + } + + @Test + @Timeout(value = 15, unit = TimeUnit.SECONDS) + void should_maintain_performance_under_mixed_load() throws InterruptedException { + try (var logdash = + Logdash.builder() + .apiKey("mixed-load-test") + .enableConsoleOutput(false) + .maxConcurrentRequests(20) + 
.build()) { + + var executor = Executors.newFixedThreadPool(10); + var futures = new ArrayList>(); + + // Mixed workload: logs and metrics + for (int i = 0; i < 50; i++) { + final int iteration = i; + + var future = + CompletableFuture.runAsync( + () -> { + // Logs + logdash.logger().info("Mixed load log " + iteration); + logdash.logger().debug("Debug info " + iteration); + + // Metrics + logdash.metrics().mutate("operations", 1); + logdash.metrics().set("current_batch", iteration); + logdash.metrics().mutate("temperature", Math.random() * 10 - 5); + }, + executor); + + futures.add(future); + } + + var allFutures = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])); + + assertThatNoException() + .isThrownBy( + () -> { + allFutures.get(10, TimeUnit.SECONDS); + logdash.flush(); + }); + + executor.shutdown(); + assertThat(executor.awaitTermination(5, TimeUnit.SECONDS)).isTrue(); + } + } +} diff --git a/src/test/java/io/logdash/sdk/transport/HttpTransportTest.java b/src/test/java/io/logdash/sdk/transport/HttpTransportTest.java new file mode 100644 index 0000000..1ecdcea --- /dev/null +++ b/src/test/java/io/logdash/sdk/transport/HttpTransportTest.java @@ -0,0 +1,821 @@ +package io.logdash.sdk.transport; + +import com.github.tomakehurst.wiremock.WireMockServer; +import com.github.tomakehurst.wiremock.core.WireMockConfiguration; +import io.logdash.sdk.config.LogdashConfig; +import io.logdash.sdk.log.LogEntry; +import io.logdash.sdk.log.LogLevel; +import io.logdash.sdk.metrics.MetricEntry; +import io.logdash.sdk.metrics.MetricType; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.time.Instant; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import 
java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static com.github.tomakehurst.wiremock.client.WireMock.*; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatNoException; + +class HttpTransportTest { + + private WireMockServer wireMockServer; + private LogdashConfig config; + private HttpTransport transport; + + @BeforeEach + void setUp() { + wireMockServer = new WireMockServer(WireMockConfiguration.options().dynamicPort()); + wireMockServer.start(); + + config = + LogdashConfig.builder() + .apiKey("test-api-key") + .baseUrl("http://localhost:" + wireMockServer.port()) + .enableVerboseLogging(false) + .requestTimeoutMs(2000L) + .maxRetries(2) + .retryDelayMs(100L) + .maxConcurrentRequests(5) + .shutdownTimeoutMs(1000L) + .build(); + + transport = new HttpTransport(config); + } + + @AfterEach + void tearDown() { + if (transport != null) { + transport.close(); + } + if (wireMockServer != null) { + wireMockServer.stop(); + } + } + + @Test + void should_successfully_send_log_entry() { + // Arrange + wireMockServer.stubFor( + post(urlEqualTo("/logs")) + .willReturn( + aResponse().withStatus(200).withHeader("Content-Type", "application/json"))); + + var logEntry = new LogEntry("Test message", LogLevel.INFO, 1L); + + // Act + var future = transport.sendLog(logEntry); + + // Assert + assertThat(future).succeedsWithin(3, TimeUnit.SECONDS); + + wireMockServer.verify( + postRequestedFor(urlEqualTo("/logs")) + .withHeader("Content-Type", equalTo("application/json")) + .withHeader("project-api-key", equalTo("test-api-key")) + .withHeader("User-Agent", equalTo("logdash-java-sdk/0.1.0"))); + } + + @Test + void should_successfully_send_metric_entry() { + // Arrange + wireMockServer.stubFor( + put(urlEqualTo("/metrics")) + .willReturn( + aResponse().withStatus(202).withHeader("Content-Type", "application/json"))); + + var metricEntry = new MetricEntry("test_metric", 42, 
MetricType.SET); + + // Act + var future = transport.sendMetric(metricEntry); + + // Assert + assertThat(future).succeedsWithin(3, TimeUnit.SECONDS); + + wireMockServer.verify( + putRequestedFor(urlEqualTo("/metrics")) + .withHeader("Content-Type", equalTo("application/json")) + .withHeader("project-api-key", equalTo("test-api-key")) + .withHeader("User-Agent", equalTo("logdash-java-sdk/0.1.0"))); + } + + @ParameterizedTest + @ValueSource(ints = {400, 401, 403, 404, 500, 502, 503}) + void should_retry_on_http_error_responses(int statusCode) { + // Arrange + wireMockServer.stubFor( + post(urlEqualTo("/logs")) + .inScenario("retry-scenario") + .whenScenarioStateIs("Started") + .willReturn(aResponse().withStatus(statusCode)) + .willSetStateTo("first-retry")); + + wireMockServer.stubFor( + post(urlEqualTo("/logs")) + .inScenario("retry-scenario") + .whenScenarioStateIs("first-retry") + .willReturn(aResponse().withStatus(200))); + + var logEntry = new LogEntry("Test message", LogLevel.INFO, 1L); + + // Act + var future = transport.sendLog(logEntry); + + // Assert + assertThat(future).succeedsWithin(5, TimeUnit.SECONDS); + wireMockServer.verify(2, postRequestedFor(urlEqualTo("/logs"))); + } + + @Test + void should_fail_permanently_after_max_retries() throws Exception { + // Arrange + wireMockServer.stubFor(post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(500))); + + var logEntry = new LogEntry("Test message", LogLevel.INFO, 1L); + + // Act + var future = transport.sendLog(logEntry); + + // Assert + assertThat(future).succeedsWithin(10, TimeUnit.SECONDS); + + // Wait a bit for all retry attempts to complete + Thread.sleep(1000); + + // Should have made initial request + 2 retries = 3 total + wireMockServer.verify(moreThanOrExactly(2), postRequestedFor(urlEqualTo("/logs"))); + } + + @Test + void should_handle_connection_timeout() { + // Arrange + wireMockServer.stubFor( + post(urlEqualTo("/logs")) + .willReturn( + 
aResponse().withStatus(200).withFixedDelay(5000))); // Longer than request timeout + + var logEntry = new LogEntry("Test message", LogLevel.INFO, 1L); + + // Act + var future = transport.sendLog(logEntry); + + // Assert + assertThat(future).succeedsWithin(15, TimeUnit.SECONDS); // Should complete despite timeout + } + + @Test + void should_handle_concurrent_requests_within_limit() { + // Arrange + wireMockServer.stubFor( + post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(200).withFixedDelay(500))); + + var futures = new ArrayList>(); + + // Act + for (int i = 0; i < 3; i++) { + var logEntry = new LogEntry("Message " + i, LogLevel.INFO, i); + futures.add(transport.sendLog(logEntry)); + } + + // Assert + var allFutures = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])); + assertThat(allFutures).succeedsWithin(10, TimeUnit.SECONDS); + wireMockServer.verify(3, postRequestedFor(urlEqualTo("/logs"))); + } + + @Test + void should_respect_concurrency_limits() { + // Arrange + var configWithLowLimit = + LogdashConfig.builder() + .apiKey("test-key") + .baseUrl("http://localhost:" + wireMockServer.port()) + .maxConcurrentRequests(1) + .requestTimeoutMs(1000L) + .build(); + + var limitedTransport = new HttpTransport(configWithLowLimit); + + wireMockServer.stubFor( + post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(200).withFixedDelay(800))); + + var futures = new ArrayList>(); + + try { + // Act + for (int i = 0; i < 5; i++) { + var logEntry = new LogEntry("Message " + i, LogLevel.INFO, i); + futures.add(limitedTransport.sendLog(logEntry)); + } + + // Assert + var allFutures = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])); + assertThat(allFutures).succeedsWithin(15, TimeUnit.SECONDS); + + // Some requests might be rejected due to concurrency limit + var requestCount = wireMockServer.findAll(postRequestedFor(urlEqualTo("/logs"))).size(); + assertThat(requestCount).isLessThanOrEqualTo(5); + + } finally { + 
limitedTransport.close(); + } + } + + @Test + void should_not_send_requests_after_shutdown() { + // Arrange + transport.shutdown(); + var logEntry = new LogEntry("Test message", LogLevel.INFO, 1L); + + // Act + var future = transport.sendLog(logEntry); + + // Assert + assertThat(future).succeedsWithin(1, TimeUnit.SECONDS); + wireMockServer.verify(0, postRequestedFor(urlEqualTo("/logs"))); + } + + @Test + void should_handle_graceful_shutdown_with_pending_requests() { + // Arrange + wireMockServer.stubFor( + post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(200).withFixedDelay(300))); + + var logEntry = new LogEntry("Test message", LogLevel.INFO, 1L); + var future = transport.sendLog(logEntry); + + // Act + var shutdownFuture = transport.shutdown(); + + // Assert + assertThat(shutdownFuture).isPresent(); + assertThat(shutdownFuture.get()).succeedsWithin(5, TimeUnit.SECONDS); + assertThat(future).succeedsWithin(5, TimeUnit.SECONDS); + } + + @Test + void should_flush_pending_requests() { + // Arrange + wireMockServer.stubFor( + post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(200).withFixedDelay(200))); + + var logEntry = new LogEntry("Test message", LogLevel.INFO, 1L); + transport.sendLog(logEntry); + + // Act + var flushFuture = transport.flush(); + + // Assert + assertThat(flushFuture).succeedsWithin(5, TimeUnit.SECONDS); + wireMockServer.verify(1, postRequestedFor(urlEqualTo("/logs"))); + } + + @Test + void should_handle_serialization_errors_gracefully() { + // Arrange + var logEntry = + new LogEntry("Test with very long message: " + "x".repeat(100000), LogLevel.INFO, 1L); + + // Act + var future = transport.sendLog(logEntry); + + // Assert + assertThat(future).succeedsWithin(5, TimeUnit.SECONDS); + } + + @Test + void should_handle_large_payloads() { + // Arrange + wireMockServer.stubFor(post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(200))); + + var largeMessage = "A".repeat(10000); + var logEntry = new LogEntry(largeMessage, 
LogLevel.INFO, 1L); + + // Act + var future = transport.sendLog(logEntry); + + // Assert + assertThat(future).succeedsWithin(5, TimeUnit.SECONDS); + wireMockServer.verify(1, postRequestedFor(urlEqualTo("/logs"))); + } + + @Test + void should_handle_multiple_close_calls_safely() { + // Act & Assert + assertThatNoException() + .isThrownBy( + () -> { + transport.close(); + transport.close(); // Second close should be safe + }); + } + + @Test + void should_send_correct_json_for_log_entries_with_context() throws Exception { + // Arrange + wireMockServer.stubFor(post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(200))); + + var timestamp = Instant.parse("2024-06-01T12:34:56Z"); + Map context = Map.of("userId", "123", "action", "login"); + var logEntry = new LogEntry("User logged in", LogLevel.INFO, timestamp, 42L, context); + + // Act + var future = transport.sendLog(logEntry); + assertThat(future).succeedsWithin(3, TimeUnit.SECONDS); + + // Wait for request to complete + Thread.sleep(200); + + // Assert + wireMockServer.verify( + postRequestedFor(urlEqualTo("/logs")) + .withRequestBody(containing("\"level\":\"info\"")) + .withRequestBody(containing("\"sequenceNumber\":42")) + .withRequestBody(containing("\"createdAt\":\"2024-06-01T12:34:56Z\""))); + } + + @Test + void should_send_correct_json_for_metrics() throws Exception { + // Arrange + wireMockServer.stubFor(put(urlEqualTo("/metrics")).willReturn(aResponse().withStatus(200))); + + var metricEntry = new MetricEntry("cpu_usage", 75.5, MetricType.SET); + + // Act + var future = transport.sendMetric(metricEntry); + assertThat(future).succeedsWithin(3, TimeUnit.SECONDS); + + // Wait for request to complete + Thread.sleep(200); + + // Assert + wireMockServer.verify( + putRequestedFor(urlEqualTo("/metrics")) + .withRequestBody(containing("\"name\":\"cpu_usage\"")) + .withRequestBody(containing("\"value\":75.5")) + .withRequestBody(containing("\"operation\":\"set\""))); + } + + @Test + @Timeout(15) + void 
should_handle_high_throughput_requests_with_realistic_expectations() + throws InterruptedException { + // Arrange - Use higher concurrency limit for throughput test + var highThroughputConfig = + LogdashConfig.builder() + .apiKey("test-key") + .baseUrl("http://localhost:" + wireMockServer.port()) + .maxConcurrentRequests(15) // Higher limit + .requestTimeoutMs(5000L) + .build(); + + var throughputTransport = new HttpTransport(highThroughputConfig); + + wireMockServer.stubFor( + post(urlEqualTo("/logs")) + .willReturn(aResponse().withStatus(200).withFixedDelay(100))); // Reasonable delay + + var requestCount = 30; // Realistic number for test + var futures = new ArrayList>(requestCount); + var completedLatch = new CountDownLatch(requestCount); + + try { + // Act + long startTime = System.currentTimeMillis(); + + for (int i = 0; i < requestCount; i++) { + var logEntry = new LogEntry("Throughput test " + i, LogLevel.INFO, i); + var future = throughputTransport.sendLog(logEntry); + future.whenComplete((result, throwable) -> completedLatch.countDown()); + futures.add(future); + + // Small delay to avoid overwhelming + if (i % 5 == 0) { + Thread.sleep(10); + } + } + + // Assert + assertThat(completedLatch.await(12, TimeUnit.SECONDS)).isTrue(); + long duration = System.currentTimeMillis() - startTime; + + // Should complete in reasonable time + assertThat(duration).isLessThan(12000); + + // All futures should complete + var allFutures = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])); + assertThat(allFutures).succeedsWithin(2, TimeUnit.SECONDS); + + // Most requests should be processed (allow for some concurrency rejections) + var actualRequests = wireMockServer.findAll(postRequestedFor(urlEqualTo("/logs"))).size(); + assertThat(actualRequests).isGreaterThan(requestCount / 2); // At least 50% + + } finally { + throughputTransport.close(); + } + } + + @Test + void should_implement_exponential_backoff_with_jitter() throws InterruptedException { + // 
Arrange + var retryDelayMs = 200L; + var configWithRetries = + LogdashConfig.builder() + .apiKey("test-key") + .baseUrl("http://localhost:" + wireMockServer.port()) + .maxRetries(3) + .retryDelayMs(retryDelayMs) + .requestTimeoutMs(1000L) + .build(); + + var retryTransport = new HttpTransport(configWithRetries); + + wireMockServer.stubFor( + post(urlEqualTo("/logs")) + .inScenario("backoff-test") + .whenScenarioStateIs("Started") + .willReturn(aResponse().withStatus(500)) + .willSetStateTo("retry1")); + + wireMockServer.stubFor( + post(urlEqualTo("/logs")) + .inScenario("backoff-test") + .whenScenarioStateIs("retry1") + .willReturn(aResponse().withStatus(500)) + .willSetStateTo("retry2")); + + wireMockServer.stubFor( + post(urlEqualTo("/logs")) + .inScenario("backoff-test") + .whenScenarioStateIs("retry2") + .willReturn(aResponse().withStatus(500)) + .willSetStateTo("retry3")); + + wireMockServer.stubFor( + post(urlEqualTo("/logs")) + .inScenario("backoff-test") + .whenScenarioStateIs("retry3") + .willReturn(aResponse().withStatus(200))); + + var logEntry = new LogEntry("Backoff test", LogLevel.ERROR, 1L); + + try { + // Act + long startTime = System.currentTimeMillis(); + var future = retryTransport.sendLog(logEntry); + assertThat(future).succeedsWithin(15, TimeUnit.SECONDS); + long totalTime = System.currentTimeMillis() - startTime; + + // Assert + // Should have taken time for exponential backoff (roughly 200 + 400 + 800 + jitter) + assertThat(totalTime).isGreaterThan(1200); // Minimum expected time + assertThat(totalTime).isLessThan(3000); // Maximum reasonable time + + wireMockServer.verify(4, postRequestedFor(urlEqualTo("/logs"))); + } finally { + retryTransport.close(); + } + } + + @Test + void should_demonstrate_concurrency_behavior() throws InterruptedException { + // Arrange - This test shows actual concurrency behavior + var configWithLowLimit = + LogdashConfig.builder() + .apiKey("test-key") + .baseUrl("http://localhost:" + wireMockServer.port()) + 
.maxConcurrentRequests(2) // Very low limit + .requestTimeoutMs(3000L) + .build(); + + var limitedTransport = new HttpTransport(configWithLowLimit); + + // Slow responses to fill up concurrency slots + wireMockServer.stubFor( + post(urlEqualTo("/logs")) + .willReturn(aResponse().withStatus(200).withFixedDelay(1000))); // 1 second delay + + var futures = new ArrayList>(); + + try { + // Act - Submit more requests than concurrency limit + for (int i = 0; i < 8; i++) { + var logEntry = new LogEntry("Concurrency demo " + i, LogLevel.INFO, i); + var future = limitedTransport.sendLog(logEntry); + futures.add(future); + + // Small delay between submissions + Thread.sleep(50); + } + + // Wait for all futures to complete + var allFutures = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])); + assertThat(allFutures).succeedsWithin(15, TimeUnit.SECONDS); + + // Assert - Should have processed some requests, rejected others + var actualRequests = wireMockServer.findAll(postRequestedFor(urlEqualTo("/logs"))).size(); + + // With concurrency limit of 2, we should see fewer requests than submitted + assertThat(actualRequests).isLessThan(8); + assertThat(actualRequests).isGreaterThan(0); + + System.out.println( + "Submitted: 8, Processed: " + actualRequests + " (demonstrates concurrency limiting)"); + + } finally { + limitedTransport.close(); + } + } + + @Test + void should_use_efficient_countdownlatch_for_flush() throws InterruptedException { + // Arrange + wireMockServer.stubFor( + post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(200).withFixedDelay(300))); + + var futures = new ArrayList>(); + + // Start multiple requests + for (int i = 0; i < 3; i++) { + var logEntry = new LogEntry("Flush test " + i, LogLevel.INFO, i); + futures.add(transport.sendLog(logEntry)); + } + + // Act + long startTime = System.currentTimeMillis(); + var flushFuture = transport.flush(); + assertThat(flushFuture).succeedsWithin(5, TimeUnit.SECONDS); + long flushTime = 
System.currentTimeMillis() - startTime; + + // Assert + // Flush should wait for all pending requests efficiently + assertThat(flushTime).isGreaterThan(250); // At least the delay time + assertThat(flushTime).isLessThan(1000); // But not too long due to efficient waiting + + // All original futures should also complete + var allFutures = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])); + assertThat(allFutures).succeedsWithin(1, TimeUnit.SECONDS); + + wireMockServer.verify(3, postRequestedFor(urlEqualTo("/logs"))); + } + + @Test + void should_handle_multiple_shutdown_calls_safely() { + // Arrange + wireMockServer.stubFor( + post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(200).withFixedDelay(100))); + + var logEntry = new LogEntry("Shutdown test", LogLevel.INFO, 1L); + transport.sendLog(logEntry); + + // Act & Assert + var shutdown1 = transport.shutdown(); + var shutdown2 = transport.shutdown(); + var shutdown3 = transport.shutdown(); + + assertThat(shutdown1).isPresent(); + assertThat(shutdown2).isPresent(); + assertThat(shutdown3).isPresent(); + + // All shutdown futures should complete successfully + assertThat(shutdown1.get()).succeedsWithin(3, TimeUnit.SECONDS); + assertThat(shutdown2.get()).succeedsWithin(1, TimeUnit.SECONDS); + assertThat(shutdown3.get()).succeedsWithin(1, TimeUnit.SECONDS); + } + + @Test + void should_mask_api_key_in_verbose_logging() { + // Arrange + var verboseConfig = + LogdashConfig.builder() + .apiKey("very-secret-api-key-12345") + .baseUrl("http://localhost:" + wireMockServer.port()) + .enableVerboseLogging(true) + .build(); + + wireMockServer.stubFor(post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(200))); + + try (var verboseTransport = new HttpTransport(verboseConfig)) { + var logEntry = new LogEntry("Verbose test", LogLevel.INFO, 1L); + + // Act + var future = verboseTransport.sendLog(logEntry); + + // Assert + assertThat(future).succeedsWithin(3, TimeUnit.SECONDS); + // Note: In real 
scenario, you'd capture System.out to verify masking + // For this test, we just ensure it completes without issues + } + } + + @Test + void should_handle_invalid_base_url_gracefully() { + // Arrange - HttpTransport doesn't validate URL in constructor + // Invalid URLs are caught during actual request sending + var invalidConfig = + LogdashConfig.builder().apiKey("test-key").baseUrl("not-a-valid-url").build(); + + var transport = new HttpTransport(invalidConfig); + var logEntry = new LogEntry("Test", LogLevel.INFO, 1L); + + try { + // Act - invalid URL should be caught during request creation + var future = transport.sendLog(logEntry); + + // Assert - future should complete (error is handled gracefully) + assertThat(future).succeedsWithin(5, TimeUnit.SECONDS); + + // No requests should reach the server due to invalid URL + wireMockServer.verify(0, postRequestedFor(urlEqualTo("/logs"))); + } finally { + transport.close(); + } + } + + @Test + void should_handle_interrupted_threads_during_retry() throws InterruptedException { + // Arrange + wireMockServer.stubFor( + post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(500).withFixedDelay(100))); + + var logEntry = new LogEntry("Interrupt test", LogLevel.ERROR, 1L); + var future = transport.sendLog(logEntry); + + // Act - interrupt the thread during retry + Thread.sleep(150); // Let first attempt fail and retry start + + // The transport should handle interruption gracefully + assertThat(future).succeedsWithin(5, TimeUnit.SECONDS); + } + + @Test + void should_maintain_minimum_throughput_under_burst_load() throws InterruptedException { + // REGRESSION TEST: Prevents implementation from becoming too aggressive + // with request rejection, ensuring reasonable throughput under load + + wireMockServer.stubFor( + post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(200).withFixedDelay(150))); + + int submittedRequests = 25; + var futures = new ArrayList>(); + var completionLatch = new 
CountDownLatch(submittedRequests); + + // Act - Submit burst of requests to test throughput behavior + long startTime = System.currentTimeMillis(); + + for (int i = 0; i < submittedRequests; i++) { + var logEntry = new LogEntry("Throughput test " + i, LogLevel.INFO, i); + var future = transport.sendLog(logEntry); + + future.whenComplete((result, throwable) -> completionLatch.countDown()); + futures.add(future); + + // Small stagger to simulate realistic usage pattern + if (i % 3 == 0) { + Thread.sleep(10); + } + } + + // Assert - All futures must complete (no hanging) + assertThat(completionLatch.await(15, TimeUnit.SECONDS)) + .as("All requests should complete within timeout") + .isTrue(); + + long totalTime = System.currentTimeMillis() - startTime; + var actualRequests = wireMockServer.findAll(postRequestedFor(urlEqualTo("/logs"))).size(); + + // CRITICAL REGRESSION PROTECTION: + // 1. Minimum throughput requirement - at least 60% of requests should be processed + int minimumExpectedRequests = (int) (submittedRequests * 0.6); // 60% threshold + assertThat(actualRequests) + .as( + "Transport should process at least %d%% of submitted requests (got %d/%d)", + 60, actualRequests, submittedRequests) + .isGreaterThanOrEqualTo(minimumExpectedRequests); + + // 2. Performance requirement - should complete in reasonable time + long maxExpectedTime = submittedRequests * 200L; // 200ms per request max + assertThat(totalTime) + .as( + "Transport should maintain reasonable performance (%dms for %d requests)", + totalTime, submittedRequests) + .isLessThan(maxExpectedTime); + + // 3. 
All futures should succeed (no exceptions) + var allFutures = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])); + assertThat(allFutures) + .as("All request futures should complete successfully") + .succeedsWithin(2, TimeUnit.SECONDS); + + // Diagnostic information for troubleshooting + if (config.enableVerboseLogging()) { + double throughputPercentage = (actualRequests * 100.0) / submittedRequests; + System.out.printf( + "Throughput regression test: %.1f%% success rate (%d/%d requests, %dms total)%n", + throughputPercentage, actualRequests, submittedRequests, totalTime); + } + } + + @Test + void should_enforce_performance_contract_requirements() throws InterruptedException { + // CONTRACT TEST: Defines minimum performance characteristics that + // the HttpTransport implementation must always satisfy + + wireMockServer.stubFor( + post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(200).withFixedDelay(100))); + + // Test scenarios with different load patterns + var testScenarios = + List.of( + new TestScenario("Light Load", 5, 0.9), // 90% success rate + new TestScenario("Medium Load", 10, 0.8), // 80% success rate + new TestScenario("Heavy Load", 15, 0.6) // 60% success rate + ); + + for (var scenario : testScenarios) { + // Reset WireMock request journal + wireMockServer.resetRequests(); + + var futures = new ArrayList>(); + + // Submit requests for this scenario + for (int i = 0; i < scenario.requestCount; i++) { + var logEntry = new LogEntry(scenario.name + " " + i, LogLevel.INFO, i); + futures.add(transport.sendLog(logEntry)); + Thread.sleep(20); // Realistic spacing + } + + // Wait for completion + var allFutures = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])); + assertThat(allFutures).succeedsWithin(10, TimeUnit.SECONDS); + + // Verify contract requirements + var actualRequests = wireMockServer.findAll(postRequestedFor(urlEqualTo("/logs"))).size(); + int expectedMinimum = (int) (scenario.requestCount * 
scenario.minSuccessRate); + + assertThat(actualRequests) + .as( + "Scenario '%s': Expected at least %.0f%% success (%d requests), got %d", + scenario.name, scenario.minSuccessRate * 100, expectedMinimum, actualRequests) + .isGreaterThanOrEqualTo(expectedMinimum); + } + } + + @Test + void should_preserve_thread_pool_efficiency_under_reasonable_load() throws InterruptedException { + // Arrange - Use config with higher concurrency for load test + var loadConfig = + LogdashConfig.builder() + .apiKey("test-key") + .baseUrl("http://localhost:" + wireMockServer.port()) + .maxConcurrentRequests(10) // Higher concurrency + .requestTimeoutMs(3000L) + .build(); + + var loadTransport = new HttpTransport(loadConfig); + + wireMockServer.stubFor( + post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(200).withFixedDelay(150))); + + var futures = new ArrayList>(); + var latch = new CountDownLatch(15); // Reasonable load + + try { + // Act - Submit requests with small delays + for (int i = 0; i < 15; i++) { + var logEntry = new LogEntry("Load test " + i, LogLevel.INFO, i); + var future = loadTransport.sendLog(logEntry); + future.whenComplete((result, throwable) -> latch.countDown()); + futures.add(future); + + // Small delay to avoid overwhelming + Thread.sleep(20); + } + + // Assert + assertThat(latch.await(10, TimeUnit.SECONDS)).isTrue(); + + var allFutures = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])); + assertThat(allFutures).succeedsWithin(3, TimeUnit.SECONDS); + + // Most requests should have been processed + var actualRequests = wireMockServer.findAll(postRequestedFor(urlEqualTo("/logs"))).size(); + assertThat(actualRequests).isGreaterThan(10); // At least 2/3 processed + + } finally { + loadTransport.close(); + } + } + + private record TestScenario(String name, int requestCount, double minSuccessRate) { + } +} diff --git a/src/test/java/io/logdash/sdk/transport/LogdashTransportTest.java 
b/src/test/java/io/logdash/sdk/transport/LogdashTransportTest.java new file mode 100644 index 0000000..6391bce --- /dev/null +++ b/src/test/java/io/logdash/sdk/transport/LogdashTransportTest.java @@ -0,0 +1,64 @@
package io.logdash.sdk.transport;

import io.logdash.sdk.log.LogEntry;
import io.logdash.sdk.log.LogLevel;
import io.logdash.sdk.metrics.MetricEntry;
import io.logdash.sdk.metrics.MetricType;
import org.junit.jupiter.api.Test;

import java.util.Optional;
import java.util.concurrent.CompletableFuture;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * Contract tests for the {@link LogdashTransport} interface, verified against a
 * minimal stub implementation whose operations complete immediately.
 */
class LogdashTransportTest {

    @Test
    void should_define_contract_for_log_sending() {
        LogdashTransport transport = new StubTransport();
        var entry = new LogEntry("Test message", LogLevel.INFO, 1L);

        var result = transport.sendLog(entry);

        // Every implementation must hand back a non-null CompletableFuture.
        assertThat(result).isNotNull().isInstanceOf(CompletableFuture.class);
    }

    @Test
    void should_define_contract_for_metric_sending() {
        LogdashTransport transport = new StubTransport();
        var entry = new MetricEntry("test_metric", 42, MetricType.SET);

        var result = transport.sendMetric(entry);

        assertThat(result).isNotNull().isInstanceOf(CompletableFuture.class);
    }

    @Test
    void should_provide_optional_shutdown_method() {
        LogdashTransport transport = new StubTransport();

        // The stub does not override shutdown(), so this exercises the
        // interface-provided default implementation.
        var shutdownResult = transport.shutdown();

        assertThat(shutdownResult).isNotNull().isInstanceOf(Optional.class);
    }

    /** Minimal transport used to exercise the interface contract; does no I/O. */
    private static class StubTransport implements LogdashTransport {
        @Override
        public CompletableFuture<Void> sendLog(LogEntry logEntry) {
            return CompletableFuture.completedFuture(null);
        }

        @Override
        public CompletableFuture<Void> sendMetric(MetricEntry metricEntry) {
            return CompletableFuture.completedFuture(null);
        }

        @Override
        public void close() {
            // Intentionally empty - nothing to release.
        }
    }
}
diff --git
a/src/test/java/io/logdash/sdk/transport/NoOpTransportTest.java b/src/test/java/io/logdash/sdk/transport/NoOpTransportTest.java new file mode 100644 index 0000000..56e351b --- /dev/null +++ b/src/test/java/io/logdash/sdk/transport/NoOpTransportTest.java @@ -0,0 +1,205 @@
package io.logdash.sdk.transport;

import io.logdash.sdk.config.LogdashConfig;
import io.logdash.sdk.log.LogEntry;
import io.logdash.sdk.log.LogLevel;
import io.logdash.sdk.metrics.MetricEntry;
import io.logdash.sdk.metrics.MetricType;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * Tests for {@link NoOpTransport}: console echoing of logs and metrics, the
 * enableConsoleOutput flag, and safe operation after close(). System.out is
 * captured per test so printed output can be asserted on.
 */
class NoOpTransportTest {

    private NoOpTransport transport;
    private LogdashConfig config;
    private ByteArrayOutputStream sink;
    private PrintStream realStdOut;

    /** Strips ANSI colour escape sequences so assertions run against plain text. */
    private static String plain(String raw) {
        return raw.replaceAll("\\u001B\\[[;\\d]*m", "");
    }

    @BeforeEach
    void setUp() {
        sink = new ByteArrayOutputStream();
        realStdOut = System.out;
        System.setOut(new PrintStream(sink));

        config = LogdashConfig.builder()
                .apiKey("test-key")
                .enableConsoleOutput(true)
                .enableVerboseLogging(true)
                .build();
        transport = new NoOpTransport(config);
    }

    @AfterEach
    void tearDown() {
        System.setOut(realStdOut);
        if (transport != null) {
            transport.close();
        }
    }

    @Test
    void should_initialize_with_verbose_logging() {
        // Construction in setUp() already emitted the banner when verbose is on.
        assertThat(sink.toString()).contains("NoOp transport initialized");
    }

    @Test
    void should_print_log_to_console_when_enabled() throws Exception {
        var future = transport.sendLog(new LogEntry("Test message", LogLevel.INFO, 1L));

        assertThat(future).succeedsWithin(100, TimeUnit.MILLISECONDS);
        var printed = plain(sink.toString());
        assertThat(printed).contains("[LOG]");
        assertThat(printed).contains("INFO");
        assertThat(printed).contains("Test message");
        assertThat(printed).contains("seq=1");
    }

    @Test
    void should_print_metric_to_console_when_enabled() throws Exception {
        var future = transport.sendMetric(new MetricEntry("test_metric", 42, MetricType.SET));

        assertThat(future).succeedsWithin(100, TimeUnit.MILLISECONDS);
        var printed = plain(sink.toString());
        assertThat(printed).contains("[METRIC]");
        assertThat(printed).contains("SET");
        assertThat(printed).contains("test_metric");
        assertThat(printed).contains("42");
    }

    @Test
    void should_not_print_when_console_output_disabled() throws Exception {
        var silentConfig = LogdashConfig.builder()
                .apiKey("test-key")
                .enableConsoleOutput(false)
                .enableVerboseLogging(false)
                .build();
        var silentTransport = new NoOpTransport(silentConfig);

        var logFuture = silentTransport.sendLog(new LogEntry("Test message", LogLevel.INFO, 1L));
        var metricFuture =
                silentTransport.sendMetric(new MetricEntry("test_metric", 42, MetricType.SET));

        assertThat(logFuture).succeedsWithin(100, TimeUnit.MILLISECONDS);
        assertThat(metricFuture).succeedsWithin(100, TimeUnit.MILLISECONDS);

        // With console output off, neither entry may be echoed.
        var printed = sink.toString();
        assertThat(printed).doesNotContain("[LOG]");
        assertThat(printed).doesNotContain("[METRIC]");

        silentTransport.close();
    }

    @Test
    void should_handle_operations_after_close() throws Exception {
        transport.close();

        var logFuture = transport.sendLog(new LogEntry("Test message", LogLevel.INFO, 1L));
        var metricFuture = transport.sendMetric(new MetricEntry("test_metric", 42, MetricType.SET));

        // A closed NoOp transport still completes futures instead of failing.
        assertThat(logFuture).succeedsWithin(100, TimeUnit.MILLISECONDS);
        assertThat(metricFuture).succeedsWithin(100, TimeUnit.MILLISECONDS);
        assertThat(sink.toString()).contains("transport closed");
    }

    @Test
    void should_handle_all_log_levels() throws Exception {
        for (var level : LogLevel.values()) {
            var future = transport.sendLog(new LogEntry("Test " + level, level, 1L));
            assertThat(future).succeedsWithin(100, TimeUnit.MILLISECONDS);
        }

        var printed = sink.toString();
        for (var level : LogLevel.values()) {
            assertThat(printed).contains(level.getValue().toUpperCase());
        }
    }

    @Test
    void should_handle_all_metric_types() throws Exception {
        for (var type : MetricType.values()) {
            var future = transport.sendMetric(new MetricEntry("test_" + type, 42, type));
            assertThat(future).succeedsWithin(100, TimeUnit.MILLISECONDS);
        }

        var printed = sink.toString();
        for (var type : MetricType.values()) {
            assertThat(printed).contains(type.getValue().toUpperCase());
        }
    }

    @Test
    void should_handle_special_characters_in_output() throws Exception {
        transport.sendLog(
                new LogEntry("Message with émojis 🚀 and quotes \"test\"", LogLevel.INFO, 1L));
        transport.sendMetric(new MetricEntry("metric/with-special@chars", 3.14, MetricType.SET));

        var printed = sink.toString();
        assertThat(printed).contains("émojis 🚀");
        assertThat(printed).contains("quotes \"test\"");
        assertThat(printed).contains("metric/with-special@chars");
    }

    @Test
    void should_handle_concurrent_operations() throws Exception {
        var futures = new ArrayList<CompletableFuture<?>>();

        for (int i = 0; i < 10; i++) {
            futures.add(transport.sendLog(new LogEntry("Concurrent log " + i, LogLevel.INFO, i)));
            futures.add(transport.sendMetric(
                    new MetricEntry("concurrent_metric_" + i, i, MetricType.SET)));
        }

        var all = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
        assertThat(all).succeedsWithin(1, TimeUnit.SECONDS);
    }
}
diff --git a/src/test/java/io/logdash/sdk/util/JsonSerializerEdgeCasesTest.java b/src/test/java/io/logdash/sdk/util/JsonSerializerEdgeCasesTest.java new file mode 100644 index 0000000..2d86247 --- /dev/null +++ b/src/test/java/io/logdash/sdk/util/JsonSerializerEdgeCasesTest.java @@ -0,0 +1,111 @@
package io.logdash.sdk.util;

import io.logdash.sdk.log.LogEntry;
import io.logdash.sdk.log.LogLevel;
import io.logdash.sdk.metrics.MetricEntry;
import io.logdash.sdk.metrics.MetricType;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;

import java.time.Instant;
import java.util.Map;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * Edge-case coverage for {@link JsonSerializer}: unusual message content,
 * numeric extremes, concurrent use, and timestamp precision.
 */
class JsonSerializerEdgeCasesTest {

    private JsonSerializer serializer;

    @BeforeEach
    void setUp() {
        serializer = new JsonSerializer();
    }

    @ParameterizedTest
    @ValueSource(
            strings = {
                "Simple message",
                "Message with émojis 🚀🎉",
                "Message\nwith\nnewlines",
                "Message with \"quotes\"",
                "Message with 中文字符",
                ""
            })
    void should_handle_various_message_formats(String message) {
        var json = serializer.serialize(new LogEntry(message, LogLevel.INFO, 1L));

        assertThat(json).isNotNull();
        assertThat(json).contains("\"message\":");
        assertThat(json).contains("\"level\":\"info\"");
    }

    @Test
    void should_handle_extremely_large_numbers() {
        // Long extremes must be emitted verbatim; double extremes just need to serialize.
        assertThat(serializer.serialize(new MetricEntry("max_long", Long.MAX_VALUE, MetricType.SET)))
                .contains(String.valueOf(Long.MAX_VALUE));
        assertThat(serializer.serialize(new MetricEntry("min_long", Long.MIN_VALUE, MetricType.SET)))
                .contains(String.valueOf(Long.MIN_VALUE));
        assertThat(serializer.serialize(new MetricEntry("max_double", Double.MAX_VALUE, MetricType.SET)))
                .contains("value");
        assertThat(serializer.serialize(new MetricEntry("min_double", -Double.MAX_VALUE, MetricType.SET)))
                .contains("value");
    }

    @Test
    void should_handle_concurrent_serialization() throws InterruptedException {
        var executor = Executors.newFixedThreadPool(10);
        var latch = new CountDownLatch(100);
        var results = new ConcurrentLinkedQueue<String>();

        for (int i = 0; i < 100; i++) {
            final int iteration = i;
            executor.submit(() -> {
                try {
                    var entry = new LogEntry("Message " + iteration, LogLevel.INFO, iteration);
                    results.add(serializer.serialize(entry));
                } finally {
                    // Count down even on failure so await() cannot hang the test.
                    latch.countDown();
                }
            });
        }

        assertThat(latch.await(5, TimeUnit.SECONDS)).isTrue();
        assertThat(results).hasSize(100);
        assertThat(results).allSatisfy(json -> {
            assertThat(json).contains("\"message\":");
            assertThat(json).contains("\"level\":\"info\"");
        });

        executor.shutdown();
    }

    @Test
    void should_preserve_timestamp_precision() {
        var timestamp = Instant.parse("2024-12-25T10:15:30.123456789Z");
        var entry = new LogEntry("Precision test", LogLevel.INFO, timestamp, 1L, Map.of());

        var json = serializer.serialize(entry);

        // Nanosecond precision must survive into the JSON payload unchanged.
        assertThat(json).contains("\"createdAt\":\"2024-12-25T10:15:30.123456789Z\"");
    }
}
package io.logdash.sdk.util;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.logdash.sdk.log.LogEntry;
import io.logdash.sdk.log.LogLevel;
import io.logdash.sdk.metrics.MetricEntry;
import io.logdash.sdk.metrics.MetricType;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import java.time.Instant;
import java.util.Map;

import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;

/**
 * Unit tests pinning the exact JSON wire format produced by {@link JsonSerializer}
 * for {@code LogEntry} and {@code MetricEntry} payloads.
 *
 * <p>Contract exercised here (as established by the assertions below):
 * <ul>
 *   <li>Log levels and metric operations serialize as lowercase strings
 *       (e.g. {@code MetricType.MUTATE} becomes {@code "operation":"change"}).</li>
 *   <li>Quotes, backslashes, {@code \n}, {@code \r} and {@code \t} are escaped.</li>
 *   <li>Non-finite doubles (NaN, +/-Infinity) are normalized to {@code 0.0}.</li>
 *   <li>Timestamps render as ISO-8601 UTC instants under {@code createdAt}.</li>
 * </ul>
 */
class JsonSerializerTest {

    // Fresh serializer per test; JsonSerializer is assumed stateless across calls
    // based on its use here — TODO confirm against the implementation.
    private JsonSerializer serializer;

    @BeforeEach
    void setUp() {
        serializer = new JsonSerializer();
    }

    @Test
    void should_serialize_simple_log_entry() {
        // Arrange — fixed instant so the createdAt assertion is deterministic
        var timestamp = Instant.parse("2024-06-01T12:34:56Z");
        var logEntry = new LogEntry("Test message", LogLevel.INFO, timestamp, 42L, Map.of());

        // Act
        var json = serializer.serialize(logEntry);

        // Assert — field names, lowercase level, ISO-8601 timestamp, numeric sequence
        assertThat(json).contains("\"message\":\"Test message\"");
        assertThat(json).contains("\"level\":\"info\"");
        assertThat(json).contains("\"createdAt\":\"2024-06-01T12:34:56Z\"");
        assertThat(json).contains("\"sequenceNumber\":42");
    }

    @Test
    void should_serialize_log_entry_with_special_characters() {
        // Arrange — message containing a literal quote and a newline
        var logEntry = new LogEntry("Message with \"quotes\" and \n newlines", LogLevel.ERROR, 1L);

        // Act
        var json = serializer.serialize(logEntry);

        // Assert — both must appear escaped in the JSON output
        assertThat(json).contains("\\\"quotes\\\"");
        assertThat(json).contains("\\n");
    }

    @Test
    void should_serialize_metric_entry_with_integer_value() throws Exception {
        // Arrange
        var metricEntry = new MetricEntry("user_count", 100, MetricType.SET);

        // Act
        var json = serializer.serialize(metricEntry);

        // Assert — parse with Jackson to validate the output is well-formed JSON,
        // not just a string that happens to contain the expected fragments
        ObjectMapper mapper = new ObjectMapper();
        JsonNode node = mapper.readTree(json);
        assertThat(node.get("name").asText()).isEqualTo("user_count");
        assertThat(node.get("value").asInt()).isEqualTo(100);
        assertThat(node.get("operation").asText()).isEqualTo("set");
    }

    @Test
    void should_serialize_metric_entry_with_double_value() {
        // Arrange
        var metricEntry = new MetricEntry("temperature", 23.5, MetricType.MUTATE);

        // Act
        var json = serializer.serialize(metricEntry);

        // Assert — note the enum-to-wire mapping: MUTATE serializes as "change"
        assertThat(json).contains("\"name\":\"temperature\"");
        assertThat(json).contains("\"value\":23.5");
        assertThat(json).contains("\"operation\":\"change\"");
    }

    @Test
    void should_handle_metric_with_special_characters_in_name() {
        // Arrange — name contains a quote and a backslash; forward slash needs no escape
        var metricEntry = new MetricEntry("metric/with\"special\\chars", 42, MetricType.SET);

        // Act
        var json = serializer.serialize(metricEntry);

        // Assert — quote escaped to \" and backslash to \\, slash left as-is
        assertThat(json).contains("metric/with\\\"special\\\\chars");
    }

    @Test
    void should_handle_null_metric_name_gracefully() {
        // Validation happens in the MetricEntry constructor, before serialization
        assertThatThrownBy(() -> new MetricEntry(null, 42, MetricType.SET))
                .isInstanceOf(IllegalArgumentException.class)
                .hasMessageContaining("Metric name cannot be null");
    }

    @Test
    void should_serialize_log_with_unicode_characters() {
        // Arrange — includes a non-BMP code point (the rocket emoji)
        var logEntry = new LogEntry("Message with émojis 🚀 and ñáéíóú", LogLevel.INFO, 1L);

        // Act
        var json = serializer.serialize(logEntry);

        // Assert — non-ASCII passes through unescaped rather than as \uXXXX sequences
        assertThat(json).contains("émojis 🚀 and ñáéíóú");
    }

    @Test
    void should_handle_empty_and_single_character_strings() {
        // Arrange — boundary cases for string-escaping loops
        var emptyEntry = new LogEntry("", LogLevel.INFO, 1L);
        var singleCharEntry = new LogEntry("x", LogLevel.INFO, 2L);

        // Act
        var emptyJson = serializer.serialize(emptyEntry);
        var singleCharJson = serializer.serialize(singleCharEntry);

        // Assert
        assertThat(emptyJson).contains("\"message\":\"\"");
        assertThat(singleCharJson).contains("\"message\":\"x\"");
    }

    @Test
    void should_handle_all_control_characters_properly() {
        // Arrange — raw control characters embedded in the message
        var message = "Test\b\f\r\t\u0001\u001F";
        var logEntry = new LogEntry(message, LogLevel.INFO, 1L);

        // Act
        var json = serializer.serialize(logEntry);

        // Assert — \r and \t are escaped; for \b, \f and other control characters
        // only the ABSENCE of the escape sequence is checked. Presumably the
        // serializer strips those characters entirely rather than escaping them —
        // TODO confirm against JsonSerializer (raw unescaped control characters
        // in the output would be invalid JSON per RFC 8259).
        assertThat(json).contains("\\r");
        assertThat(json).contains("\\t");
        assertThat(json).doesNotContain("\\b");
        assertThat(json).doesNotContain("\\f");
        assertThat(json).doesNotContain("\\u0001");
        assertThat(json).doesNotContain("\\u001F");
    }

    @Test
    void should_handle_negative_numbers() {
        // Arrange
        var negativeInt = new MetricEntry("negative", -42, MetricType.SET);
        var negativeDouble = new MetricEntry("negative_double", -3.14, MetricType.MUTATE);

        // Act
        var intJson = serializer.serialize(negativeInt);
        var doubleJson = serializer.serialize(negativeDouble);

        // Assert — sign is preserved in the numeric rendering
        assertThat(intJson).contains("-42");
        assertThat(doubleJson).contains("-3.14");
    }

    @Test
    void should_handle_zero_values() {
        // Arrange
        var zeroInt = new MetricEntry("zero", 0, MetricType.SET);
        var zeroDouble = new MetricEntry("zero_double", 0.0, MetricType.SET);

        // Act
        var intJson = serializer.serialize(zeroInt);
        var doubleJson = serializer.serialize(zeroDouble);

        // Assert — contains("\"value\":0") deliberately matches both "0" and "0.0",
        // so this passes regardless of how the serializer renders a zero double
        assertThat(intJson).contains("\"value\":0");
        assertThat(doubleJson).contains("\"value\":0");
    }

    @Test
    void should_handle_very_long_strings() {
        // Arrange — 10k characters; guards against truncation or buffer-size limits
        var longMessage = "A".repeat(10000);
        var logEntry = new LogEntry(longMessage, LogLevel.INFO, 1L);

        // Act
        var json = serializer.serialize(logEntry);

        // Assert — full payload survives, output is strictly longer than the input
        assertThat(json).contains(longMessage);
        assertThat(json.length()).isGreaterThan(10000);
    }

    @Test
    void should_handle_nan_and_infinite_double_values() {
        // Arrange — NaN and infinities are not representable in JSON numbers
        var nanMetric = new MetricEntry("nan_value", Double.NaN, MetricType.SET);
        var infiniteMetric =
                new MetricEntry("infinite_value", Double.POSITIVE_INFINITY, MetricType.SET);
        var negInfiniteMetric =
                new MetricEntry("neg_infinite", Double.NEGATIVE_INFINITY, MetricType.SET);

        // Act
        var nanJson = serializer.serialize(nanMetric);
        var infiniteJson = serializer.serialize(infiniteMetric);
        var negInfiniteJson = serializer.serialize(negInfiniteMetric);

        // Assert — all three non-finite values are normalized to 0.0
        assertThat(nanJson).contains("\"value\":0.0");
        assertThat(infiniteJson).contains("\"value\":0.0");
        assertThat(negInfiniteJson).contains("\"value\":0.0");
    }

    @Test
    void should_format_large_integer_values_correctly() {
        // Arrange
        var largeInt = new MetricEntry("large_int", Long.MAX_VALUE, MetricType.SET);
        var largeDouble = new MetricEntry("large_double", 1.23456789012345e14, MetricType.SET);

        // Act
        var intJson = serializer.serialize(largeInt);
        var doubleJson = serializer.serialize(largeDouble);

        // Assert — Long.MAX_VALUE must be rendered exactly (no double round-trip loss)
        assertThat(intJson).contains(String.valueOf(Long.MAX_VALUE));
        // For large doubles, accept scientific notation as valid
        assertThat(doubleJson)
                .satisfiesAnyOf(
                        json -> assertThat(json).contains("123456789012345"),
                        json -> assertThat(json).contains("1.23456789012345E14"),
                        json -> assertThat(json).contains("1.23456789012345e14"));
    }

    @Test
    void should_handle_log_entries_with_timestamp_edge_cases() {
        // Arrange — the Unix epoch and a far-future instant
        var epoch = Instant.EPOCH;
        var farFuture = Instant.parse("2099-12-31T23:59:59Z");

        var epochEntry = new LogEntry("Epoch", LogLevel.INFO, epoch, 1L, Map.of());
        var futureEntry = new LogEntry("Future", LogLevel.INFO, farFuture, 2L, Map.of());

        // Act
        var epochJson = serializer.serialize(epochEntry);
        var futureJson = serializer.serialize(futureEntry);

        // Assert — ISO-8601 UTC format at both extremes
        assertThat(epochJson).contains("\"createdAt\":\"1970-01-01T00:00:00Z\"");
        assertThat(futureJson).contains("\"createdAt\":\"2099-12-31T23:59:59Z\"");
    }

    @Test
    void should_handle_different_number_types() {
        // Arrange — one metric per boxed numeric type accepted by MetricEntry
        var intMetric = new MetricEntry("int_metric", 42, MetricType.SET);
        var longMetric = new MetricEntry("long_metric", 123456789L, MetricType.SET);
        var floatMetric = new MetricEntry("float_metric", 3.14f, MetricType.SET);
        var doubleMetric = new MetricEntry("double_metric", 2.718281828, MetricType.SET);

        // Act & Assert — integral types render without a decimal point,
        // floating types keep their fractional digits
        assertThat(serializer.serialize(intMetric)).contains("\"value\":42");
        assertThat(serializer.serialize(longMetric)).contains("\"value\":123456789");
        assertThat(serializer.serialize(floatMetric)).contains("\"value\":3.14");
        assertThat(serializer.serialize(doubleMetric)).contains("\"value\":2.718281828");
    }

    @Test
    void should_escape_all_json_control_characters() {
        // Arrange — every escapable character plus NUL and a C1-range control char
        var message = "Test\"\\\b\f\n\r\t\u0000\u001F";
        var logEntry = new LogEntry(message, LogLevel.INFO, 1L);

        // Act
        var json = serializer.serialize(logEntry);

        // Assert — quote/backslash/\n/\r/\t escaped; \b, \f, \u0000 and \u001F are
        // asserted absent as escape sequences (consistent with
        // should_handle_all_control_characters_properly: presumably these
        // characters are stripped rather than escaped — TODO confirm)
        assertThat(json).contains("\\\""); // quote
        assertThat(json).contains("\\\\"); // backslash
        assertThat(json).contains("\\n"); // newline
        assertThat(json).contains("\\r"); // carriage return
        assertThat(json).contains("\\t"); // tab
        assertThat(json).doesNotContain("\\b"); // backspace
        assertThat(json).doesNotContain("\\f"); // form feed
        assertThat(json).doesNotContain("\\u0000"); // null character
        assertThat(json).doesNotContain("\\u001F"); // unit separator
    }

    @Test
    void should_handle_empty_strings_and_contexts() {
        // Arrange — empty message is allowed for logs...
        var emptyMessage = new LogEntry("", LogLevel.INFO, 1L);

        // Act
        var logJson = serializer.serialize(emptyMessage);

        // Assert
        assertThat(logJson).contains("\"message\":\"\"");

        // ...but an empty metric name is rejected at construction time
        assertThatThrownBy(() -> new MetricEntry("", 0, MetricType.SET))
                .isInstanceOf(IllegalArgumentException.class)
                .hasMessageContaining("Metric name cannot be null or blank");
    }
}