context() {
+ return Collections.unmodifiableMap(new HashMap<>(context));
+ }
+}
diff --git a/src/main/java/io/logdash/sdk/log/LogLevel.java b/src/main/java/io/logdash/sdk/log/LogLevel.java
new file mode 100644
index 0000000..75033fa
--- /dev/null
+++ b/src/main/java/io/logdash/sdk/log/LogLevel.java
@@ -0,0 +1,64 @@
+package io.logdash.sdk.log;
+
/**
 * Defines the severity levels for log messages in Logdash.
 *
 * <p>Levels are declared from most severe ({@link #ERROR}) to least severe ({@link #SILLY}).
 * Each constant carries the exact string token expected by the Logdash API.
 */
public enum LogLevel {
    /** Error level for application errors and exceptions. */
    ERROR("error"),

    /** Warning level for potentially harmful situations. */
    WARN("warning"),

    /** Informational level for general application flow. */
    INFO("info"),

    /** HTTP level for HTTP request/response logging. */
    HTTP("http"),

    /** Verbose level for detailed operational information. */
    VERBOSE("verbose"),

    /** Debug level for detailed diagnostic information. */
    DEBUG("debug"),

    /** Silly level for very detailed trace information. */
    SILLY("silly");

    // API wire token for this level; note WARN maps to "warning", not "warn".
    private final String apiValue;

    LogLevel(String apiValue) {
        this.apiValue = apiValue;
    }

    /**
     * Returns the string value used in the Logdash API.
     *
     * @return the API string representation of this log level
     */
    public String getValue() {
        return apiValue;
    }

    @Override
    public String toString() {
        return apiValue;
    }
}
diff --git a/src/main/java/io/logdash/sdk/log/LogdashLogger.java b/src/main/java/io/logdash/sdk/log/LogdashLogger.java
new file mode 100644
index 0000000..385dc69
--- /dev/null
+++ b/src/main/java/io/logdash/sdk/log/LogdashLogger.java
@@ -0,0 +1,303 @@
+package io.logdash.sdk.log;
+
+import io.logdash.sdk.config.LogdashConfig;
+import io.logdash.sdk.transport.LogdashTransport;
+
+import java.time.OffsetDateTime;
+import java.time.format.DateTimeFormatter;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Primary interface for sending structured log messages to Logdash platform.
+ *
+ *
Provides convenient methods for logging at different severity levels with optional structured
+ * context data. All log entries are sent asynchronously and are thread-safe for high-throughput
+ * scenarios.
+ *
+ *
Example usage:
+ *
+ *
{@code
+ * logger.info("User logged in successfully");
+ * logger.error("Database connection failed", Map.of("userId", 123, "retries", 3));
+ * logger.debug("Processing request", Map.of("requestId", "req-456"));
+ * }
+ */
+public final class LogdashLogger {
+
+ private static final DateTimeFormatter CONSOLE_FORMATTER = DateTimeFormatter.ofPattern(
+ "yyyy-MM-dd'T'HH:mm:ss.SSSXXX");
+ private static final String RESET = "\u001B[0m";
+ private static final String RED = "\u001B[31m";
+ private static final String YELLOW = "\u001B[33m";
+ private static final String BLUE = "\u001B[34m";
+ private static final String GREEN = "\u001B[32m";
+ private static final String PURPLE = "\u001B[35m";
+ private static final String CYAN = "\u001B[36m";
+ private static final String GRAY = "\u001B[90m";
+
+ private final LogdashConfig config;
+ private final LogdashTransport transport;
+ private final AtomicLong sequenceNumber = new AtomicLong(0);
+
+ /**
+ * Creates a new logger instance with the specified configuration and transport.
+ *
+ * Note: This constructor is internal to the SDK. Use {@link
+ * io.logdash.sdk.Logdash#logger()} to obtain an instance.
+ *
+ * @param config the SDK configuration
+ * @param transport the transport for sending log entries
+ */
+ public LogdashLogger(LogdashConfig config, LogdashTransport transport) {
+ this.config = config;
+ this.transport = transport;
+ }
+
+ /**
+ * Logs an error message indicating a serious problem.
+ *
+ * @param message the error message
+ */
+ public void error(String message) {
+ log(LogLevel.ERROR, message, Map.of());
+ }
+
+ /**
+ * Logs an error message with additional structured context.
+ *
+ * @param message the error message
+ * @param context additional context data as key-value pairs
+ */
+ public void error(String message, Map context) {
+ log(LogLevel.ERROR, message, context);
+ }
+
+ /**
+ * Logs a warning message indicating a potentially harmful situation.
+ *
+ * @param message the warning message
+ */
+ public void warn(String message) {
+ log(LogLevel.WARN, message, Map.of());
+ }
+
+ /**
+ * Logs a warning message with additional structured context.
+ *
+ * @param message the warning message
+ * @param context additional context data as key-value pairs
+ */
+ public void warn(String message, Map context) {
+ log(LogLevel.WARN, message, context);
+ }
+
+ /**
+ * Logs an informational message about normal application flow.
+ *
+ * @param message the informational message
+ */
+ public void info(String message) {
+ log(LogLevel.INFO, message, Map.of());
+ }
+
+ /**
+ * Logs an informational message with additional structured context.
+ *
+ * @param message the informational message
+ * @param context additional context data as key-value pairs
+ */
+ public void info(String message, Map context) {
+ log(LogLevel.INFO, message, context);
+ }
+
+ /**
+ * Logs an HTTP-related message (requests, responses, etc.).
+ *
+ * @param message the HTTP-related message
+ */
+ public void http(String message) {
+ log(LogLevel.HTTP, message, Map.of());
+ }
+
+ /**
+ * Logs an HTTP-related message with additional structured context.
+ *
+ * @param message the HTTP-related message
+ * @param context additional context data as key-value pairs
+ */
+ public void http(String message, Map context) {
+ log(LogLevel.HTTP, message, context);
+ }
+
+ /**
+ * Logs a verbose message with detailed operational information.
+ *
+ * @param message the verbose message
+ */
+ public void verbose(String message) {
+ log(LogLevel.VERBOSE, message, Map.of());
+ }
+
+ /**
+ * Logs a verbose message with additional structured context.
+ *
+ * @param message the verbose message
+ * @param context additional context data as key-value pairs
+ */
+ public void verbose(String message, Map context) {
+ log(LogLevel.VERBOSE, message, context);
+ }
+
+ /**
+ * Logs a debug message with detailed diagnostic information.
+ *
+ * @param message the debug message
+ */
+ public void debug(String message) {
+ log(LogLevel.DEBUG, message, Map.of());
+ }
+
+ /**
+ * Logs a debug message with additional structured context.
+ *
+ * @param message the debug message
+ * @param context additional context data as key-value pairs
+ */
+ public void debug(String message, Map context) {
+ log(LogLevel.DEBUG, message, context);
+ }
+
+ /**
+ * Logs a silly/trace level message with very detailed information.
+ *
+ * @param message the trace-level message
+ */
+ public void silly(String message) {
+ log(LogLevel.SILLY, message, Map.of());
+ }
+
+ /**
+ * Logs a silly/trace level message with additional structured context.
+ *
+ * @param message the trace-level message
+ * @param context additional context data as key-value pairs
+ */
+ public void silly(String message, Map context) {
+ log(LogLevel.SILLY, message, context);
+ }
+
+ private long getNextSequenceNumber() {
+ long current, next;
+ do {
+ current = sequenceNumber.get();
+ next = current == Long.MAX_VALUE ? 1 : current + 1;
+ } while (!sequenceNumber.compareAndSet(current, next));
+ return next;
+ }
+
+ private void log(LogLevel level, String message, Map context) {
+ var finalMessage = buildFinalMessage(message, context);
+ var logEntry = new LogEntry(finalMessage, level, getNextSequenceNumber());
+
+ if (config.enableConsoleOutput()) {
+ printToConsole(message, level, context);
+ }
+
+ transport.sendLog(logEntry);
+ }
+
+ private String buildFinalMessage(String message, Map context) {
+ if (context.isEmpty()) {
+ return message;
+ }
+
+ var contextJson = buildContextJson(context);
+ return message + " " + contextJson;
+ }
+
+ private String buildContextJson(Map context) {
+ if (context.isEmpty()) {
+ return "{}";
+ }
+
+ var sb = new StringBuilder(context.size() * 32);
+ sb.append("{");
+
+ var iterator = context.entrySet().iterator();
+ while (iterator.hasNext()) {
+ var entry = iterator.next();
+ sb.append("\"").append(escapeJsonString(entry.getKey())).append("\":");
+ sb.append(formatJsonValue(entry.getValue()));
+
+ if (iterator.hasNext()) {
+ sb.append(",");
+ }
+ }
+
+ sb.append("}");
+ return sb.toString();
+ }
+
+ private String formatJsonValue(Object value) {
+ if (value == null) {
+ return "null";
+ }
+ if (value instanceof String str) {
+ return "\"" + escapeJsonString(str) + "\"";
+ }
+ if (value instanceof Number || value instanceof Boolean) {
+ return value.toString();
+ }
+ return "\"" + escapeJsonString(String.valueOf(value)) + "\"";
+ }
+
+ private String escapeJsonString(String str) {
+ return str.replace("\\", "\\\\")
+ .replace("\"", "\\\"")
+ .replace("\n", "\\n")
+ .replace("\r", "\\r")
+ .replace("\t", "\\t");
+ }
+
+ private void printToConsole(String originalMessage, LogLevel level, Map context) {
+ var timestamp = OffsetDateTime.now().format(CONSOLE_FORMATTER);
+ var levelColor = getLevelColor(level);
+ var levelStr = level.getValue();
+
+ var contextStr = context.isEmpty() ? "" : " " + formatContext(context);
+
+ System.out.printf(
+ "%s [%s%s%s] %s%s%n", timestamp, levelColor, levelStr, RESET, originalMessage, contextStr);
+ }
+
+ private String formatContext(Map context) {
+ if (context.isEmpty()) {
+ return "";
+ }
+
+ var sb = new StringBuilder("{");
+ var first = true;
+ for (var entry : context.entrySet()) {
+ if (!first) {
+ sb.append(", ");
+ }
+ sb.append(entry.getKey()).append("=").append(entry.getValue());
+ first = false;
+ }
+ sb.append("}");
+ return sb.toString();
+ }
+
+ private String getLevelColor(LogLevel level) {
+ return switch (level) {
+ case ERROR -> RED;
+ case WARN -> YELLOW;
+ case INFO -> BLUE;
+ case HTTP -> GREEN;
+ case VERBOSE -> PURPLE;
+ case DEBUG -> CYAN;
+ case SILLY -> GRAY;
+ };
+ }
+}
diff --git a/src/main/java/io/logdash/sdk/metrics/LogdashMetrics.java b/src/main/java/io/logdash/sdk/metrics/LogdashMetrics.java
new file mode 100644
index 0000000..f7c8731
--- /dev/null
+++ b/src/main/java/io/logdash/sdk/metrics/LogdashMetrics.java
@@ -0,0 +1,73 @@
+package io.logdash.sdk.metrics;
+
+import io.logdash.sdk.config.LogdashConfig;
+import io.logdash.sdk.transport.LogdashTransport;
+
+/**
+ * Primary interface for sending custom metrics to Logdash platform.
+ *
+ * Provides convenient methods for common metric operations including counters, gauges, and
+ * custom measurements. All operations are asynchronous and thread-safe.
+ *
+ *
Example usage:
+ *
+ *
{@code
+ * metrics.set("active_users", 150);
+ * metrics.mutate("response_time_ms", -50);
+ * }
+ */
+public final class LogdashMetrics {
+
+ private final LogdashConfig config;
+ private final LogdashTransport transport;
+
+ /**
+ * Creates a new metrics instance with the specified configuration and transport.
+ *
+ * Note: This constructor is internal to the SDK. Use {@link
+ * io.logdash.sdk.Logdash#metrics()} to obtain an instance.
+ *
+ * @param config the SDK configuration
+ * @param transport the transport for sending metrics
+ */
+ public LogdashMetrics(LogdashConfig config, LogdashTransport transport) {
+ this.config = config;
+ this.transport = transport;
+ }
+
+ /**
+ * Sets a metric to a specific absolute value.
+ *
+ *
Use this for gauge-type metrics where you want to record the current state or level of
+ * something.
+ *
+ * @param name the metric name (e.g., "memory_usage_mb")
+ * @param value the absolute value to set
+ */
+ public void set(String name, Number value) {
+ var metric = new MetricEntry(name, value, MetricType.SET);
+ sendMetric(metric);
+ }
+
+ /**
+ * Changes a metric by the specified delta value.
+ *
+ *
Positive values increase the metric, negative values decrease it. This is the most flexible
+ * method for counter-type metrics.
+ *
+ * @param name the metric name
+ * @param delta the change amount (positive or negative)
+ */
+ public void mutate(String name, Number delta) {
+ var metric = new MetricEntry(name, delta, MetricType.MUTATE);
+ sendMetric(metric);
+ }
+
+ private void sendMetric(MetricEntry metric) {
+ if (config.enableVerboseLogging()) {
+ System.out.printf(
+ "Metric: %s %s %s%n", metric.operation().getValue(), metric.name(), metric.value());
+ }
+ transport.sendMetric(metric);
+ }
+}
diff --git a/src/main/java/io/logdash/sdk/metrics/MetricEntry.java b/src/main/java/io/logdash/sdk/metrics/MetricEntry.java
new file mode 100644
index 0000000..2edce93
--- /dev/null
+++ b/src/main/java/io/logdash/sdk/metrics/MetricEntry.java
@@ -0,0 +1,36 @@
+package io.logdash.sdk.metrics;
+
/**
 * Immutable data container representing a metric entry to be sent to Logdash.
 *
 * <p>A metric entry consists of:
 *
 * <ul>
 *   <li>A unique name identifying the metric (e.g., "http_requests_total")
 *   <li>A numeric value representing the measurement
 *   <li>An operation type defining how the metric should be processed
 * </ul>
 *
 * @param name the metric identifier, must not be null or blank
 * @param value the numeric value, must not be null
 * @param operation the type of operation to perform, must not be null
 */
public record MetricEntry(String name, Number value, MetricType operation) {

    /**
     * Compact constructor that validates the metric entry parameters during construction.
     *
     * @throws IllegalArgumentException if any parameter is null or the name is blank
     */
    public MetricEntry {
        if (name == null || name.isBlank()) {
            throw new IllegalArgumentException("Metric name cannot be null or blank");
        }
        if (value == null) {
            throw new IllegalArgumentException("Metric value cannot be null");
        }
        if (operation == null) {
            throw new IllegalArgumentException("Metric operation cannot be null");
        }
    }
}
diff --git a/src/main/java/io/logdash/sdk/metrics/MetricType.java b/src/main/java/io/logdash/sdk/metrics/MetricType.java
new file mode 100644
index 0000000..6e2ccd9
--- /dev/null
+++ b/src/main/java/io/logdash/sdk/metrics/MetricType.java
@@ -0,0 +1,45 @@
+package io.logdash.sdk.metrics;
+
/**
 * Defines the types of operations that can be performed on metrics in Logdash.
 *
 * <p>These operations determine how metric values are processed by the Logdash platform:
 *
 * <ul>
 *   <li>{@link #SET} - Replaces the current metric value with the provided value
 *   <li>{@link #MUTATE} - Modifies the current metric value by adding the provided delta
 * </ul>
 */
public enum MetricType {
    /**
     * Set operation - replaces the current metric value with the provided value. Use for
     * gauge-type metrics where you want to record absolute values.
     */
    SET("set"),

    /**
     * Mutate operation - modifies the current metric value by the provided delta. Use for
     * counter-type metrics where you want to increment/decrement values.
     */
    MUTATE("change");

    // Wire token expected by the Logdash API; note MUTATE maps to "change", not "mutate".
    private final String apiName;

    MetricType(String apiName) {
        this.apiName = apiName;
    }

    /**
     * Returns the string value used in the Logdash API.
     *
     * @return the API string representation of this metric operation
     */
    public String getValue() {
        return apiName;
    }

    @Override
    public String toString() {
        return apiName;
    }
}
diff --git a/src/main/java/io/logdash/sdk/transport/HttpTransport.java b/src/main/java/io/logdash/sdk/transport/HttpTransport.java
new file mode 100644
index 0000000..38082af
--- /dev/null
+++ b/src/main/java/io/logdash/sdk/transport/HttpTransport.java
@@ -0,0 +1,408 @@
+package io.logdash.sdk.transport;
+
+import io.logdash.sdk.config.LogdashConfig;
+import io.logdash.sdk.exception.LogdashException;
+import io.logdash.sdk.log.LogEntry;
+import io.logdash.sdk.metrics.MetricEntry;
+import io.logdash.sdk.util.JsonSerializer;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.http.HttpClient;
+import java.net.http.HttpRequest;
+import java.net.http.HttpResponse;
+import java.time.Duration;
+import java.util.Optional;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * High-performance HTTP transport implementation for sending observability data to Logdash
+ * platform.
+ *
+ * This transport is optimized for throughput and low latency with:
+ *
+ *
+ * - Non-blocking concurrency control using tryAcquire
+ *
- Efficient request tracking with CountDownLatch
+ *
- Optimized thread pool sizing
+ *
- Fast retry logic with exponential backoff
+ *
- Minimal synchronization overhead
+ *
+ */
+public final class HttpTransport implements LogdashTransport, AutoCloseable {
+
+ private static final String LOGS_ENDPOINT = "/logs";
+ private static final String METRICS_ENDPOINT = "/metrics";
+
+ private static final String API_KEY_HEADER = "project-api-key";
+ private static final String USER_AGENT = "logdash-java-sdk/0.1.0";
+ private static final String CONTENT_TYPE = "application/json";
+
+ private final LogdashConfig config;
+ private final JsonSerializer serializer;
+ private final HttpClient httpClient;
+ private final ExecutorService executorService;
+ private final Semaphore rateLimiter;
+ private final AtomicBoolean closed = new AtomicBoolean(false);
+ private final AtomicBoolean shutdownInitiated = new AtomicBoolean(false);
+ private final AtomicLong requestCounter = new AtomicLong(0);
+ private final AtomicLong activeRequests = new AtomicLong(0);
+
+ private volatile CountDownLatch pendingRequestsLatch = new CountDownLatch(0);
+
+ /**
+ * Creates a new HTTP transport with the specified configuration.
+ *
+ * @param config the configuration for HTTP operations
+ * @throws LogdashException if transport initialization fails
+ */
+ public HttpTransport(LogdashConfig config) {
+ this.config = config;
+ this.serializer = new JsonSerializer();
+ this.rateLimiter = new Semaphore(config.maxConcurrentRequests(), true);
+ this.executorService = createOptimizedExecutorService();
+ this.httpClient = createOptimizedHttpClient();
+
+ if (config.enableVerboseLogging()) {
+ System.out.println("HTTP transport initialized for " + config.baseUrl());
+ }
+ }
+
+ @Override
+ public CompletableFuture sendLog(LogEntry logEntry) {
+ if (shutdownInitiated.get() || closed.get()) {
+ return CompletableFuture.completedFuture(null);
+ }
+
+ try {
+ String json = serializer.serialize(logEntry);
+ return sendWithRetry(LOGS_ENDPOINT, json, "POST");
+ } catch (Exception e) {
+ handleTransportError("Failed to serialize log entry", e);
+ return CompletableFuture.completedFuture(null);
+ }
+ }
+
+ @Override
+ public CompletableFuture sendMetric(MetricEntry metricEntry) {
+ if (shutdownInitiated.get() || closed.get()) {
+ return CompletableFuture.completedFuture(null);
+ }
+
+ try {
+ String json = serializer.serialize(metricEntry);
+ return sendWithRetry(METRICS_ENDPOINT, json, "PUT");
+ } catch (Exception e) {
+ handleTransportError("Failed to serialize metric entry", e);
+ return CompletableFuture.completedFuture(null);
+ }
+ }
+
+ /**
+ * Sends an HTTP request with optimized retry logic and smart concurrency control. Uses tryAcquire
+ * with timeout instead of immediate rejection for better throughput.
+ *
+ * @param endpoint the API endpoint path
+ * @param json the JSON payload to send
+ * @param method the HTTP method (POST or PUT)
+ * @return a CompletableFuture that completes when the request succeeds or all retries are exhausted
+ */
+ private CompletableFuture sendWithRetry(String endpoint, String json, String method) {
+ if (shutdownInitiated.get() || closed.get()) {
+ return CompletableFuture.completedFuture(null);
+ }
+
+ final long sequenceId = requestCounter.incrementAndGet();
+
+ return CompletableFuture.runAsync(
+ () -> {
+ boolean acquired = false;
+ try {
+ acquired = rateLimiter.tryAcquire(200, TimeUnit.MILLISECONDS);
+ if (!acquired) {
+ if (config.enableVerboseLogging()) {
+ System.out.printf(
+ "Request #%d rejected due to concurrency limit timeout%n", sequenceId);
+ }
+ return;
+ }
+
+ if (shutdownInitiated.get() || closed.get()) {
+ return;
+ }
+
+ long currentActive = activeRequests.incrementAndGet();
+ if (currentActive == 1) {
+ pendingRequestsLatch = new CountDownLatch(1);
+ }
+
+ Exception lastException = null;
+ for (int attempt = 1; attempt <= config.maxRetries() + 1; attempt++) {
+ try {
+ sendHttpRequest(endpoint, json, method, attempt, sequenceId);
+ return;
+ } catch (Exception e) {
+ lastException = e;
+ if (config.enableVerboseLogging()) {
+ System.err.printf(
+ "Request #%d failed: %s (attempt %d/%d)%n",
+ sequenceId, e.getMessage(), attempt, config.maxRetries() + 1);
+ }
+
+ if (attempt <= config.maxRetries() && !shutdownInitiated.get() && !closed.get()) {
+ try {
+ long baseDelay = config.retryDelayMs();
+ long exponentialDelay = baseDelay * (1L << (attempt - 1));
+ long maxDelay = Math.min(exponentialDelay, 5000);
+ long jitter = (long) (Math.random() * 100);
+ Thread.sleep(maxDelay + jitter);
+ } catch (InterruptedException ie) {
+ Thread.currentThread().interrupt();
+ return;
+ }
+ }
+ }
+ }
+
+ handleTransportError(
+ String.format(
+ "Request #%d failed permanently after %d attempts",
+ sequenceId, config.maxRetries() + 1),
+ lastException);
+
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ } finally {
+ if (acquired) {
+ rateLimiter.release();
+ }
+
+ long remaining = activeRequests.decrementAndGet();
+ if (remaining == 0) {
+ pendingRequestsLatch.countDown();
+ }
+ }
+ },
+ executorService);
+ }
+
+ private void sendHttpRequest(
+ String endpoint,
+ String json,
+ String method,
+ int attempt,
+ long sequenceId) throws InterruptedException {
+ URI uri;
+ try {
+ uri = createEndpointUri(endpoint);
+ } catch (LogdashException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new LogdashException("Failed to create endpoint URI", e);
+ }
+
+ if (config.enableVerboseLogging()) {
+ logRequestStart(method, endpoint, sequenceId, json.length(), attempt);
+ }
+
+ try {
+ HttpRequest request = HttpRequest.newBuilder()
+ .uri(uri)
+ .timeout(Duration.ofMillis(config.requestTimeoutMs()))
+ .header("Content-Type", CONTENT_TYPE)
+ .header(API_KEY_HEADER, config.apiKey())
+ .header("User-Agent", USER_AGENT)
+ .method(method, HttpRequest.BodyPublishers.ofString(json))
+ .build();
+
+ HttpResponse response = httpClient.send(request, HttpResponse.BodyHandlers.ofString());
+
+ if (isSuccessResponse(response.statusCode())) {
+ if (config.enableVerboseLogging()) {
+ System.out.printf(
+ "Successfully sent request #%d to %s (status=%d)%n",
+ sequenceId, endpoint, response.statusCode());
+ }
+ } else {
+ throw new LogdashException("HTTP " + response.statusCode() + ": " + response.body());
+ }
+ } catch (IOException e) {
+ throw new LogdashException("I/O error during HTTP request", e);
+ }
+ }
+
+ /**
+ * Flushes all pending requests efficiently using CountDownLatch.
+ *
+ * @return a future that completes when all pending requests finish
+ */
+ public CompletableFuture flush() {
+ return CompletableFuture.runAsync(
+ () -> {
+ try {
+ CountDownLatch currentLatch = pendingRequestsLatch;
+ if (currentLatch != null && activeRequests.get() > 0) {
+ long timeoutMs = Math.min(config.shutdownTimeoutMs(), 5000);
+ boolean completed = currentLatch.await(timeoutMs, TimeUnit.MILLISECONDS);
+
+ if (completed && config.enableVerboseLogging()) {
+ System.out.println("All pending requests flushed successfully");
+ } else if (!completed && config.enableVerboseLogging()) {
+ System.out.printf(
+ "Flush timeout, %d requests still pending%n", activeRequests.get());
+ }
+ }
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+ },
+ executorService);
+ }
+
+ @Override
+ public Optional> shutdown() {
+ if (shutdownInitiated.compareAndSet(false, true)) {
+ return Optional.of(
+ CompletableFuture.runAsync(
+ () -> {
+ try {
+ if (config.enableVerboseLogging()) {
+ System.out.println("Initiating HTTP transport shutdown...");
+ }
+
+ CountDownLatch currentLatch = pendingRequestsLatch;
+ long timeoutMs = Math.min(config.shutdownTimeoutMs(), 3000);
+
+ if (currentLatch != null && activeRequests.get() > 0) {
+ if (config.enableVerboseLogging()) {
+ System.out.printf(
+ "Waiting for %d pending HTTP requests...%n", activeRequests.get());
+ }
+
+ boolean completed = currentLatch.await(timeoutMs, TimeUnit.MILLISECONDS);
+
+ if (completed && config.enableVerboseLogging()) {
+ System.out.println("All HTTP requests completed successfully");
+ } else if (!completed && config.enableVerboseLogging()) {
+ System.out.printf(
+ "Shutdown timeout exceeded, %d requests may be lost%n",
+ activeRequests.get());
+ }
+ }
+
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ } finally {
+ closed.set(true);
+ }
+ },
+ executorService));
+ }
+ return Optional.of(CompletableFuture.completedFuture(null));
+ }
+
+ @Override
+ public void close() {
+ shutdownInitiated.set(true);
+ closed.set(true);
+
+ try {
+ executorService.shutdown();
+ if (!executorService.awaitTermination(500, TimeUnit.MILLISECONDS)) {
+ if (config.enableVerboseLogging()) {
+ System.out.println("Forcing HTTP executor shutdown");
+ }
+ executorService.shutdownNow();
+ }
+
+ if (config.enableVerboseLogging()) {
+ System.out.println("HTTP transport closed");
+ }
+
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ executorService.shutdownNow();
+ }
+ }
+
+ /**
+ * Creates an optimized thread pool for HTTP operations. Uses more aggressive sizing for better
+ * throughput.
+ */
+ private ExecutorService createOptimizedExecutorService() {
+ int corePoolSize = Math.max(4, Runtime.getRuntime().availableProcessors());
+ int maxPoolSize = Math.max(config.maxConcurrentRequests(), corePoolSize * 2);
+
+ ThreadPoolExecutor executor =
+ new ThreadPoolExecutor(
+ corePoolSize,
+ maxPoolSize,
+ 60L,
+ TimeUnit.SECONDS,
+ new LinkedBlockingQueue<>(config.maxConcurrentRequests() * 2),
+ r -> {
+ Thread t = new Thread(r, "logdash-http-" + System.currentTimeMillis());
+ t.setDaemon(true);
+ return t;
+ },
+ new ThreadPoolExecutor.CallerRunsPolicy());
+
+ executor.allowCoreThreadTimeOut(true);
+ return executor;
+ }
+
+ private HttpClient createOptimizedHttpClient() {
+ return HttpClient.newBuilder()
+ .connectTimeout(Duration.ofMillis(Math.min(config.requestTimeoutMs(), 10000)))
+ .build();
+ }
+
+ private URI createEndpointUri(String endpoint) {
+ try {
+ String baseUrl =
+ config.baseUrl().endsWith("/")
+ ? config.baseUrl().substring(0, config.baseUrl().length() - 1)
+ : config.baseUrl();
+ return new URI(baseUrl + endpoint);
+ } catch (URISyntaxException e) {
+ throw new LogdashException("Invalid endpoint URI: " + config.baseUrl() + endpoint, e);
+ }
+ }
+
+ private boolean isSuccessResponse(int statusCode) {
+ return statusCode >= 200 && statusCode < 300;
+ }
+
+ private String maskApiKey(String apiKey) {
+ if (apiKey == null || apiKey.length() <= 8) {
+ return "***";
+ }
+ return apiKey.substring(0, 4) + "..." + apiKey.substring(apiKey.length() - 4);
+ }
+
+ private void logRequestStart(
+ String method, String endpoint, long requestId, int bodySize, int attempt) {
+ System.out.printf(
+ "Sending HTTP %s request #%d to %s (attempt %d): headers={Content-Type=[%s], %s=[%s]}, bodySize=%d%n",
+ method,
+ requestId,
+ endpoint,
+ attempt,
+ CONTENT_TYPE,
+ API_KEY_HEADER,
+ maskApiKey(config.apiKey()),
+ bodySize);
+ }
+
+ private void handleTransportError(String message, Throwable cause) {
+ if (config.enableVerboseLogging()) {
+ System.err.println("Logdash transport error: " + message);
+ if (cause != null) {
+ System.err.println("Cause: " + cause.getMessage());
+ }
+ }
+ }
+}
diff --git a/src/main/java/io/logdash/sdk/transport/LogdashTransport.java b/src/main/java/io/logdash/sdk/transport/LogdashTransport.java
new file mode 100644
index 0000000..407fada
--- /dev/null
+++ b/src/main/java/io/logdash/sdk/transport/LogdashTransport.java
@@ -0,0 +1,52 @@
+package io.logdash.sdk.transport;
+
+import io.logdash.sdk.log.LogEntry;
+import io.logdash.sdk.metrics.MetricEntry;
+
+import java.util.Optional;
+import java.util.concurrent.CompletableFuture;
+
+/**
+ * Transport interface for sending observability data to Logdash platform.
+ *
+ * Implementations handle the actual delivery mechanism for logs and metrics, whether through
+ * HTTP, console output, or other means. All operations are asynchronous to avoid blocking the
+ * application.
+ */
+public interface LogdashTransport {
+
+ /**
+ * Sends a log entry asynchronously to the transport destination.
+ *
+ * @param logEntry the log entry to send
+ * @return a future that completes when the operation finishes
+ */
+ CompletableFuture sendLog(LogEntry logEntry);
+
+ /**
+ * Sends a metric entry asynchronously to the transport destination.
+ *
+ * @param metricEntry the metric entry to send
+ * @return a future that completes when the operation finishes
+ */
+ CompletableFuture sendMetric(MetricEntry metricEntry);
+
+ /**
+ * Initiates graceful shutdown of the transport.
+ *
+ * Implementations should ensure all pending operations complete before the returned future
+ * completes.
+ *
+ * @return an optional future for shutdown completion, empty if no cleanup needed
+ */
+ default Optional> shutdown() {
+ return Optional.empty();
+ }
+
+ /**
+ * Immediately closes the transport and releases all resources.
+ *
+ * This method should be idempotent and safe to call multiple times.
+ */
+ void close();
+}
diff --git a/src/main/java/io/logdash/sdk/transport/NoOpTransport.java b/src/main/java/io/logdash/sdk/transport/NoOpTransport.java
new file mode 100644
index 0000000..4f0d8d4
--- /dev/null
+++ b/src/main/java/io/logdash/sdk/transport/NoOpTransport.java
@@ -0,0 +1,108 @@
+package io.logdash.sdk.transport;
+
+import io.logdash.sdk.config.LogdashConfig;
+import io.logdash.sdk.log.LogEntry;
+import io.logdash.sdk.metrics.MetricEntry;
+
+import java.time.OffsetDateTime;
+import java.time.format.DateTimeFormatter;
+import java.util.Locale;
+import java.util.Optional;
+import java.util.concurrent.CompletableFuture;
+
+/**
+ * No-operation transport that outputs observability data to console only.
+ *
+ *
This transport is used as a fallback when no API key is provided or when HTTP transport
+ * initialization fails. It provides local visibility into logs and metrics without sending data to
+ * external services.
+ *
+ *
Useful for development, testing, and scenarios where external connectivity is not available or
+ * desired.
+ */
+public final class NoOpTransport implements LogdashTransport {
+
+ private static final DateTimeFormatter CONSOLE_FORMATTER = DateTimeFormatter.ofPattern(
+ "yyyy-MM-dd'T'HH:mm:ss.SSSXXX");
+ private static final String RESET = "\u001B[0m";
+ private static final String CYAN = "\u001B[36m";
+ private static final String YELLOW = "\u001B[33m";
+
+ private final LogdashConfig config;
+ private volatile boolean closed = false;
+
+ /**
+ * Creates a new NoOp transport with the specified configuration.
+ *
+ * @param config the configuration for console output behavior
+ */
+ public NoOpTransport(LogdashConfig config) {
+ this.config = config;
+ if (config.enableVerboseLogging()) {
+ System.out.println("Logdash NoOp transport initialized (console-only mode)");
+ }
+ }
+
+ @Override
+ public CompletableFuture sendLog(LogEntry logEntry) {
+ if (closed) {
+ return CompletableFuture.completedFuture(null);
+ }
+
+ if (config.enableConsoleOutput()) {
+ printLogToConsole(logEntry);
+ }
+
+ return CompletableFuture.completedFuture(null);
+ }
+
+ @Override
+ public CompletableFuture sendMetric(MetricEntry metricEntry) {
+ if (closed) {
+ return CompletableFuture.completedFuture(null);
+ }
+
+ if (config.enableConsoleOutput()) {
+ printMetricToConsole(metricEntry);
+ }
+
+ return CompletableFuture.completedFuture(null);
+ }
+
+ @Override
+ public Optional> shutdown() {
+ return Optional.empty();
+ }
+
+ @Override
+ public void close() {
+ closed = true;
+ if (config.enableVerboseLogging()) {
+ System.out.println("Logdash NoOp transport closed");
+ }
+ }
+
+ private void printLogToConsole(LogEntry logEntry) {
+ var timestamp = OffsetDateTime.now().format(CONSOLE_FORMATTER);
+ System.out.printf(
+ "%s [%sLOG%s] %s: %s (seq=%d)%n",
+ timestamp,
+ CYAN,
+ RESET,
+ logEntry.level().getValue().toUpperCase(Locale.ENGLISH),
+ logEntry.message(),
+ logEntry.sequenceNumber());
+ }
+
+ private void printMetricToConsole(MetricEntry metricEntry) {
+ var timestamp = OffsetDateTime.now().format(CONSOLE_FORMATTER);
+ System.out.printf(
+ "%s [%sMETRIC%s] %s %s = %s%n",
+ timestamp,
+ YELLOW,
+ RESET,
+ metricEntry.operation().getValue().toUpperCase(Locale.ENGLISH),
+ metricEntry.name(),
+ metricEntry.value());
+ }
+}
diff --git a/src/main/java/io/logdash/sdk/util/JsonSerializer.java b/src/main/java/io/logdash/sdk/util/JsonSerializer.java
new file mode 100644
index 0000000..a65535d
--- /dev/null
+++ b/src/main/java/io/logdash/sdk/util/JsonSerializer.java
@@ -0,0 +1,124 @@
+package io.logdash.sdk.util;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import com.fasterxml.jackson.databind.MapperFeature;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.SerializationFeature;
+import com.fasterxml.jackson.databind.json.JsonMapper;
+import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
+import io.logdash.sdk.log.LogEntry;
+import io.logdash.sdk.metrics.MetricEntry;
+
+import java.util.Map;
+
+/**
+ * Internal JSON serializer for converting SDK objects to JSON format.
+ *
+ * <p>This lightweight serializer, built on Jackson, is specifically designed for Logdash SDK objects.
+ * It handles proper escaping and formatting according to JSON specification.
+ *
+ *
+ * <p>Note: This class is internal to the SDK and should not be used directly by
+ * client applications.
+ */
+public final class JsonSerializer {
+ private static final ObjectMapper OBJECT_MAPPER = createObjectMapper();
+
+ private static ObjectMapper createObjectMapper() {
+ return JsonMapper.builder()
+ .addModule(new JavaTimeModule())
+ .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES)
+ .disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS)
+ .enable(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY)
+ .serializationInclusion(JsonInclude.Include.NON_NULL)
+ .build();
+ }
+
+ /**
+ * Serializes a log entry to JSON format.
+ *
+ * @param logEntry the log entry to serialize, must not be null
+ * @return JSON string representation
+ */
+ public String serialize(LogEntry logEntry) {
+ try {
+ var logData =
+ Map.of(
+ "message", sanitizeString(logEntry.message()),
+ "level", logEntry.level().getValue(),
+ "createdAt", logEntry.createdAt(),
+ "sequenceNumber", logEntry.sequenceNumber());
+ return OBJECT_MAPPER.writeValueAsString(logData);
+ } catch (JsonProcessingException e) {
+ return createFallbackLogJson(logEntry, e);
+ }
+ }
+
+ /**
+ * Serializes a metric entry to JSON format.
+ *
+ * @param metricEntry the metric entry to serialize, must not be null
+ * @return JSON string representation
+ */
+ public String serialize(MetricEntry metricEntry) {
+ try {
+ var metricData =
+ Map.of(
+ "name", sanitizeString(metricEntry.name()),
+ "value", sanitizeNumber(metricEntry.value()),
+ "operation", metricEntry.operation().getValue());
+ return OBJECT_MAPPER.writeValueAsString(metricData);
+ } catch (JsonProcessingException e) {
+ return createFallbackMetricJson(metricEntry, e);
+ }
+ }
+
+ private String sanitizeString(String input) {
+ if (input == null) return "";
+ return input.replaceAll("[\\p{Cntrl}&&[^\t\n\r]]", "");
+ }
+
+ private Number sanitizeNumber(Number number) {
+ if (number instanceof Double d) {
+ if (Double.isNaN(d) || Double.isInfinite(d)) {
+ return 0.0;
+ }
+ }
+ if (number instanceof Float f) {
+ if (Float.isNaN(f) || Float.isInfinite(f)) {
+ return 0.0f;
+ }
+ }
+ return number;
+ }
+
+ private String createFallbackLogJson(LogEntry logEntry, Exception error) {
+ return String.format(
+ "{\"message\":\"%s\",\"level\":\"%s\",\"createdAt\":\"%s\",\"sequenceNumber\":%d,"
+ + "\"_error\":\"Serialization failed: %s\"}",
+ escapeJsonString(logEntry.message()),
+ logEntry.level().getValue(),
+ logEntry.createdAt(),
+ logEntry.sequenceNumber(),
+ escapeJsonString(error.getMessage()));
+ }
+
+ private String createFallbackMetricJson(MetricEntry metricEntry, Exception error) {
+ return String.format(
+ "{\"name\":\"%s\",\"value\":0,\"operation\":\"%s\",\"_error\":\"Serialization failed: %s\"}",
+ escapeJsonString(metricEntry.name()),
+ metricEntry.operation().getValue(),
+ escapeJsonString(error.getMessage()));
+ }
+
+ private String escapeJsonString(String input) {
+ if (input == null) return "";
+ return input
+ .replace("\\", "\\\\")
+ .replace("\"", "\\\"")
+ .replace("\n", "\\n")
+ .replace("\r", "\\r")
+ .replace("\t", "\\t");
+ }
+}
diff --git a/src/test/java/io/logdash/sdk/LogdashTest.java b/src/test/java/io/logdash/sdk/LogdashTest.java
new file mode 100644
index 0000000..edb7fde
--- /dev/null
+++ b/src/test/java/io/logdash/sdk/LogdashTest.java
@@ -0,0 +1,229 @@
+package io.logdash.sdk;
+
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import static org.assertj.core.api.Assertions.*;
+
+class LogdashTest {
+
+ @Test
+ void should_create_sdk_with_valid_configuration() {
+ assertThatNoException()
+ .isThrownBy(
+ () -> {
+ try (var sdk =
+ Logdash.builder()
+ .apiKey("test-api-key")
+ .baseUrl("https://api.logdash.io")
+ .build()) {
+
+ assertThat(sdk.logger()).isNotNull();
+ assertThat(sdk.metrics()).isNotNull();
+ assertThat(sdk.config().apiKey()).isEqualTo("test-api-key");
+ assertThat(sdk.config().baseUrl()).isEqualTo("https://api.logdash.io");
+ }
+ });
+ }
+
+ @Test
+ void should_create_sdk_with_default_configuration() {
+ // Act
+ try (var sdk = Logdash.create("test-key")) {
+ // Assert
+ assertThat(sdk.config().apiKey()).isEqualTo("test-key");
+ assertThat(sdk.config().baseUrl()).isEqualTo("https://api.logdash.io");
+ assertThat(sdk.config().enableConsoleOutput()).isTrue();
+ assertThat(sdk.config().enableVerboseLogging()).isFalse();
+ }
+ }
+
+ @Test
+ void should_create_sdk_without_api_key_and_use_noop_transport() {
+ // Act
+ try (var sdk = Logdash.builder()
+ .apiKey("")
+ .enableVerboseLogging(true)
+ .build()) {
+
+ // Assert
+ assertThat(sdk.logger()).isNotNull();
+ assertThat(sdk.metrics()).isNotNull();
+ }
+ }
+
+ @Test
+ void should_throw_exception_when_accessing_closed_sdk() {
+ // Arrange
+ var sdk = Logdash.create("test-key");
+ sdk.close();
+
+ // Act & Assert
+ assertThatThrownBy(sdk::logger)
+ .isInstanceOf(IllegalStateException.class)
+ .hasMessageContaining("closed");
+
+ assertThatThrownBy(sdk::metrics)
+ .isInstanceOf(IllegalStateException.class)
+ .hasMessageContaining("closed");
+ }
+
+ @Test
+ @Timeout(value = 10, unit = TimeUnit.SECONDS)
+ void should_handle_flush_operation() {
+ // Arrange
+ try (var sdk = Logdash.builder()
+ .apiKey("test-key")
+ .enableVerboseLogging(false)
+ .build()) {
+
+ sdk.logger().info("Test message");
+ sdk.metrics().set("test_metric", 42);
+
+ // Act & Assert
+ assertThatNoException().isThrownBy(sdk::flush);
+ }
+ }
+
+ @Test
+ void should_configure_all_builder_options() {
+ // Act
+ try (var sdk =
+ Logdash.builder()
+ .apiKey("custom-key")
+ .baseUrl("https://custom.logdash.io")
+ .enableConsoleOutput(false)
+ .enableVerboseLogging(true)
+ .maxRetries(5)
+ .retryDelayMs(2000L)
+ .requestTimeoutMs(20000L)
+ .shutdownTimeoutMs(15000L)
+ .maxConcurrentRequests(50)
+ .build()) {
+
+ // Assert
+ var config = sdk.config();
+ assertThat(config.apiKey()).isEqualTo("custom-key");
+ assertThat(config.baseUrl()).isEqualTo("https://custom.logdash.io");
+ assertThat(config.enableConsoleOutput()).isFalse();
+ assertThat(config.enableVerboseLogging()).isTrue();
+ assertThat(config.maxRetries()).isEqualTo(5);
+ assertThat(config.retryDelayMs()).isEqualTo(2000L);
+ assertThat(config.requestTimeoutMs()).isEqualTo(20000L);
+ assertThat(config.shutdownTimeoutMs()).isEqualTo(15000L);
+ assertThat(config.maxConcurrentRequests()).isEqualTo(50);
+ }
+ }
+
+ @Test
+ void should_allow_multiple_close_calls_safely() {
+ // Arrange
+ var sdk = Logdash.create("test-key");
+
+ // Act & Assert
+ assertThatNoException()
+ .isThrownBy(
+ () -> {
+ sdk.close();
+ sdk.close(); // Second close should be safe
+ });
+ }
+
+ @Test
+ void should_work_with_try_with_resources() {
+ // Act & Assert
+ assertThatNoException()
+ .isThrownBy(
+ () -> {
+ try (var sdk = Logdash.create("test-key")) {
+ sdk.logger().info("Test message", Map.of("key", "value"));
+ sdk.metrics().mutate("test_counter", 1);
+ }
+ // Auto-close should work without issues
+ });
+ }
+
+ @Test
+ void should_handle_builder_with_all_default_values() {
+ try (var sdk = Logdash.builder().build()) {
+ var config = sdk.config();
+
+ assertThat(config.apiKey()).isNull();
+ assertThat(config.baseUrl()).isEqualTo("https://api.logdash.io");
+ assertThat(config.enableConsoleOutput()).isTrue();
+ assertThat(config.enableVerboseLogging()).isFalse();
+ assertThat(config.maxRetries()).isEqualTo(3);
+ assertThat(config.retryDelayMs()).isEqualTo(500L);
+ assertThat(config.requestTimeoutMs()).isEqualTo(15000L);
+ assertThat(config.shutdownTimeoutMs()).isEqualTo(10000L);
+ assertThat(config.maxConcurrentRequests()).isEqualTo(20);
+ }
+ }
+
+ @Test
+ void should_handle_runtime_shutdown_hook() {
+ // Test that shutdown hook is properly registered and removed
+ var sdk = Logdash.create("test-key");
+
+ // Verify SDK is operational
+ assertThat(sdk.logger()).isNotNull();
+ assertThat(sdk.metrics()).isNotNull();
+
+ // Close should not throw exception
+ assertThatNoException().isThrownBy(sdk::close);
+ }
+
+ @Test
+ void should_maintain_thread_safety_during_concurrent_access() throws InterruptedException {
+ try (var sdk = Logdash.builder()
+ .apiKey("concurrent-test")
+ .enableConsoleOutput(false)
+ .build()) {
+
+ var executor = Executors.newFixedThreadPool(10);
+ var latch = new CountDownLatch(50);
+ var exceptions = new ConcurrentLinkedQueue<Exception>();
+
+ // Submit concurrent operations
+ for (int i = 0; i < 50; i++) {
+ final int iteration = i;
+ executor.submit(
+ () -> {
+ try {
+ sdk.logger().info("Concurrent message " + iteration);
+ sdk.metrics().mutate("concurrent_counter", 1);
+ } catch (Exception e) {
+ exceptions.add(e);
+ } finally {
+ latch.countDown();
+ }
+ });
+ }
+
+ // Wait for all operations to complete
+ assertThat(latch.await(10, TimeUnit.SECONDS)).isTrue();
+ assertThat(exceptions).isEmpty();
+
+ executor.shutdown();
+ assertThat(executor.awaitTermination(5, TimeUnit.SECONDS)).isTrue();
+ }
+ }
+
+ @Test
+ void should_handle_flush_with_no_pending_operations() {
+ try (var sdk = Logdash.builder()
+ .apiKey("test-key")
+ .enableConsoleOutput(false)
+ .build()) {
+
+ // Flush when no operations are pending
+ assertThatNoException().isThrownBy(sdk::flush);
+ }
+ }
+}
diff --git a/src/test/java/io/logdash/sdk/config/LogdashConfigBuilderTest.java b/src/test/java/io/logdash/sdk/config/LogdashConfigBuilderTest.java
new file mode 100644
index 0000000..34e4170
--- /dev/null
+++ b/src/test/java/io/logdash/sdk/config/LogdashConfigBuilderTest.java
@@ -0,0 +1,79 @@
+package io.logdash.sdk.config;
+
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.ValueSource;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatNoException;
+
+class LogdashConfigBuilderTest {
+
+ @Test
+ void should_chain_builder_methods_correctly() {
+ // Act
+ var builder =
+ LogdashConfig.builder()
+ .apiKey("test")
+ .baseUrl("https://test.com")
+ .enableConsoleOutput(false)
+ .enableVerboseLogging(true)
+ .maxRetries(5)
+ .retryDelayMs(1000L)
+ .requestTimeoutMs(5000L)
+ .shutdownTimeoutMs(3000L)
+ .maxConcurrentRequests(20);
+
+ // Assert
+ assertThat(builder).isNotNull();
+
+ var config = builder.build();
+ assertThat(config.apiKey()).isEqualTo("test");
+ assertThat(config.baseUrl()).isEqualTo("https://test.com");
+ assertThat(config.enableConsoleOutput()).isFalse();
+ assertThat(config.enableVerboseLogging()).isTrue();
+ assertThat(config.maxRetries()).isEqualTo(5);
+ assertThat(config.retryDelayMs()).isEqualTo(1000L);
+ assertThat(config.requestTimeoutMs()).isEqualTo(5000L);
+ assertThat(config.shutdownTimeoutMs()).isEqualTo(3000L);
+ assertThat(config.maxConcurrentRequests()).isEqualTo(20);
+ }
+
+ @ParameterizedTest
+ @ValueSource(
+ strings = {
+ "https://api.example.com",
+ "http://localhost:8080",
+ "https://subdomain.example.org/path",
+ "relative-path"
+ })
+ void should_accept_various_url_formats(String url) {
+ // Act & Assert
+ assertThatNoException().isThrownBy(() -> LogdashConfig.builder().baseUrl(url).build());
+ }
+
+ @Test
+ void should_validate_extreme_timeout_values() {
+ // Very large values should be accepted
+ assertThatNoException()
+ .isThrownBy(
+ () ->
+ LogdashConfig.builder()
+ .requestTimeoutMs(Long.MAX_VALUE)
+ .shutdownTimeoutMs(Long.MAX_VALUE)
+ .retryDelayMs(Long.MAX_VALUE)
+ .build());
+
+ // Boundary values
+ assertThatNoException()
+ .isThrownBy(
+ () ->
+ LogdashConfig.builder()
+ .requestTimeoutMs(1L)
+ .shutdownTimeoutMs(1L)
+ .retryDelayMs(0L)
+ .maxRetries(0)
+ .maxConcurrentRequests(1)
+ .build());
+ }
+}
diff --git a/src/test/java/io/logdash/sdk/config/LogdashConfigTest.java b/src/test/java/io/logdash/sdk/config/LogdashConfigTest.java
new file mode 100644
index 0000000..f1cebd4
--- /dev/null
+++ b/src/test/java/io/logdash/sdk/config/LogdashConfigTest.java
@@ -0,0 +1,259 @@
+package io.logdash.sdk.config;
+
+import io.logdash.sdk.exception.LogdashException;
+import org.junit.jupiter.api.Test;
+
+import static org.assertj.core.api.Assertions.*;
+
+class LogdashConfigTest {
+
+ @Test
+ void should_create_config_with_valid_parameters() {
+ // Act
+ var config =
+ LogdashConfig.builder()
+ .apiKey("test-key")
+ .baseUrl("https://api.logdash.io")
+ .enableConsoleOutput(true)
+ .enableVerboseLogging(false)
+ .maxRetries(3)
+ .retryDelayMs(1000L)
+ .requestTimeoutMs(15000L)
+ .shutdownTimeoutMs(10000L)
+ .maxConcurrentRequests(10)
+ .build();
+
+ // Assert
+ assertThat(config.apiKey()).isEqualTo("test-key");
+ assertThat(config.baseUrl()).isEqualTo("https://api.logdash.io");
+ assertThat(config.enableConsoleOutput()).isTrue();
+ assertThat(config.enableVerboseLogging()).isFalse();
+ assertThat(config.maxRetries()).isEqualTo(3);
+ assertThat(config.retryDelayMs()).isEqualTo(1000L);
+ assertThat(config.requestTimeoutMs()).isEqualTo(15000L);
+ assertThat(config.shutdownTimeoutMs()).isEqualTo(10000L);
+ assertThat(config.maxConcurrentRequests()).isEqualTo(10);
+ }
+
+ @Test
+ void should_use_default_values() {
+ // Act
+ var config = LogdashConfig.builder().build();
+
+ // Assert
+ assertThat(config.apiKey()).isNull();
+ assertThat(config.baseUrl()).isEqualTo("https://api.logdash.io");
+ assertThat(config.enableConsoleOutput()).isTrue();
+ assertThat(config.enableVerboseLogging()).isFalse();
+ assertThat(config.maxRetries()).isEqualTo(3);
+ assertThat(config.retryDelayMs()).isEqualTo(500L);
+ assertThat(config.requestTimeoutMs()).isEqualTo(15000L);
+ assertThat(config.shutdownTimeoutMs()).isEqualTo(10000L);
+ assertThat(config.maxConcurrentRequests()).isEqualTo(10);
+ }
+
+ @Test
+ void should_throw_exception_for_null_base_url() {
+ assertThatThrownBy(() ->
+ LogdashConfig.builder()
+ .baseUrl(null)
+ .build()
+ ).isInstanceOf(LogdashException.class)
+ .hasMessageContaining("Base URL cannot be null or blank");
+ }
+
+ @Test
+ void should_throw_exception_for_blank_base_url() {
+ assertThatThrownBy(() ->
+ LogdashConfig.builder()
+ .baseUrl(" ")
+ .build()
+ ).isInstanceOf(LogdashException.class)
+ .hasMessageContaining("Base URL cannot be null or blank");
+ }
+
+ @Test
+ void should_accept_relative_urls_as_valid() {
+ assertThatNoException().isThrownBy(() ->
+ LogdashConfig.builder()
+ .baseUrl("not-a-valid-url")
+ .build()
+ );
+ }
+
+ @Test
+ void should_throw_exception_for_invalid_uri_syntax() {
+ assertThatThrownBy(() ->
+ LogdashConfig.builder()
+ .baseUrl("ht tp://invalid url with spaces")
+ .build()
+ ).isInstanceOf(LogdashException.class)
+ .hasMessageContaining("Invalid base URL");
+ }
+
+ @Test
+ void should_throw_exception_for_negative_max_retries() {
+ assertThatThrownBy(() ->
+ LogdashConfig.builder()
+ .maxRetries(-1)
+ .build()
+ ).isInstanceOf(LogdashException.class)
+ .hasMessageContaining("Max retries cannot be negative");
+ }
+
+ @Test
+ void should_throw_exception_for_negative_retry_delay() {
+ assertThatThrownBy(() ->
+ LogdashConfig.builder()
+ .retryDelayMs(-1L)
+ .build()
+ ).isInstanceOf(LogdashException.class)
+ .hasMessageContaining("Retry delay cannot be negative");
+ }
+
+ @Test
+ void should_throw_exception_for_zero_or_negative_request_timeout() {
+ assertThatThrownBy(() ->
+ LogdashConfig.builder()
+ .requestTimeoutMs(0L)
+ .build()
+ ).isInstanceOf(LogdashException.class)
+ .hasMessageContaining("Request timeout must be positive");
+
+ assertThatThrownBy(() ->
+ LogdashConfig.builder()
+ .requestTimeoutMs(-1000L)
+ .build()
+ ).isInstanceOf(LogdashException.class)
+ .hasMessageContaining("Request timeout must be positive");
+ }
+
+ @Test
+ void should_throw_exception_for_zero_or_negative_shutdown_timeout() {
+ assertThatThrownBy(() ->
+ LogdashConfig.builder()
+ .shutdownTimeoutMs(0L)
+ .build()
+ ).isInstanceOf(LogdashException.class)
+ .hasMessageContaining("Shutdown timeout must be positive");
+ }
+
+ @Test
+ void should_throw_exception_for_zero_or_negative_max_concurrent_requests() {
+ assertThatThrownBy(() ->
+ LogdashConfig.builder()
+ .maxConcurrentRequests(0)
+ .build()
+ ).isInstanceOf(LogdashException.class)
+ .hasMessageContaining("Max concurrent requests must be positive");
+ }
+
+ @Test
+ void should_accept_zero_max_retries() {
+ assertThatNoException().isThrownBy(() ->
+ LogdashConfig.builder()
+ .maxRetries(0)
+ .build()
+ );
+ }
+
+ @Test
+ void should_accept_zero_retry_delay() {
+ assertThatNoException().isThrownBy(() ->
+ LogdashConfig.builder()
+ .retryDelayMs(0L)
+ .build()
+ );
+ }
+
+ @Test
+ void should_validate_all_parameters_in_constructor() {
+ assertThatThrownBy(() ->
+ new LogdashConfig(
+ "test-key",
+ "",
+ true,
+ false,
+ 3,
+ 1000L,
+ 15000L,
+ 10000L,
+ 10
+ )
+ ).isInstanceOf(LogdashException.class)
+ .hasMessageContaining("Base URL cannot be null or blank");
+ }
+
+ @Test
+ void should_create_valid_urls_for_common_protocols() {
+ assertThatNoException()
+ .isThrownBy(
+ () -> {
+ LogdashConfig.builder().baseUrl("https://api.logdash.io").build();
+ LogdashConfig.builder().baseUrl("http://localhost:8080").build();
+ LogdashConfig.builder().baseUrl("https://custom-domain.com/api/v1").build();
+ });
+ }
+
+ @Test
+ void should_validate_url_schemes_correctly() {
+ // Act & Assert
+ assertThatNoException()
+ .isThrownBy(
+ () -> {
+ LogdashConfig.builder().baseUrl("https://api.logdash.io").build();
+ LogdashConfig.builder().baseUrl("http://localhost:8080").build();
+ LogdashConfig.builder().baseUrl("HTTP://EXAMPLE.COM").build();
+ LogdashConfig.builder().baseUrl("HTTPS://EXAMPLE.COM").build();
+ });
+ }
+
+ @Test
+ void should_handle_edge_case_timeout_values() {
+ // Act & Assert
+ assertThatNoException()
+ .isThrownBy(
+ () -> {
+ LogdashConfig.builder()
+ .requestTimeoutMs(1L)
+ .shutdownTimeoutMs(1L)
+ .retryDelayMs(0L)
+ .maxRetries(0)
+ .maxConcurrentRequests(1)
+ .build();
+ });
+ }
+
+ @Test
+ void should_validate_concurrent_requests_limits() {
+ assertThatThrownBy(() ->
+ LogdashConfig.builder()
+ .maxConcurrentRequests(-1)
+ .build()
+ ).isInstanceOf(LogdashException.class)
+ .hasMessageContaining("Max concurrent requests must be positive");
+ }
+
+ @Test
+ void should_handle_builder_method_chaining() {
+ // Act
+ var config =
+ LogdashConfig.builder()
+ .apiKey("key")
+ .baseUrl("https://test.com")
+ .enableConsoleOutput(false)
+ .enableVerboseLogging(true)
+ .maxRetries(5)
+ .retryDelayMs(2000L)
+ .requestTimeoutMs(30000L)
+ .shutdownTimeoutMs(15000L)
+ .maxConcurrentRequests(50);
+
+ // Assert
+ assertThatNoException().isThrownBy(config::build);
+
+ var builtConfig = config.build();
+ assertThat(builtConfig.apiKey()).isEqualTo("key");
+ assertThat(builtConfig.maxConcurrentRequests()).isEqualTo(50);
+ }
+}
diff --git a/src/test/java/io/logdash/sdk/error/ErrorHandlingTest.java b/src/test/java/io/logdash/sdk/error/ErrorHandlingTest.java
new file mode 100644
index 0000000..041c549
--- /dev/null
+++ b/src/test/java/io/logdash/sdk/error/ErrorHandlingTest.java
@@ -0,0 +1,141 @@
+package io.logdash.sdk.error;
+
+import io.logdash.sdk.Logdash;
+import io.logdash.sdk.config.LogdashConfig;
+import io.logdash.sdk.exception.LogdashException;
+import io.logdash.sdk.log.LogEntry;
+import io.logdash.sdk.log.LogLevel;
+import io.logdash.sdk.metrics.MetricEntry;
+import io.logdash.sdk.metrics.MetricType;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.NullAndEmptySource;
+import org.junit.jupiter.params.provider.ValueSource;
+
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import static org.assertj.core.api.Assertions.*;
+
+class ErrorHandlingTest {
+
+ @ParameterizedTest
+ @NullAndEmptySource
+ @ValueSource(strings = {" ", "\t", "\n"})
+ void should_handle_invalid_api_keys(String apiKey) {
+ assertThatNoException()
+ .isThrownBy(
+ () -> {
+ try (var logdash = Logdash.builder().apiKey(apiKey).build()) {
+
+ logdash.logger().info("Test");
+ logdash.metrics().mutate("counter", 1);
+ }
+ });
+ }
+
+ @Test
+ void should_validate_config_parameters() {
+ assertThatThrownBy(() ->
+ LogdashConfig.builder().maxRetries(-1).build()
+ ).isInstanceOf(LogdashException.class)
+ .hasMessageContaining("Max retries cannot be negative");
+
+ assertThatThrownBy(() ->
+ LogdashConfig.builder().requestTimeoutMs(0).build()
+ ).isInstanceOf(LogdashException.class)
+ .hasMessageContaining("Request timeout must be positive");
+
+ assertThatThrownBy(() ->
+ LogdashConfig.builder().maxConcurrentRequests(0).build()
+ ).isInstanceOf(LogdashException.class)
+ .hasMessageContaining("Max concurrent requests must be positive");
+ }
+
+ @Test
+ void should_handle_extreme_values() {
+ assertThatNoException()
+ .isThrownBy(
+ () -> {
+ try (var logdash =
+ Logdash.builder()
+ .apiKey("test")
+ .requestTimeoutMs(Long.MAX_VALUE)
+ .retryDelayMs(Long.MAX_VALUE)
+ .maxRetries(Integer.MAX_VALUE)
+ .build()) {
+
+ logdash.logger().info("Extreme config test");
+ }
+ });
+ }
+
+ @Test
+ void should_handle_resource_exhaustion_scenarios() {
+ try (var logdash =
+ Logdash.builder().apiKey("test").maxConcurrentRequests(1).requestTimeoutMs(100L).build()) {
+
+ for (int i = 0; i < 100; i++) {
+ logdash.logger().info("Stress test " + i);
+ logdash.metrics().mutate("stress_counter", 1);
+ }
+
+ assertThatNoException().isThrownBy(logdash::flush);
+ }
+ }
+
+ @Test
+ void should_handle_malformed_data_gracefully() {
+ assertThatNoException()
+ .isThrownBy(
+ () -> {
+ var entry = new LogEntry("\u0000\u001F\uFFFF", LogLevel.INFO, 1L);
+ });
+
+ assertThatNoException()
+ .isThrownBy(
+ () -> {
+ var metric = new MetricEntry("test\n\r\t", Double.NaN, MetricType.SET);
+ });
+ }
+
+ @Test
+ void should_maintain_thread_safety_under_stress() throws InterruptedException {
+ try (var logdash =
+ Logdash.builder().apiKey("thread-safety-test").enableConsoleOutput(false).build()) {
+
+ var executor = Executors.newFixedThreadPool(20);
+ var latch = new CountDownLatch(200);
+ var exceptions = new ConcurrentLinkedQueue<Exception>();
+
+ for (int i = 0; i < 200; i++) {
+ final int iteration = i;
+ executor.submit(
+ () -> {
+ try {
+ logdash.logger().info("Thread safety test " + iteration);
+ logdash.metrics().mutate("thread_counter", 1);
+ logdash.metrics().set("thread_id", getThreadId());
+ } catch (Exception e) {
+ exceptions.add(e);
+ } finally {
+ latch.countDown();
+ }
+ });
+ }
+
+ assertThat(latch.await(30, TimeUnit.SECONDS)).isTrue();
+ assertThat(exceptions).isEmpty();
+
+ executor.shutdown();
+ assertThat(executor.awaitTermination(5, TimeUnit.SECONDS)).isTrue();
+ }
+ }
+
+ @SuppressWarnings("deprecation")
+ private long getThreadId() {
+ return Thread.currentThread().getId();
+ }
+}
diff --git a/src/test/java/io/logdash/sdk/exception/LogdashExceptionTest.java b/src/test/java/io/logdash/sdk/exception/LogdashExceptionTest.java
new file mode 100644
index 0000000..6a5f84a
--- /dev/null
+++ b/src/test/java/io/logdash/sdk/exception/LogdashExceptionTest.java
@@ -0,0 +1,41 @@
+package io.logdash.sdk.exception;
+
+import org.junit.jupiter.api.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+class LogdashExceptionTest {
+
+ @Test
+ void should_create_exception_with_message() {
+ // Arrange
+ var message = "Test error message";
+
+ // Act
+ var exception = new LogdashException(message);
+
+ // Assert
+ assertThat(exception.getMessage()).isEqualTo(message);
+ assertThat(exception.getCause()).isNull();
+ }
+
+ @Test
+ void should_create_exception_with_message_and_cause() {
+ // Arrange
+ var message = "Test error message";
+ var cause = new RuntimeException("Root cause");
+
+ // Act
+ var exception = new LogdashException(message, cause);
+
+ // Assert
+ assertThat(exception.getMessage()).isEqualTo(message);
+ assertThat(exception.getCause()).isEqualTo(cause);
+ }
+
+ @Test
+ void should_be_runtime_exception() {
+ var exception = new LogdashException("Test");
+ assertThat(exception).isInstanceOf(RuntimeException.class);
+ }
+}
diff --git a/src/test/java/io/logdash/sdk/integration/LogdashIntegrationTest.java b/src/test/java/io/logdash/sdk/integration/LogdashIntegrationTest.java
new file mode 100644
index 0000000..462a4e9
--- /dev/null
+++ b/src/test/java/io/logdash/sdk/integration/LogdashIntegrationTest.java
@@ -0,0 +1,200 @@
+package io.logdash.sdk.integration;
+
+import io.logdash.sdk.Logdash;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.TimeUnit;
+
+import static org.assertj.core.api.Assertions.assertThatNoException;
+
+class LogdashIntegrationTest {
+
+ @Test
+ @Timeout(value = 30, unit = TimeUnit.SECONDS)
+ void should_handle_rapid_successive_operations() {
+ try (var logdash =
+ Logdash.builder()
+ .apiKey("test-key")
+ .enableVerboseLogging(false)
+ .enableConsoleOutput(false)
+ .maxConcurrentRequests(5)
+ .requestTimeoutMs(1000L)
+ .retryDelayMs(50L)
+ .maxRetries(1)
+ .build()) {
+
+ var logger = logdash.logger();
+ var metrics = logdash.metrics();
+
+ // Rapid operations
+ for (int i = 0; i < 50; i++) {
+ logger.info("Message " + i, Map.of("iteration", i));
+ metrics.mutate("counter", i % 5);
+ if (i % 5 == 0) {
+ metrics.set("batch", i / 5);
+ }
+ }
+
+ // Should complete without exceptions
+ assertThatNoException().isThrownBy(logdash::flush);
+ }
+ }
+
+ @Test
+ void should_handle_all_log_levels_correctly() {
+ try (var logdash = Logdash.builder().apiKey("test-key").enableConsoleOutput(false).build()) {
+ var logger = logdash.logger();
+
+ assertThatNoException()
+ .isThrownBy(
+ () -> {
+ logger.error("Error message");
+ logger.warn("Warning message");
+ logger.info("Info message");
+ logger.http("HTTP message");
+ logger.verbose("Verbose message");
+ logger.debug("Debug message");
+ logger.silly("Silly message");
+ });
+ }
+ }
+
+ @Test
+ void should_handle_metric_operations_correctly() {
+ try (var logdash = Logdash.builder().apiKey("test-key").enableConsoleOutput(false).build()) {
+ var metrics = logdash.metrics();
+
+ assertThatNoException()
+ .isThrownBy(
+ () -> {
+ metrics.set("users", 100);
+ metrics.mutate("requests", 1);
+ metrics.mutate("requests", 5);
+ metrics.mutate("errors", -1);
+ metrics.mutate("errors", -2);
+ metrics.mutate("temperature", -5.5);
+ });
+ }
+ }
+
+ @Test
+ void should_recover_from_transport_creation_failure() {
+ // Using invalid URL to force transport creation failure
+ try (var logdash =
+ Logdash.builder()
+ .apiKey("test-key")
+ .baseUrl("invalid-url")
+ .enableVerboseLogging(false)
+ .enableConsoleOutput(false)
+ .build()) {
+
+ // Should still work with NoOp transport
+ assertThatNoException()
+ .isThrownBy(
+ () -> {
+ logdash.logger().info("Test message");
+ logdash.metrics().set("test", 1);
+ });
+ }
+ }
+
+ @Test
+ void should_handle_no_api_key_scenario() {
+ try (var logdash = Logdash.builder().apiKey("").enableConsoleOutput(false).build()) {
+
+ assertThatNoException()
+ .isThrownBy(
+ () -> {
+ logdash.logger().info("Test message");
+ logdash.metrics().mutate("counter", 1);
+ });
+ }
+ }
+
+ @Test
+ @Timeout(value = 45, unit = TimeUnit.SECONDS)
+ void should_handle_mixed_logging_and_metrics_under_load() {
+ try (var logdash =
+ Logdash.builder()
+ .apiKey("load-test-key")
+ .enableVerboseLogging(false)
+ .enableConsoleOutput(false)
+ .maxConcurrentRequests(10)
+ .requestTimeoutMs(2000L)
+ .retryDelayMs(100L)
+ .maxRetries(2)
+ .build()) {
+
+ var logger = logdash.logger();
+ var metrics = logdash.metrics();
+ var futures = new ArrayList<CompletableFuture<Void>>();
+
+ // Act
+ for (int i = 0; i < 100; i++) {
+ final int iteration = i;
+
+ // Log at different levels
+ logger.info("Iteration " + iteration, Map.of("step", iteration));
+ if (iteration % 10 == 0) {
+ logger.warn("Checkpoint reached", Map.of("checkpoint", iteration / 10));
+ }
+ if (iteration % 25 == 0) {
+ logger.error("Quarter milestone", Map.of("quarter", iteration / 25));
+ }
+
+ // Various metrics
+ metrics.mutate("iterations", 1);
+ metrics.set("current_iteration", iteration);
+ if (iteration % 5 == 0) {
+ metrics.mutate("batch_size", iteration % 3 == 0 ? 1 : -1);
+ }
+ }
+
+ // Assert
+ assertThatNoException().isThrownBy(logdash::flush);
+ }
+ }
+
+ @Test
+ void should_handle_very_long_messages_gracefully() {
+ try (var logdash =
+ Logdash.builder().apiKey("long-message-test").enableConsoleOutput(false).build()) {
+
+ var logger = logdash.logger();
+ var veryLongMessage = "X".repeat(50000); // 50KB message
+
+ // Act & Assert
+ assertThatNoException()
+ .isThrownBy(
+ () -> {
+ logger.info(veryLongMessage);
+ logger.debug(veryLongMessage, Map.of("size", veryLongMessage.length()));
+ });
+ }
+ }
+
+ @Test
+ void should_maintain_sequence_numbers_across_different_log_levels() {
+ try (var logdash =
+ Logdash.builder().apiKey("sequence-test").enableConsoleOutput(false).build()) {
+
+ var logger = logdash.logger();
+
+ // Act
+ logger.error("Error 1");
+ logger.info("Info 1");
+ logger.debug("Debug 1");
+ logger.warn("Warn 1");
+ logger.verbose("Verbose 1");
+ logger.silly("Silly 1");
+ logger.http("HTTP 1");
+
+ // Assert
+ assertThatNoException().isThrownBy(logdash::flush);
+ }
+ }
+}
diff --git a/src/test/java/io/logdash/sdk/log/LogEntryTest.java b/src/test/java/io/logdash/sdk/log/LogEntryTest.java
new file mode 100644
index 0000000..1676e58
--- /dev/null
+++ b/src/test/java/io/logdash/sdk/log/LogEntryTest.java
@@ -0,0 +1,159 @@
+package io.logdash.sdk.log;
+
+import org.junit.jupiter.api.Test;
+
+import java.time.Instant;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+class LogEntryTest {
+
+ @Test
+ void should_create_log_entry_with_all_parameters() {
+ // Arrange
+ var timestamp = Instant.now();
+ Map<String, Object> context = Map.of("key", "value");
+
+ // Act
+ var logEntry = new LogEntry("Test message", LogLevel.INFO, timestamp, 42L, context);
+
+ // Assert
+ assertThat(logEntry.message()).isEqualTo("Test message");
+ assertThat(logEntry.level()).isEqualTo(LogLevel.INFO);
+ assertThat(logEntry.createdAt()).isEqualTo(timestamp);
+ assertThat(logEntry.sequenceNumber()).isEqualTo(42L);
+ assertThat(logEntry.context()).containsEntry("key", "value");
+ }
+
+ @Test
+ void should_create_log_entry_with_minimal_parameters() {
+ // Act
+ var logEntry = new LogEntry("Test message", LogLevel.ERROR, 123L);
+
+ // Assert
+ assertThat(logEntry.message()).isEqualTo("Test message");
+ assertThat(logEntry.level()).isEqualTo(LogLevel.ERROR);
+ assertThat(logEntry.sequenceNumber()).isEqualTo(123L);
+ assertThat(logEntry.createdAt()).isNotNull();
+ assertThat(logEntry.context()).isEmpty();
+ }
+
+ @Test
+ void should_create_log_entry_with_context() {
+ // Arrange
+ Map<String, Object> context = Map.of("userId", "123", "action", "login");
+
+ // Act
+ var logEntry = new LogEntry("User action", LogLevel.INFO, 1L, context);
+
+ // Assert
+ assertThat(logEntry.message()).isEqualTo("User action");
+ assertThat(logEntry.level()).isEqualTo(LogLevel.INFO);
+ assertThat(logEntry.sequenceNumber()).isEqualTo(1L);
+ assertThat(logEntry.context()).containsEntry("userId", "123");
+ assertThat(logEntry.context()).containsEntry("action", "login");
+ }
+
+ @Test
+ void should_throw_exception_for_null_message() {
+ assertThatThrownBy(() ->
+ new LogEntry(null, LogLevel.INFO, 1L)
+ ).isInstanceOf(IllegalArgumentException.class)
+ .hasMessageContaining("Message cannot be null");
+ }
+
+ @Test
+ void should_throw_exception_for_null_level() {
+ assertThatThrownBy(() ->
+ new LogEntry("Test", null, 1L)
+ ).isInstanceOf(IllegalArgumentException.class)
+ .hasMessageContaining("Level cannot be null");
+ }
+
+ @Test
+ void should_handle_null_timestamp_by_using_current_time() {
+ // Act
+ var logEntry = new LogEntry("Test", LogLevel.INFO, null, 1L, Map.of());
+
+ // Assert
+ assertThat(logEntry.createdAt()).isNotNull();
+ assertThat(logEntry.createdAt()).isBeforeOrEqualTo(Instant.now());
+ }
+
+ @Test
+ void should_handle_null_context_by_using_empty_map() {
+ // Act
+ var logEntry = new LogEntry("Test", LogLevel.INFO, Instant.now(), 1L, null);
+
+ // Assert
+ assertThat(logEntry.context()).isNotNull();
+ assertThat(logEntry.context()).isEmpty();
+ }
+
+ @Test
+ void should_create_defensive_copy_of_context() {
+ // Arrange
+ Map<String, Object> originalContext = new HashMap<>();
+ originalContext.put("key", "value");
+
+ // Act
+ var logEntry = new LogEntry("Test", LogLevel.INFO, 1L, originalContext);
+
+ // Modify original map
+ originalContext.put("newKey", "newValue");
+
+ // Assert
+ assertThat(logEntry.context()).containsOnlyKeys("key");
+ assertThat(logEntry.context()).doesNotContainKey("newKey");
+ assertThat(logEntry.context()).hasSize(1);
+ }
+
+ @Test
+ void should_handle_empty_context_map() {
+ // Act
+ var logEntry = new LogEntry("Test", LogLevel.INFO, 1L, Map.of());
+
+ // Assert
+ assertThat(logEntry.context()).isEmpty();
+ assertThat(logEntry.context()).isNotNull();
+ }
+
+ @Test
+ void should_handle_context_with_null_values() {
+ // Arrange
+ Map<String, Object> contextWithNull = new HashMap<>();
+ contextWithNull.put("validKey", "validValue");
+ contextWithNull.put("nullKey", null);
+
+ // Act
+ var logEntry = new LogEntry("Test", LogLevel.INFO, 1L, contextWithNull);
+
+ // Assert
+ assertThat(logEntry.context()).hasSize(2);
+ assertThat(logEntry.context()).containsEntry("validKey", "validValue");
+ assertThat(logEntry.context()).containsEntry("nullKey", null);
+ }
+
+ @Test
+ void should_handle_different_context_value_types() {
+ // Arrange
+ Map<String, Object> context = new HashMap<>();
+ context.put("stringValue", "test");
+ context.put("intValue", 42);
+ context.put("doubleValue", 3.14);
+ context.put("booleanValue", true);
+
+ // Act
+ var logEntry = new LogEntry("Test", LogLevel.INFO, 1L, context);
+
+ // Assert
+ assertThat(logEntry.context()).hasSize(4);
+ assertThat(logEntry.context().get("stringValue")).isEqualTo("test");
+ assertThat(logEntry.context().get("intValue")).isEqualTo(42);
+ assertThat(logEntry.context().get("doubleValue")).isEqualTo(3.14);
+ assertThat(logEntry.context().get("booleanValue")).isEqualTo(true);
+ }
+}
diff --git a/src/test/java/io/logdash/sdk/log/LogLevelTest.java b/src/test/java/io/logdash/sdk/log/LogLevelTest.java
new file mode 100644
index 0000000..358fbb0
--- /dev/null
+++ b/src/test/java/io/logdash/sdk/log/LogLevelTest.java
@@ -0,0 +1,41 @@
+package io.logdash.sdk.log;
+
+import org.junit.jupiter.api.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+class LogLevelTest {
+
+ @Test
+ void should_have_correct_values() {
+ assertThat(LogLevel.ERROR.getValue()).isEqualTo("error");
+ assertThat(LogLevel.WARN.getValue()).isEqualTo("warning");
+ assertThat(LogLevel.INFO.getValue()).isEqualTo("info");
+ assertThat(LogLevel.HTTP.getValue()).isEqualTo("http");
+ assertThat(LogLevel.VERBOSE.getValue()).isEqualTo("verbose");
+ assertThat(LogLevel.DEBUG.getValue()).isEqualTo("debug");
+ assertThat(LogLevel.SILLY.getValue()).isEqualTo("silly");
+ }
+
+ @Test
+ void should_return_correct_string_representation() {
+ assertThat(LogLevel.ERROR.toString()).isEqualTo("error");
+ assertThat(LogLevel.INFO.toString()).isEqualTo("info");
+ assertThat(LogLevel.DEBUG.toString()).isEqualTo("debug");
+ }
+
+ @Test
+ void should_have_all_expected_levels() {
+ var levels = LogLevel.values();
+ assertThat(levels).hasSize(7);
+ assertThat(levels)
+ .contains(
+ LogLevel.ERROR,
+ LogLevel.WARN,
+ LogLevel.INFO,
+ LogLevel.HTTP,
+ LogLevel.VERBOSE,
+ LogLevel.DEBUG,
+ LogLevel.SILLY);
+ }
+}
diff --git a/src/test/java/io/logdash/sdk/log/LogdashLoggerTest.java b/src/test/java/io/logdash/sdk/log/LogdashLoggerTest.java
new file mode 100644
index 0000000..7c71cee
--- /dev/null
+++ b/src/test/java/io/logdash/sdk/log/LogdashLoggerTest.java
@@ -0,0 +1,181 @@
+package io.logdash.sdk.log;
+
+import io.logdash.sdk.config.LogdashConfig;
+import io.logdash.sdk.transport.LogdashTransport;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mock;
+import org.mockito.junit.jupiter.MockitoExtension;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.CompletableFuture;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatNoException;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.BDDMockito.given;
+import static org.mockito.BDDMockito.then;
+import static org.mockito.Mockito.times;
+
+@ExtendWith(MockitoExtension.class)
+class LogdashLoggerTest {
+
+ @Mock
+ private LogdashTransport transport;
+
+ private LogdashConfig config;
+ private LogdashLogger logger;
+
+ @BeforeEach
+ void setUp() {
+ config =
+ new LogdashConfig(
+ "test-key", "https://api.logdash.io", false, false, 3, 1000L, 5000L, 10000L, 100);
+
+ given(transport.sendLog(any())).willReturn(CompletableFuture.completedFuture(null));
+ logger = new LogdashLogger(config, transport);
+ }
+
+ @Test
+ void should_send_info_log_with_correct_level() {
+ // Act
+ logger.info("test message");
+
+ // Assert
+ ArgumentCaptor<LogEntry> captor = ArgumentCaptor.forClass(LogEntry.class);
+ then(transport).should().sendLog(captor.capture());
+
+ LogEntry captured = captor.getValue();
+ assertThat(captured.level()).isEqualTo(LogLevel.INFO);
+ assertThat(captured.message()).isEqualTo("test message");
+ }
+
+ @Test
+ void should_send_log_with_context() {
+ // Arrange
+ final Map<String, Object> context = Map.of("userId", "123", "action", "login");
+
+ // Act
+ logger.info("User logged in", context);
+
+ // Assert
+ ArgumentCaptor<LogEntry> captor = ArgumentCaptor.forClass(LogEntry.class);
+ then(transport).should().sendLog(captor.capture());
+
+ LogEntry captured = captor.getValue();
+ assertThat(captured.message()).contains("User logged in");
+ assertThat(captured.message()).contains("userId");
+ assertThat(captured.message()).contains("123");
+ }
+
+ @Test
+ void should_handle_all_log_levels() {
+ // Act
+ logger.error("error message");
+ logger.warn("warn message");
+ logger.info("info message");
+ logger.http("http message");
+ logger.verbose("verbose message");
+ logger.debug("debug message");
+ logger.silly("silly message");
+
+ // Assert
+ then(transport).should(times(7)).sendLog(any(LogEntry.class));
+ }
+
+ @Test
+ void should_handle_large_context_maps() {
+ // Arrange
+ Map<String, Object> largeContext = new HashMap<>();
+ for (int i = 0; i < 100; i++) {
+ largeContext.put("key" + i, "value" + i);
+ }
+
+ // Act & Assert
+ assertThatNoException()
+ .isThrownBy(
+ () -> {
+ logger.info("Large context test", largeContext);
+ then(transport).should().sendLog(any(LogEntry.class));
+ });
+ }
+
+ @Test
+ void should_handle_null_values_in_context() {
+ // Arrange
+ Map<String, Object> contextWithNull = new HashMap<>();
+ contextWithNull.put("validKey", "validValue");
+ contextWithNull.put("nullKey", null);
+
+ // Act & Assert
+ assertThatNoException()
+ .isThrownBy(
+ () -> {
+ logger.info("Null context test", contextWithNull);
+ then(transport).should().sendLog(any(LogEntry.class));
+ });
+ }
+
+ @Test
+ void should_increment_sequence_numbers_correctly() {
+ // Act
+ logger.info("First message");
+ logger.info("Second message");
+ logger.info("Third message");
+
+ // Assert
+ ArgumentCaptor<LogEntry> captor = ArgumentCaptor.forClass(LogEntry.class);
+ then(transport).should(times(3)).sendLog(captor.capture());
+
+ var capturedEntries = captor.getAllValues();
+ assertThat(capturedEntries.get(0).sequenceNumber()).isEqualTo(1L);
+ assertThat(capturedEntries.get(1).sequenceNumber()).isEqualTo(2L);
+ assertThat(capturedEntries.get(2).sequenceNumber()).isEqualTo(3L);
+ }
+
+ @Test
+ void should_handle_console_output_when_enabled() {
+ // Arrange
+ var configWithConsole =
+ new LogdashConfig(
+ "test-key", "https://api.logdash.io", true, false, 3, 1000L, 5000L, 10000L, 100);
+ var loggerWithConsole = new LogdashLogger(configWithConsole, transport);
+
+ // Act & Assert
+ assertThatNoException()
+ .isThrownBy(
+ () -> {
+ loggerWithConsole.info("Console output test");
+ });
+ }
+
+ @Test
+ void should_handle_empty_context() {
+ // Act
+ logger.info("Empty context", Map.of());
+
+ // Assert
+ ArgumentCaptor<LogEntry> captor = ArgumentCaptor.forClass(LogEntry.class);
+ then(transport).should().sendLog(captor.capture());
+
+ LogEntry captured = captor.getValue();
+ assertThat(captured.message()).isEqualTo("Empty context");
+ }
+
+ @Test
+ void should_handle_special_characters_in_message() {
+ // Act
+ logger.info("Message with \"quotes\" and \n newlines");
+
+ // Assert
+ ArgumentCaptor<LogEntry> captor = ArgumentCaptor.forClass(LogEntry.class);
+ then(transport).should().sendLog(captor.capture());
+
+ LogEntry captured = captor.getValue();
+ assertThat(captured.message()).contains("quotes");
+ assertThat(captured.message()).contains("newlines");
+ }
+}
diff --git a/src/test/java/io/logdash/sdk/metrics/LogdashMetricsTest.java b/src/test/java/io/logdash/sdk/metrics/LogdashMetricsTest.java
new file mode 100644
index 0000000..4668aa5
--- /dev/null
+++ b/src/test/java/io/logdash/sdk/metrics/LogdashMetricsTest.java
@@ -0,0 +1,83 @@
+package io.logdash.sdk.metrics;
+
+import io.logdash.sdk.config.LogdashConfig;
+import io.logdash.sdk.transport.LogdashTransport;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.Mock;
+import org.mockito.junit.jupiter.MockitoExtension;
+
+import java.util.concurrent.CompletableFuture;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.BDDMockito.given;
+import static org.mockito.BDDMockito.then;
+import static org.mockito.Mockito.times;
+
+@ExtendWith(MockitoExtension.class)
+class LogdashMetricsTest {
+
+ @Mock
+ private LogdashTransport transport;
+
+ private LogdashConfig config;
+ private LogdashMetrics metrics;
+
+ @BeforeEach
+ void setUp() {
+ config = new LogdashConfig(
+ "test-key",
+ "https://api.logdash.io",
+ false,
+ false,
+ 3,
+ 1000L,
+ 5000L,
+ 10000L,
+ 100
+ );
+
+ given(transport.sendMetric(any())).willReturn(CompletableFuture.completedFuture(null));
+
+ metrics = new LogdashMetrics(config, transport);
+ }
+
+ @Test
+ void should_send_set_metric() {
+ // Act
+ metrics.set("active_users", 100);
+
+ // Assert
+ then(transport).should().sendMetric(any(MetricEntry.class));
+ }
+
+ @Test
+ void should_send_mutate_metric() {
+ // Act
+ metrics.mutate("temperature", -5.5);
+
+ // Assert
+ then(transport).should().sendMetric(any(MetricEntry.class));
+ }
+
+ @Test
+ void should_handle_decimal_values_correctly() {
+ // Act
+ metrics.set("temperature", 23.5);
+ metrics.mutate("pressure", -1.2);
+
+ // Assert
+ then(transport).should(times(2)).sendMetric(any(MetricEntry.class));
+ }
+
+ @Test
+ void should_handle_negative_values() {
+ // Act
+ metrics.set("deficit", -100);
+ metrics.mutate("altitude", -50.5);
+
+ // Assert
+ then(transport).should(times(2)).sendMetric(any(MetricEntry.class));
+ }
+}
diff --git a/src/test/java/io/logdash/sdk/metrics/MetricEntryTest.java b/src/test/java/io/logdash/sdk/metrics/MetricEntryTest.java
new file mode 100644
index 0000000..accf8bd
--- /dev/null
+++ b/src/test/java/io/logdash/sdk/metrics/MetricEntryTest.java
@@ -0,0 +1,81 @@
+package io.logdash.sdk.metrics;
+
+import org.junit.jupiter.api.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+class MetricEntryTest {
+
+ @Test
+ void should_create_metric_entry_with_valid_parameters() {
+ // Act
+ var metricEntry = new MetricEntry("cpu_usage", 75.5, MetricType.SET);
+
+ // Assert
+ assertThat(metricEntry.name()).isEqualTo("cpu_usage");
+ assertThat(metricEntry.value()).isEqualTo(75.5);
+ assertThat(metricEntry.operation()).isEqualTo(MetricType.SET);
+ }
+
+ @Test
+ void should_create_metric_entry_with_integer_value() {
+ // Act
+ var metricEntry = new MetricEntry("request_count", 42, MetricType.MUTATE);
+
+ // Assert
+ assertThat(metricEntry.name()).isEqualTo("request_count");
+ assertThat(metricEntry.value()).isEqualTo(42);
+ assertThat(metricEntry.operation()).isEqualTo(MetricType.MUTATE);
+ }
+
+ @Test
+ void should_throw_exception_for_null_name() {
+ assertThatThrownBy(() ->
+ new MetricEntry(null, 42, MetricType.SET)
+ ).isInstanceOf(IllegalArgumentException.class)
+ .hasMessageContaining("Metric name cannot be null or blank");
+ }
+
+ @Test
+ void should_throw_exception_for_blank_name() {
+ assertThatThrownBy(() ->
+ new MetricEntry(" ", 42, MetricType.SET)
+ ).isInstanceOf(IllegalArgumentException.class)
+ .hasMessageContaining("Metric name cannot be null or blank");
+ }
+
+ @Test
+ void should_throw_exception_for_null_value() {
+ assertThatThrownBy(() ->
+ new MetricEntry("metric", null, MetricType.SET)
+ ).isInstanceOf(IllegalArgumentException.class)
+ .hasMessageContaining("Metric value cannot be null");
+ }
+
+ @Test
+ void should_throw_exception_for_null_operation() {
+ assertThatThrownBy(() ->
+ new MetricEntry("metric", 42, null)
+ ).isInstanceOf(IllegalArgumentException.class)
+ .hasMessageContaining("Metric operation cannot be null");
+ }
+
+ @Test
+ void should_handle_negative_values() {
+ // Act
+ var metricEntry = new MetricEntry("temperature_change", -5.2, MetricType.MUTATE);
+
+ // Assert
+ assertThat(metricEntry.value()).isEqualTo(-5.2);
+ }
+
+ @Test
+ void should_handle_zero_values() {
+ // Act
+ var metricEntry = new MetricEntry("reset_counter", 0, MetricType.SET);
+
+ // Assert
+ assertThat(metricEntry.value()).isEqualTo(0);
+ }
+}
diff --git a/src/test/java/io/logdash/sdk/metrics/MetricTypeTest.java b/src/test/java/io/logdash/sdk/metrics/MetricTypeTest.java
new file mode 100644
index 0000000..67a46a2
--- /dev/null
+++ b/src/test/java/io/logdash/sdk/metrics/MetricTypeTest.java
@@ -0,0 +1,28 @@
+package io.logdash.sdk.metrics;
+
+import org.junit.jupiter.api.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+class MetricTypeTest {
+
+ @Test
+ void should_have_correct_values() {
+ assertThat(MetricType.SET.getValue()).isEqualTo("set");
+ assertThat(MetricType.MUTATE.getValue()).isEqualTo("change");
+ }
+
+ @Test
+ void should_return_correct_string_representation() {
+ assertThat(MetricType.SET.toString()).hasToString("set");
+ assertThat(MetricType.MUTATE.toString()).hasToString("change");
+ }
+
+ @Test
+ void should_have_all_expected_types() {
+ var types = MetricType.values();
+ assertThat(types)
+ .hasSize(2)
+ .contains(MetricType.SET, MetricType.MUTATE);
+ }
+}
diff --git a/src/test/java/io/logdash/sdk/performance/PerformanceTest.java b/src/test/java/io/logdash/sdk/performance/PerformanceTest.java
new file mode 100644
index 0000000..37daf2d
--- /dev/null
+++ b/src/test/java/io/logdash/sdk/performance/PerformanceTest.java
@@ -0,0 +1,82 @@
+package io.logdash.sdk.performance;
+
+import io.logdash.sdk.Logdash;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+
+import java.util.ArrayList;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatNoException;
+
+class PerformanceTest {
+
+ @Test
+ void should_handle_large_message_payloads() {
+ try (var logdash = Logdash.builder()
+ .apiKey("large-payload-test")
+ .enableConsoleOutput(false)
+ .build()) {
+
+ var largeMessage = "A".repeat(100_000); // 100KB message
+
+ assertThatNoException()
+ .isThrownBy(
+ () -> {
+ logdash.logger().info(largeMessage);
+ logdash.flush();
+ });
+ }
+ }
+
+ @Test
+ @Timeout(value = 15, unit = TimeUnit.SECONDS)
+ void should_maintain_performance_under_mixed_load() throws InterruptedException {
+ try (var logdash =
+ Logdash.builder()
+ .apiKey("mixed-load-test")
+ .enableConsoleOutput(false)
+ .maxConcurrentRequests(20)
+ .build()) {
+
+ var executor = Executors.newFixedThreadPool(10);
+ var futures = new ArrayList<CompletableFuture<Void>>();
+
+ // Mixed workload: logs and metrics
+ for (int i = 0; i < 50; i++) {
+ final int iteration = i;
+
+ var future =
+ CompletableFuture.runAsync(
+ () -> {
+ // Logs
+ logdash.logger().info("Mixed load log " + iteration);
+ logdash.logger().debug("Debug info " + iteration);
+
+ // Metrics
+ logdash.metrics().mutate("operations", 1);
+ logdash.metrics().set("current_batch", iteration);
+ logdash.metrics().mutate("temperature", Math.random() * 10 - 5);
+ },
+ executor);
+
+ futures.add(future);
+ }
+
+ var allFutures = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
+
+ assertThatNoException()
+ .isThrownBy(
+ () -> {
+ allFutures.get(10, TimeUnit.SECONDS);
+ logdash.flush();
+ });
+
+ executor.shutdown();
+ assertThat(executor.awaitTermination(5, TimeUnit.SECONDS)).isTrue();
+ }
+ }
+}
diff --git a/src/test/java/io/logdash/sdk/transport/HttpTransportTest.java b/src/test/java/io/logdash/sdk/transport/HttpTransportTest.java
new file mode 100644
index 0000000..1ecdcea
--- /dev/null
+++ b/src/test/java/io/logdash/sdk/transport/HttpTransportTest.java
@@ -0,0 +1,821 @@
+package io.logdash.sdk.transport;
+
+import com.github.tomakehurst.wiremock.WireMockServer;
+import com.github.tomakehurst.wiremock.core.WireMockConfiguration;
+import io.logdash.sdk.config.LogdashConfig;
+import io.logdash.sdk.log.LogEntry;
+import io.logdash.sdk.log.LogLevel;
+import io.logdash.sdk.metrics.MetricEntry;
+import io.logdash.sdk.metrics.MetricType;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.ValueSource;
+
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import static com.github.tomakehurst.wiremock.client.WireMock.*;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatNoException;
+
+class HttpTransportTest {
+
+ private WireMockServer wireMockServer;
+ private LogdashConfig config;
+ private HttpTransport transport;
+
+ @BeforeEach
+ void setUp() {
+ wireMockServer = new WireMockServer(WireMockConfiguration.options().dynamicPort());
+ wireMockServer.start();
+
+ config =
+ LogdashConfig.builder()
+ .apiKey("test-api-key")
+ .baseUrl("http://localhost:" + wireMockServer.port())
+ .enableVerboseLogging(false)
+ .requestTimeoutMs(2000L)
+ .maxRetries(2)
+ .retryDelayMs(100L)
+ .maxConcurrentRequests(5)
+ .shutdownTimeoutMs(1000L)
+ .build();
+
+ transport = new HttpTransport(config);
+ }
+
+ @AfterEach
+ void tearDown() {
+ if (transport != null) {
+ transport.close();
+ }
+ if (wireMockServer != null) {
+ wireMockServer.stop();
+ }
+ }
+
+ @Test
+ void should_successfully_send_log_entry() {
+ // Arrange
+ wireMockServer.stubFor(
+ post(urlEqualTo("/logs"))
+ .willReturn(
+ aResponse().withStatus(200).withHeader("Content-Type", "application/json")));
+
+ var logEntry = new LogEntry("Test message", LogLevel.INFO, 1L);
+
+ // Act
+ var future = transport.sendLog(logEntry);
+
+ // Assert
+ assertThat(future).succeedsWithin(3, TimeUnit.SECONDS);
+
+ wireMockServer.verify(
+ postRequestedFor(urlEqualTo("/logs"))
+ .withHeader("Content-Type", equalTo("application/json"))
+ .withHeader("project-api-key", equalTo("test-api-key"))
+ .withHeader("User-Agent", equalTo("logdash-java-sdk/0.1.0")));
+ }
+
+ @Test
+ void should_successfully_send_metric_entry() {
+ // Arrange
+ wireMockServer.stubFor(
+ put(urlEqualTo("/metrics"))
+ .willReturn(
+ aResponse().withStatus(202).withHeader("Content-Type", "application/json")));
+
+ var metricEntry = new MetricEntry("test_metric", 42, MetricType.SET);
+
+ // Act
+ var future = transport.sendMetric(metricEntry);
+
+ // Assert
+ assertThat(future).succeedsWithin(3, TimeUnit.SECONDS);
+
+ wireMockServer.verify(
+ putRequestedFor(urlEqualTo("/metrics"))
+ .withHeader("Content-Type", equalTo("application/json"))
+ .withHeader("project-api-key", equalTo("test-api-key"))
+ .withHeader("User-Agent", equalTo("logdash-java-sdk/0.1.0")));
+ }
+
+ @ParameterizedTest
+ @ValueSource(ints = {400, 401, 403, 404, 500, 502, 503})
+ void should_retry_on_http_error_responses(int statusCode) {
+ // Arrange
+ wireMockServer.stubFor(
+ post(urlEqualTo("/logs"))
+ .inScenario("retry-scenario")
+ .whenScenarioStateIs("Started")
+ .willReturn(aResponse().withStatus(statusCode))
+ .willSetStateTo("first-retry"));
+
+ wireMockServer.stubFor(
+ post(urlEqualTo("/logs"))
+ .inScenario("retry-scenario")
+ .whenScenarioStateIs("first-retry")
+ .willReturn(aResponse().withStatus(200)));
+
+ var logEntry = new LogEntry("Test message", LogLevel.INFO, 1L);
+
+ // Act
+ var future = transport.sendLog(logEntry);
+
+ // Assert
+ assertThat(future).succeedsWithin(5, TimeUnit.SECONDS);
+ wireMockServer.verify(2, postRequestedFor(urlEqualTo("/logs")));
+ }
+
+ @Test
+ void should_fail_permanently_after_max_retries() throws Exception {
+ // Arrange
+ wireMockServer.stubFor(post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(500)));
+
+ var logEntry = new LogEntry("Test message", LogLevel.INFO, 1L);
+
+ // Act
+ var future = transport.sendLog(logEntry);
+
+ // Assert
+ assertThat(future).succeedsWithin(10, TimeUnit.SECONDS);
+
+ // Wait a bit for all retry attempts to complete
+ Thread.sleep(1000);
+
+ // Should have made initial request + 2 retries = 3 total
+ wireMockServer.verify(moreThanOrExactly(2), postRequestedFor(urlEqualTo("/logs")));
+ }
+
+ @Test
+ void should_handle_connection_timeout() {
+ // Arrange
+ wireMockServer.stubFor(
+ post(urlEqualTo("/logs"))
+ .willReturn(
+ aResponse().withStatus(200).withFixedDelay(5000))); // Longer than request timeout
+
+ var logEntry = new LogEntry("Test message", LogLevel.INFO, 1L);
+
+ // Act
+ var future = transport.sendLog(logEntry);
+
+ // Assert
+ assertThat(future).succeedsWithin(15, TimeUnit.SECONDS); // Should complete despite timeout
+ }
+
+ @Test
+ void should_handle_concurrent_requests_within_limit() {
+ // Arrange
+ wireMockServer.stubFor(
+ post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(200).withFixedDelay(500)));
+
+ var futures = new ArrayList<CompletableFuture<Void>>();
+
+ // Act
+ for (int i = 0; i < 3; i++) {
+ var logEntry = new LogEntry("Message " + i, LogLevel.INFO, i);
+ futures.add(transport.sendLog(logEntry));
+ }
+
+ // Assert
+ var allFutures = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
+ assertThat(allFutures).succeedsWithin(10, TimeUnit.SECONDS);
+ wireMockServer.verify(3, postRequestedFor(urlEqualTo("/logs")));
+ }
+
+ @Test
+ void should_respect_concurrency_limits() {
+ // Arrange
+ var configWithLowLimit =
+ LogdashConfig.builder()
+ .apiKey("test-key")
+ .baseUrl("http://localhost:" + wireMockServer.port())
+ .maxConcurrentRequests(1)
+ .requestTimeoutMs(1000L)
+ .build();
+
+ var limitedTransport = new HttpTransport(configWithLowLimit);
+
+ wireMockServer.stubFor(
+ post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(200).withFixedDelay(800)));
+
+ var futures = new ArrayList<CompletableFuture<Void>>();
+
+ try {
+ // Act
+ for (int i = 0; i < 5; i++) {
+ var logEntry = new LogEntry("Message " + i, LogLevel.INFO, i);
+ futures.add(limitedTransport.sendLog(logEntry));
+ }
+
+ // Assert
+ var allFutures = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
+ assertThat(allFutures).succeedsWithin(15, TimeUnit.SECONDS);
+
+ // Some requests might be rejected due to concurrency limit
+ var requestCount = wireMockServer.findAll(postRequestedFor(urlEqualTo("/logs"))).size();
+ assertThat(requestCount).isLessThanOrEqualTo(5);
+
+ } finally {
+ limitedTransport.close();
+ }
+ }
+
+ @Test
+ void should_not_send_requests_after_shutdown() {
+ // Arrange
+ transport.shutdown();
+ var logEntry = new LogEntry("Test message", LogLevel.INFO, 1L);
+
+ // Act
+ var future = transport.sendLog(logEntry);
+
+ // Assert
+ assertThat(future).succeedsWithin(1, TimeUnit.SECONDS);
+ wireMockServer.verify(0, postRequestedFor(urlEqualTo("/logs")));
+ }
+
+ @Test
+ void should_handle_graceful_shutdown_with_pending_requests() {
+ // Arrange
+ wireMockServer.stubFor(
+ post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(200).withFixedDelay(300)));
+
+ var logEntry = new LogEntry("Test message", LogLevel.INFO, 1L);
+ var future = transport.sendLog(logEntry);
+
+ // Act
+ var shutdownFuture = transport.shutdown();
+
+ // Assert
+ assertThat(shutdownFuture).isPresent();
+ assertThat(shutdownFuture.get()).succeedsWithin(5, TimeUnit.SECONDS);
+ assertThat(future).succeedsWithin(5, TimeUnit.SECONDS);
+ }
+
+ @Test
+ void should_flush_pending_requests() {
+ // Arrange
+ wireMockServer.stubFor(
+ post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(200).withFixedDelay(200)));
+
+ var logEntry = new LogEntry("Test message", LogLevel.INFO, 1L);
+ transport.sendLog(logEntry);
+
+ // Act
+ var flushFuture = transport.flush();
+
+ // Assert
+ assertThat(flushFuture).succeedsWithin(5, TimeUnit.SECONDS);
+ wireMockServer.verify(1, postRequestedFor(urlEqualTo("/logs")));
+ }
+
+ @Test
+ void should_handle_serialization_errors_gracefully() {
+ // Arrange
+ var logEntry =
+ new LogEntry("Test with very long message: " + "x".repeat(100000), LogLevel.INFO, 1L);
+
+ // Act
+ var future = transport.sendLog(logEntry);
+
+ // Assert
+ assertThat(future).succeedsWithin(5, TimeUnit.SECONDS);
+ }
+
+ @Test
+ void should_handle_large_payloads() {
+ // Arrange
+ wireMockServer.stubFor(post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(200)));
+
+ var largeMessage = "A".repeat(10000);
+ var logEntry = new LogEntry(largeMessage, LogLevel.INFO, 1L);
+
+ // Act
+ var future = transport.sendLog(logEntry);
+
+ // Assert
+ assertThat(future).succeedsWithin(5, TimeUnit.SECONDS);
+ wireMockServer.verify(1, postRequestedFor(urlEqualTo("/logs")));
+ }
+
+ @Test
+ void should_handle_multiple_close_calls_safely() {
+ // Act & Assert
+ assertThatNoException()
+ .isThrownBy(
+ () -> {
+ transport.close();
+ transport.close(); // Second close should be safe
+ });
+ }
+
+ @Test
+ void should_send_correct_json_for_log_entries_with_context() throws Exception {
+ // Arrange
+ wireMockServer.stubFor(post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(200)));
+
+ var timestamp = Instant.parse("2024-06-01T12:34:56Z");
+ Map<String, Object> context = Map.of("userId", "123", "action", "login");
+ var logEntry = new LogEntry("User logged in", LogLevel.INFO, timestamp, 42L, context);
+
+ // Act
+ var future = transport.sendLog(logEntry);
+ assertThat(future).succeedsWithin(3, TimeUnit.SECONDS);
+
+ // Wait for request to complete
+ Thread.sleep(200);
+
+ // Assert
+ wireMockServer.verify(
+ postRequestedFor(urlEqualTo("/logs"))
+ .withRequestBody(containing("\"level\":\"info\""))
+ .withRequestBody(containing("\"sequenceNumber\":42"))
+ .withRequestBody(containing("\"createdAt\":\"2024-06-01T12:34:56Z\"")));
+ }
+
+ @Test
+ void should_send_correct_json_for_metrics() throws Exception {
+ // Arrange
+ wireMockServer.stubFor(put(urlEqualTo("/metrics")).willReturn(aResponse().withStatus(200)));
+
+ var metricEntry = new MetricEntry("cpu_usage", 75.5, MetricType.SET);
+
+ // Act
+ var future = transport.sendMetric(metricEntry);
+ assertThat(future).succeedsWithin(3, TimeUnit.SECONDS);
+
+ // Wait for request to complete
+ Thread.sleep(200);
+
+ // Assert
+ wireMockServer.verify(
+ putRequestedFor(urlEqualTo("/metrics"))
+ .withRequestBody(containing("\"name\":\"cpu_usage\""))
+ .withRequestBody(containing("\"value\":75.5"))
+ .withRequestBody(containing("\"operation\":\"set\"")));
+ }
+
+ @Test
+ @Timeout(15)
+ void should_handle_high_throughput_requests_with_realistic_expectations()
+ throws InterruptedException {
+ // Arrange - Use higher concurrency limit for throughput test
+ var highThroughputConfig =
+ LogdashConfig.builder()
+ .apiKey("test-key")
+ .baseUrl("http://localhost:" + wireMockServer.port())
+ .maxConcurrentRequests(15) // Higher limit
+ .requestTimeoutMs(5000L)
+ .build();
+
+ var throughputTransport = new HttpTransport(highThroughputConfig);
+
+ wireMockServer.stubFor(
+ post(urlEqualTo("/logs"))
+ .willReturn(aResponse().withStatus(200).withFixedDelay(100))); // Reasonable delay
+
+ var requestCount = 30; // Realistic number for test
+ var futures = new ArrayList<CompletableFuture<Void>>(requestCount);
+ var completedLatch = new CountDownLatch(requestCount);
+
+ try {
+ // Act
+ long startTime = System.currentTimeMillis();
+
+ for (int i = 0; i < requestCount; i++) {
+ var logEntry = new LogEntry("Throughput test " + i, LogLevel.INFO, i);
+ var future = throughputTransport.sendLog(logEntry);
+ future.whenComplete((result, throwable) -> completedLatch.countDown());
+ futures.add(future);
+
+ // Small delay to avoid overwhelming
+ if (i % 5 == 0) {
+ Thread.sleep(10);
+ }
+ }
+
+ // Assert
+ assertThat(completedLatch.await(12, TimeUnit.SECONDS)).isTrue();
+ long duration = System.currentTimeMillis() - startTime;
+
+ // Should complete in reasonable time
+ assertThat(duration).isLessThan(12000);
+
+ // All futures should complete
+ var allFutures = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
+ assertThat(allFutures).succeedsWithin(2, TimeUnit.SECONDS);
+
+ // Most requests should be processed (allow for some concurrency rejections)
+ var actualRequests = wireMockServer.findAll(postRequestedFor(urlEqualTo("/logs"))).size();
+ assertThat(actualRequests).isGreaterThan(requestCount / 2); // At least 50%
+
+ } finally {
+ throughputTransport.close();
+ }
+ }
+
+ @Test
+ void should_implement_exponential_backoff_with_jitter() throws InterruptedException {
+ // Arrange
+ var retryDelayMs = 200L;
+ var configWithRetries =
+ LogdashConfig.builder()
+ .apiKey("test-key")
+ .baseUrl("http://localhost:" + wireMockServer.port())
+ .maxRetries(3)
+ .retryDelayMs(retryDelayMs)
+ .requestTimeoutMs(1000L)
+ .build();
+
+ var retryTransport = new HttpTransport(configWithRetries);
+
+ wireMockServer.stubFor(
+ post(urlEqualTo("/logs"))
+ .inScenario("backoff-test")
+ .whenScenarioStateIs("Started")
+ .willReturn(aResponse().withStatus(500))
+ .willSetStateTo("retry1"));
+
+ wireMockServer.stubFor(
+ post(urlEqualTo("/logs"))
+ .inScenario("backoff-test")
+ .whenScenarioStateIs("retry1")
+ .willReturn(aResponse().withStatus(500))
+ .willSetStateTo("retry2"));
+
+ wireMockServer.stubFor(
+ post(urlEqualTo("/logs"))
+ .inScenario("backoff-test")
+ .whenScenarioStateIs("retry2")
+ .willReturn(aResponse().withStatus(500))
+ .willSetStateTo("retry3"));
+
+ wireMockServer.stubFor(
+ post(urlEqualTo("/logs"))
+ .inScenario("backoff-test")
+ .whenScenarioStateIs("retry3")
+ .willReturn(aResponse().withStatus(200)));
+
+ var logEntry = new LogEntry("Backoff test", LogLevel.ERROR, 1L);
+
+ try {
+ // Act
+ long startTime = System.currentTimeMillis();
+ var future = retryTransport.sendLog(logEntry);
+ assertThat(future).succeedsWithin(15, TimeUnit.SECONDS);
+ long totalTime = System.currentTimeMillis() - startTime;
+
+ // Assert
+ // Should have taken time for exponential backoff (roughly 200 + 400 + 800 + jitter)
+ assertThat(totalTime).isGreaterThan(1200); // Minimum expected time
+ assertThat(totalTime).isLessThan(3000); // Maximum reasonable time
+
+ wireMockServer.verify(4, postRequestedFor(urlEqualTo("/logs")));
+ } finally {
+ retryTransport.close();
+ }
+ }
+
+ @Test
+ void should_demonstrate_concurrency_behavior() throws InterruptedException {
+ // Arrange - This test shows actual concurrency behavior
+ var configWithLowLimit =
+ LogdashConfig.builder()
+ .apiKey("test-key")
+ .baseUrl("http://localhost:" + wireMockServer.port())
+ .maxConcurrentRequests(2) // Very low limit
+ .requestTimeoutMs(3000L)
+ .build();
+
+ var limitedTransport = new HttpTransport(configWithLowLimit);
+
+ // Slow responses to fill up concurrency slots
+ wireMockServer.stubFor(
+ post(urlEqualTo("/logs"))
+ .willReturn(aResponse().withStatus(200).withFixedDelay(1000))); // 1 second delay
+
+ var futures = new ArrayList<CompletableFuture<Void>>();
+
+ try {
+ // Act - Submit more requests than concurrency limit
+ for (int i = 0; i < 8; i++) {
+ var logEntry = new LogEntry("Concurrency demo " + i, LogLevel.INFO, i);
+ var future = limitedTransport.sendLog(logEntry);
+ futures.add(future);
+
+ // Small delay between submissions
+ Thread.sleep(50);
+ }
+
+ // Wait for all futures to complete
+ var allFutures = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
+ assertThat(allFutures).succeedsWithin(15, TimeUnit.SECONDS);
+
+ // Assert - Should have processed some requests, rejected others
+ var actualRequests = wireMockServer.findAll(postRequestedFor(urlEqualTo("/logs"))).size();
+
+ // With concurrency limit of 2, we should see fewer requests than submitted
+ assertThat(actualRequests).isLessThan(8);
+ assertThat(actualRequests).isGreaterThan(0);
+
+ System.out.println(
+ "Submitted: 8, Processed: " + actualRequests + " (demonstrates concurrency limiting)");
+
+ } finally {
+ limitedTransport.close();
+ }
+ }
+
+ @Test
+ void should_use_efficient_countdownlatch_for_flush() throws InterruptedException {
+ // Arrange
+ wireMockServer.stubFor(
+ post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(200).withFixedDelay(300)));
+
+ var futures = new ArrayList<CompletableFuture<Void>>();
+
+ // Start multiple requests
+ for (int i = 0; i < 3; i++) {
+ var logEntry = new LogEntry("Flush test " + i, LogLevel.INFO, i);
+ futures.add(transport.sendLog(logEntry));
+ }
+
+ // Act
+ long startTime = System.currentTimeMillis();
+ var flushFuture = transport.flush();
+ assertThat(flushFuture).succeedsWithin(5, TimeUnit.SECONDS);
+ long flushTime = System.currentTimeMillis() - startTime;
+
+ // Assert
+ // Flush should wait for all pending requests efficiently
+ assertThat(flushTime).isGreaterThan(250); // At least the delay time
+ assertThat(flushTime).isLessThan(1000); // But not too long due to efficient waiting
+
+ // All original futures should also complete
+ var allFutures = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
+ assertThat(allFutures).succeedsWithin(1, TimeUnit.SECONDS);
+
+ wireMockServer.verify(3, postRequestedFor(urlEqualTo("/logs")));
+ }
+
+ @Test
+ void should_handle_multiple_shutdown_calls_safely() {
+ // Arrange
+ wireMockServer.stubFor(
+ post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(200).withFixedDelay(100)));
+
+ var logEntry = new LogEntry("Shutdown test", LogLevel.INFO, 1L);
+ transport.sendLog(logEntry);
+
+ // Act & Assert
+ var shutdown1 = transport.shutdown();
+ var shutdown2 = transport.shutdown();
+ var shutdown3 = transport.shutdown();
+
+ assertThat(shutdown1).isPresent();
+ assertThat(shutdown2).isPresent();
+ assertThat(shutdown3).isPresent();
+
+ // All shutdown futures should complete successfully
+ assertThat(shutdown1.get()).succeedsWithin(3, TimeUnit.SECONDS);
+ assertThat(shutdown2.get()).succeedsWithin(1, TimeUnit.SECONDS);
+ assertThat(shutdown3.get()).succeedsWithin(1, TimeUnit.SECONDS);
+ }
+
+ @Test
+ void should_mask_api_key_in_verbose_logging() {
+ // Arrange
+ var verboseConfig =
+ LogdashConfig.builder()
+ .apiKey("very-secret-api-key-12345")
+ .baseUrl("http://localhost:" + wireMockServer.port())
+ .enableVerboseLogging(true)
+ .build();
+
+ wireMockServer.stubFor(post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(200)));
+
+ try (var verboseTransport = new HttpTransport(verboseConfig)) {
+ var logEntry = new LogEntry("Verbose test", LogLevel.INFO, 1L);
+
+ // Act
+ var future = verboseTransport.sendLog(logEntry);
+
+ // Assert
+ assertThat(future).succeedsWithin(3, TimeUnit.SECONDS);
+ // Note: In real scenario, you'd capture System.out to verify masking
+ // For this test, we just ensure it completes without issues
+ }
+ }
+
+ @Test
+ void should_handle_invalid_base_url_gracefully() {
+ // Arrange - HttpTransport doesn't validate URL in constructor
+ // Invalid URLs are caught during actual request sending
+ var invalidConfig =
+ LogdashConfig.builder().apiKey("test-key").baseUrl("not-a-valid-url").build();
+
+ var transport = new HttpTransport(invalidConfig);
+ var logEntry = new LogEntry("Test", LogLevel.INFO, 1L);
+
+ try {
+ // Act - invalid URL should be caught during request creation
+ var future = transport.sendLog(logEntry);
+
+ // Assert - future should complete (error is handled gracefully)
+ assertThat(future).succeedsWithin(5, TimeUnit.SECONDS);
+
+ // No requests should reach the server due to invalid URL
+ wireMockServer.verify(0, postRequestedFor(urlEqualTo("/logs")));
+ } finally {
+ transport.close();
+ }
+ }
+
+ @Test
+ void should_handle_interrupted_threads_during_retry() throws InterruptedException {
+ // Arrange
+ wireMockServer.stubFor(
+ post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(500).withFixedDelay(100)));
+
+ var logEntry = new LogEntry("Interrupt test", LogLevel.ERROR, 1L);
+ var future = transport.sendLog(logEntry);
+
+ // Act - interrupt the thread during retry
+ Thread.sleep(150); // Let first attempt fail and retry start
+
+ // The transport should handle interruption gracefully
+ assertThat(future).succeedsWithin(5, TimeUnit.SECONDS);
+ }
+
+ @Test
+ void should_maintain_minimum_throughput_under_burst_load() throws InterruptedException {
+ // REGRESSION TEST: Prevents implementation from becoming too aggressive
+ // with request rejection, ensuring reasonable throughput under load
+
+ wireMockServer.stubFor(
+ post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(200).withFixedDelay(150)));
+
+ int submittedRequests = 25;
+ var futures = new ArrayList<CompletableFuture<Void>>();
+ var completionLatch = new CountDownLatch(submittedRequests);
+
+ // Act - Submit burst of requests to test throughput behavior
+ long startTime = System.currentTimeMillis();
+
+ for (int i = 0; i < submittedRequests; i++) {
+ var logEntry = new LogEntry("Throughput test " + i, LogLevel.INFO, i);
+ var future = transport.sendLog(logEntry);
+
+ future.whenComplete((result, throwable) -> completionLatch.countDown());
+ futures.add(future);
+
+ // Small stagger to simulate realistic usage pattern
+ if (i % 3 == 0) {
+ Thread.sleep(10);
+ }
+ }
+
+ // Assert - All futures must complete (no hanging)
+ assertThat(completionLatch.await(15, TimeUnit.SECONDS))
+ .as("All requests should complete within timeout")
+ .isTrue();
+
+ long totalTime = System.currentTimeMillis() - startTime;
+ var actualRequests = wireMockServer.findAll(postRequestedFor(urlEqualTo("/logs"))).size();
+
+ // CRITICAL REGRESSION PROTECTION:
+ // 1. Minimum throughput requirement - at least 60% of requests should be processed
+ int minimumExpectedRequests = (int) (submittedRequests * 0.6); // 60% threshold
+ assertThat(actualRequests)
+ .as(
+ "Transport should process at least %d%% of submitted requests (got %d/%d)",
+ 60, actualRequests, submittedRequests)
+ .isGreaterThanOrEqualTo(minimumExpectedRequests);
+
+ // 2. Performance requirement - should complete in reasonable time
+ long maxExpectedTime = submittedRequests * 200L; // 200ms per request max
+ assertThat(totalTime)
+ .as(
+ "Transport should maintain reasonable performance (%dms for %d requests)",
+ totalTime, submittedRequests)
+ .isLessThan(maxExpectedTime);
+
+ // 3. All futures should succeed (no exceptions)
+ var allFutures = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
+ assertThat(allFutures)
+ .as("All request futures should complete successfully")
+ .succeedsWithin(2, TimeUnit.SECONDS);
+
+ // Diagnostic information for troubleshooting
+ if (config.enableVerboseLogging()) {
+ double throughputPercentage = (actualRequests * 100.0) / submittedRequests;
+ System.out.printf(
+ "Throughput regression test: %.1f%% success rate (%d/%d requests, %dms total)%n",
+ throughputPercentage, actualRequests, submittedRequests, totalTime);
+ }
+ }
+
+ @Test
+ void should_enforce_performance_contract_requirements() throws InterruptedException {
+ // CONTRACT TEST: Defines minimum performance characteristics that
+ // the HttpTransport implementation must always satisfy
+
+ wireMockServer.stubFor(
+ post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(200).withFixedDelay(100)));
+
+ // Test scenarios with different load patterns
+ var testScenarios =
+ List.of(
+ new TestScenario("Light Load", 5, 0.9), // 90% success rate
+ new TestScenario("Medium Load", 10, 0.8), // 80% success rate
+ new TestScenario("Heavy Load", 15, 0.6) // 60% success rate
+ );
+
+ for (var scenario : testScenarios) {
+ // Reset WireMock request journal
+ wireMockServer.resetRequests();
+
+ var futures = new ArrayList<CompletableFuture<Void>>();
+
+ // Submit requests for this scenario
+ for (int i = 0; i < scenario.requestCount; i++) {
+ var logEntry = new LogEntry(scenario.name + " " + i, LogLevel.INFO, i);
+ futures.add(transport.sendLog(logEntry));
+ Thread.sleep(20); // Realistic spacing
+ }
+
+ // Wait for completion
+ var allFutures = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
+ assertThat(allFutures).succeedsWithin(10, TimeUnit.SECONDS);
+
+ // Verify contract requirements
+ var actualRequests = wireMockServer.findAll(postRequestedFor(urlEqualTo("/logs"))).size();
+ int expectedMinimum = (int) (scenario.requestCount * scenario.minSuccessRate);
+
+ assertThat(actualRequests)
+ .as(
+ "Scenario '%s': Expected at least %.0f%% success (%d requests), got %d",
+ scenario.name, scenario.minSuccessRate * 100, expectedMinimum, actualRequests)
+ .isGreaterThanOrEqualTo(expectedMinimum);
+ }
+ }
+
+ @Test
+ void should_preserve_thread_pool_efficiency_under_reasonable_load() throws InterruptedException {
+ // Arrange - Use config with higher concurrency for load test
+ var loadConfig =
+ LogdashConfig.builder()
+ .apiKey("test-key")
+ .baseUrl("http://localhost:" + wireMockServer.port())
+ .maxConcurrentRequests(10) // Higher concurrency
+ .requestTimeoutMs(3000L)
+ .build();
+
+ var loadTransport = new HttpTransport(loadConfig);
+
+ wireMockServer.stubFor(
+ post(urlEqualTo("/logs")).willReturn(aResponse().withStatus(200).withFixedDelay(150)));
+
+ var futures = new ArrayList<CompletableFuture<Void>>();
+ var latch = new CountDownLatch(15); // Reasonable load
+
+ try {
+ // Act - Submit requests with small delays
+ for (int i = 0; i < 15; i++) {
+ var logEntry = new LogEntry("Load test " + i, LogLevel.INFO, i);
+ var future = loadTransport.sendLog(logEntry);
+ future.whenComplete((result, throwable) -> latch.countDown());
+ futures.add(future);
+
+ // Small delay to avoid overwhelming
+ Thread.sleep(20);
+ }
+
+ // Assert
+ assertThat(latch.await(10, TimeUnit.SECONDS)).isTrue();
+
+ var allFutures = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
+ assertThat(allFutures).succeedsWithin(3, TimeUnit.SECONDS);
+
+ // Most requests should have been processed
+ var actualRequests = wireMockServer.findAll(postRequestedFor(urlEqualTo("/logs"))).size();
+ assertThat(actualRequests).isGreaterThan(10); // At least 2/3 processed
+
+ } finally {
+ loadTransport.close();
+ }
+ }
+
+ private record TestScenario(String name, int requestCount, double minSuccessRate) {
+ }
+}
diff --git a/src/test/java/io/logdash/sdk/transport/LogdashTransportTest.java b/src/test/java/io/logdash/sdk/transport/LogdashTransportTest.java
new file mode 100644
index 0000000..6391bce
--- /dev/null
+++ b/src/test/java/io/logdash/sdk/transport/LogdashTransportTest.java
@@ -0,0 +1,64 @@
+package io.logdash.sdk.transport;
+
+import io.logdash.sdk.log.LogEntry;
+import io.logdash.sdk.log.LogLevel;
+import io.logdash.sdk.metrics.MetricEntry;
+import io.logdash.sdk.metrics.MetricType;
+import org.junit.jupiter.api.Test;
+
+import java.util.Optional;
+import java.util.concurrent.CompletableFuture;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+class LogdashTransportTest {
+
+ @Test
+ void should_define_contract_for_log_sending() {
+ LogdashTransport transport = new TestTransport();
+
+ var logEntry = new LogEntry("Test message", LogLevel.INFO, 1L);
+ var future = transport.sendLog(logEntry);
+
+ assertThat(future).isNotNull();
+ assertThat(future).isInstanceOf(CompletableFuture.class);
+ }
+
+ @Test
+ void should_define_contract_for_metric_sending() {
+ LogdashTransport transport = new TestTransport();
+
+ var metricEntry = new MetricEntry("test_metric", 42, MetricType.SET);
+ var future = transport.sendMetric(metricEntry);
+
+ assertThat(future).isNotNull();
+ assertThat(future).isInstanceOf(CompletableFuture.class);
+ }
+
+ @Test
+ void should_provide_optional_shutdown_method() {
+ LogdashTransport transport = new TestTransport();
+
+ var shutdownFuture = transport.shutdown();
+
+ assertThat(shutdownFuture).isNotNull();
+ assertThat(shutdownFuture).isInstanceOf(Optional.class);
+ }
+
+ private static class TestTransport implements LogdashTransport {
+ @Override
+ public CompletableFuture<Void> sendLog(LogEntry logEntry) {
+ return CompletableFuture.completedFuture(null);
+ }
+
+ @Override
+ public CompletableFuture<Void> sendMetric(MetricEntry metricEntry) {
+ return CompletableFuture.completedFuture(null);
+ }
+
+ @Override
+ public void close() {
+ // Test implementation
+ }
+ }
+}
diff --git a/src/test/java/io/logdash/sdk/transport/NoOpTransportTest.java b/src/test/java/io/logdash/sdk/transport/NoOpTransportTest.java
new file mode 100644
index 0000000..56e351b
--- /dev/null
+++ b/src/test/java/io/logdash/sdk/transport/NoOpTransportTest.java
@@ -0,0 +1,205 @@
+package io.logdash.sdk.transport;
+
+import io.logdash.sdk.config.LogdashConfig;
+import io.logdash.sdk.log.LogEntry;
+import io.logdash.sdk.log.LogLevel;
+import io.logdash.sdk.metrics.MetricEntry;
+import io.logdash.sdk.metrics.MetricType;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.TimeUnit;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+class NoOpTransportTest {
+
+ private NoOpTransport transport;
+ private LogdashConfig config;
+ private ByteArrayOutputStream capturedOutput;
+ private PrintStream originalOut;
+
+ private static String stripAnsiCodes(String input) {
+ return input.replaceAll("\\u001B\\[[;\\d]*m", "");
+ }
+
+ @BeforeEach
+ void setUp() {
+ capturedOutput = new ByteArrayOutputStream();
+ originalOut = System.out;
+ System.setOut(new PrintStream(capturedOutput));
+
+ config =
+ LogdashConfig.builder()
+ .apiKey("test-key")
+ .enableConsoleOutput(true)
+ .enableVerboseLogging(true)
+ .build();
+ transport = new NoOpTransport(config);
+ }
+
+ @AfterEach
+ void tearDown() {
+ System.setOut(originalOut);
+ if (transport != null) {
+ transport.close();
+ }
+ }
+
+ @Test
+ void should_initialize_with_verbose_logging() {
+ // Assert
+ assertThat(capturedOutput.toString()).contains("NoOp transport initialized");
+ }
+
+ @Test
+ void should_print_log_to_console_when_enabled() throws Exception {
+ // Arrange
+ var logEntry = new LogEntry("Test message", LogLevel.INFO, 1L);
+
+ // Act
+ var future = transport.sendLog(logEntry);
+
+ // Assert
+ assertThat(future).succeedsWithin(100, TimeUnit.MILLISECONDS);
+ var output = stripAnsiCodes(capturedOutput.toString());
+ assertThat(output).contains("[LOG]");
+ assertThat(output).contains("INFO");
+ assertThat(output).contains("Test message");
+ assertThat(output).contains("seq=1");
+ }
+
+ @Test
+ void should_print_metric_to_console_when_enabled() throws Exception {
+ // Arrange
+ var metricEntry = new MetricEntry("test_metric", 42, MetricType.SET);
+
+ // Act
+ var future = transport.sendMetric(metricEntry);
+
+ // Assert
+ assertThat(future).succeedsWithin(100, TimeUnit.MILLISECONDS);
+ var output = stripAnsiCodes(capturedOutput.toString());
+ assertThat(output).contains("[METRIC]");
+ assertThat(output).contains("SET");
+ assertThat(output).contains("test_metric");
+ assertThat(output).contains("42");
+ }
+
+ @Test
+ void should_not_print_when_console_output_disabled() throws Exception {
+ // Arrange
+ var configWithoutConsole =
+ LogdashConfig.builder()
+ .apiKey("test-key")
+ .enableConsoleOutput(false)
+ .enableVerboseLogging(false)
+ .build();
+ var silentTransport = new NoOpTransport(configWithoutConsole);
+
+ var logEntry = new LogEntry("Test message", LogLevel.INFO, 1L);
+ var metricEntry = new MetricEntry("test_metric", 42, MetricType.SET);
+
+ // Act
+ var logFuture = silentTransport.sendLog(logEntry);
+ var metricFuture = silentTransport.sendMetric(metricEntry);
+
+ // Assert
+ assertThat(logFuture).succeedsWithin(100, TimeUnit.MILLISECONDS);
+ assertThat(metricFuture).succeedsWithin(100, TimeUnit.MILLISECONDS);
+
+ // Only initialization messages should be present (none in this case)
+ var output = capturedOutput.toString();
+ assertThat(output).doesNotContain("[LOG]");
+ assertThat(output).doesNotContain("[METRIC]");
+
+ silentTransport.close();
+ }
+
+ @Test
+ void should_handle_operations_after_close() throws Exception {
+ // Arrange
+ transport.close();
+ var logEntry = new LogEntry("Test message", LogLevel.INFO, 1L);
+ var metricEntry = new MetricEntry("test_metric", 42, MetricType.SET);
+
+ // Act
+ var logFuture = transport.sendLog(logEntry);
+ var metricFuture = transport.sendMetric(metricEntry);
+
+ // Assert
+ assertThat(logFuture).succeedsWithin(100, TimeUnit.MILLISECONDS);
+ assertThat(metricFuture).succeedsWithin(100, TimeUnit.MILLISECONDS);
+ assertThat(capturedOutput.toString()).contains("transport closed");
+ }
+
+ @Test
+ void should_handle_all_log_levels() throws Exception {
+ // Act & Assert
+ for (LogLevel level : LogLevel.values()) {
+ var logEntry = new LogEntry("Test " + level, level, 1L);
+ var future = transport.sendLog(logEntry);
+ assertThat(future).succeedsWithin(100, TimeUnit.MILLISECONDS);
+ }
+
+ var output = capturedOutput.toString();
+ for (LogLevel level : LogLevel.values()) {
+ assertThat(output).contains(level.getValue().toUpperCase());
+ }
+ }
+
+ @Test
+ void should_handle_all_metric_types() throws Exception {
+ // Act & Assert
+ for (MetricType type : MetricType.values()) {
+ var metricEntry = new MetricEntry("test_" + type, 42, type);
+ var future = transport.sendMetric(metricEntry);
+ assertThat(future).succeedsWithin(100, TimeUnit.MILLISECONDS);
+ }
+
+ var output = capturedOutput.toString();
+ for (MetricType type : MetricType.values()) {
+ assertThat(output).contains(type.getValue().toUpperCase());
+ }
+ }
+
+ @Test
+ void should_handle_special_characters_in_output() throws Exception {
+ // Arrange
+ var logEntry = new LogEntry("Message with émojis 🚀 and quotes \"test\"", LogLevel.INFO, 1L);
+ var metricEntry = new MetricEntry("metric/with-special@chars", 3.14, MetricType.SET);
+
+ // Act
+ transport.sendLog(logEntry);
+ transport.sendMetric(metricEntry);
+
+ // Assert
+ var output = capturedOutput.toString();
+ assertThat(output).contains("émojis 🚀");
+ assertThat(output).contains("quotes \"test\"");
+ assertThat(output).contains("metric/with-special@chars");
+ }
+
+ @Test
+ void should_handle_concurrent_operations() throws Exception {
+ // Arrange
+ var futures = new java.util.ArrayList<CompletableFuture<Void>>();
+
+ // Act
+ for (int i = 0; i < 10; i++) {
+ var logEntry = new LogEntry("Concurrent log " + i, LogLevel.INFO, i);
+ var metricEntry = new MetricEntry("concurrent_metric_" + i, i, MetricType.SET);
+
+ futures.add(transport.sendLog(logEntry));
+ futures.add(transport.sendMetric(metricEntry));
+ }
+
+ // Assert
+ var allFutures = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
+ assertThat(allFutures).succeedsWithin(1, TimeUnit.SECONDS);
+ }
+}
diff --git a/src/test/java/io/logdash/sdk/util/JsonSerializerEdgeCasesTest.java b/src/test/java/io/logdash/sdk/util/JsonSerializerEdgeCasesTest.java
new file mode 100644
index 0000000..2d86247
--- /dev/null
+++ b/src/test/java/io/logdash/sdk/util/JsonSerializerEdgeCasesTest.java
@@ -0,0 +1,111 @@
+package io.logdash.sdk.util;
+
+import io.logdash.sdk.log.LogEntry;
+import io.logdash.sdk.log.LogLevel;
+import io.logdash.sdk.metrics.MetricEntry;
+import io.logdash.sdk.metrics.MetricType;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.ValueSource;
+
+import java.time.Instant;
+import java.util.Map;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+class JsonSerializerEdgeCasesTest {
+
+ private JsonSerializer serializer;
+
+ @BeforeEach
+ void setUp() {
+ serializer = new JsonSerializer();
+ }
+
+ @ParameterizedTest
+ @ValueSource(
+ strings = {
+ "Simple message",
+ "Message with émojis 🚀🎉",
+ "Message\nwith\nnewlines",
+ "Message with \"quotes\"",
+ "Message with 中文字符",
+ ""
+ })
+ void should_handle_various_message_formats(String message) {
+ // Arrange
+ var logEntry = new LogEntry(message, LogLevel.INFO, 1L);
+
+ // Act
+ var json = serializer.serialize(logEntry);
+
+ // Assert
+ assertThat(json).contains("\"message\":");
+ assertThat(json).contains("\"level\":\"info\"");
+ assertThat(json).isNotNull();
+ }
+
+ @Test
+ void should_handle_extremely_large_numbers() {
+ // Arrange
+ var maxLong = new MetricEntry("max_long", Long.MAX_VALUE, MetricType.SET);
+ var minLong = new MetricEntry("min_long", Long.MIN_VALUE, MetricType.SET);
+ var maxDouble = new MetricEntry("max_double", Double.MAX_VALUE, MetricType.SET);
+ var minDouble = new MetricEntry("min_double", -Double.MAX_VALUE, MetricType.SET);
+
+ // Act & Assert
+ assertThat(serializer.serialize(maxLong)).contains(String.valueOf(Long.MAX_VALUE));
+ assertThat(serializer.serialize(minLong)).contains(String.valueOf(Long.MIN_VALUE));
+ assertThat(serializer.serialize(maxDouble)).contains("value");
+ assertThat(serializer.serialize(minDouble)).contains("value");
+ }
+
+ @Test
+ void should_handle_concurrent_serialization() throws InterruptedException {
+ // Arrange
+ var executor = java.util.concurrent.Executors.newFixedThreadPool(10);
+ var latch = new java.util.concurrent.CountDownLatch(100);
+ var results = new java.util.concurrent.ConcurrentLinkedQueue<String>();
+
+ // Act
+ for (int i = 0; i < 100; i++) {
+ final int iteration = i;
+ executor.submit(
+ () -> {
+ try {
+ var logEntry = new LogEntry("Message " + iteration, LogLevel.INFO, iteration);
+ var json = serializer.serialize(logEntry);
+ results.add(json);
+ } finally {
+ latch.countDown();
+ }
+ });
+ }
+
+ // Assert
+ assertThat(latch.await(5, java.util.concurrent.TimeUnit.SECONDS)).isTrue();
+ assertThat(results).hasSize(100);
+ assertThat(results)
+ .allSatisfy(
+ json -> {
+ assertThat(json).contains("\"message\":");
+ assertThat(json).contains("\"level\":\"info\"");
+ });
+
+ executor.shutdown();
+ }
+
+ @Test
+ void should_preserve_timestamp_precision() {
+ // Arrange
+ var timestamp = Instant.parse("2024-12-25T10:15:30.123456789Z");
+ var logEntry = new LogEntry("Precision test", LogLevel.INFO, timestamp, 1L, Map.of());
+
+ // Act
+ var json = serializer.serialize(logEntry);
+
+ // Assert
+ assertThat(json).contains("\"createdAt\":\"2024-12-25T10:15:30.123456789Z\"");
+ }
+}
diff --git a/src/test/java/io/logdash/sdk/util/JsonSerializerTest.java b/src/test/java/io/logdash/sdk/util/JsonSerializerTest.java
new file mode 100644
index 0000000..13a7b50
--- /dev/null
+++ b/src/test/java/io/logdash/sdk/util/JsonSerializerTest.java
@@ -0,0 +1,304 @@
+package io.logdash.sdk.util;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import io.logdash.sdk.log.LogEntry;
+import io.logdash.sdk.log.LogLevel;
+import io.logdash.sdk.metrics.MetricEntry;
+import io.logdash.sdk.metrics.MetricType;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import java.time.Instant;
+import java.util.Map;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+/**
+ * Unit tests for {@code JsonSerializer}, covering log-entry and metric-entry
+ * serialization: field formatting, string escaping, numeric edge cases
+ * (negatives, zero, non-finite doubles, very large values) and timestamp
+ * rendering. Many tests pin exact output fragments of the current serializer.
+ */
+class JsonSerializerTest {
+
+ // Instance under test; recreated before each test.
+ private JsonSerializer serializer;
+
+ @BeforeEach
+ void setUp() {
+ serializer = new JsonSerializer();
+ }
+
+ @Test
+ void should_serialize_simple_log_entry() {
+ // Arrange
+ var timestamp = Instant.parse("2024-06-01T12:34:56Z");
+ var logEntry = new LogEntry("Test message", LogLevel.INFO, timestamp, 42L, Map.of());
+
+ // Act
+ var json = serializer.serialize(logEntry);
+
+ // Assert
+ assertThat(json).contains("\"message\":\"Test message\"");
+ assertThat(json).contains("\"level\":\"info\"");
+ assertThat(json).contains("\"createdAt\":\"2024-06-01T12:34:56Z\"");
+ assertThat(json).contains("\"sequenceNumber\":42");
+ }
+
+ @Test
+ void should_serialize_log_entry_with_special_characters() {
+ // Arrange
+ var logEntry = new LogEntry("Message with \"quotes\" and \n newlines", LogLevel.ERROR, 1L);
+
+ // Act
+ var json = serializer.serialize(logEntry);
+
+ // Assert
+ assertThat(json).contains("\\\"quotes\\\"");
+ assertThat(json).contains("\\n");
+ }
+
+ @Test
+ void should_serialize_metric_entry_with_integer_value() throws Exception {
+ // Arrange
+ var metricEntry = new MetricEntry("user_count", 100, MetricType.SET);
+
+ // Act
+ var json = serializer.serialize(metricEntry);
+
+ // Assert
+ // Cross-checked with Jackson to validate the hand-rolled output is parseable JSON.
+ ObjectMapper mapper = new ObjectMapper();
+ JsonNode node = mapper.readTree(json);
+ assertThat(node.get("name").asText()).isEqualTo("user_count");
+ assertThat(node.get("value").asInt()).isEqualTo(100);
+ assertThat(node.get("operation").asText()).isEqualTo("set");
+ }
+
+ @Test
+ void should_serialize_metric_entry_with_double_value() {
+ // Arrange
+ var metricEntry = new MetricEntry("temperature", 23.5, MetricType.MUTATE);
+
+ // Act
+ var json = serializer.serialize(metricEntry);
+
+ // Assert
+ // MUTATE is expected to serialize to the API operation string "change".
+ assertThat(json).contains("\"name\":\"temperature\"");
+ assertThat(json).contains("\"value\":23.5");
+ assertThat(json).contains("\"operation\":\"change\"");
+ }
+
+ @Test
+ void should_handle_metric_with_special_characters_in_name() {
+ // Arrange
+ var metricEntry = new MetricEntry("metric/with\"special\\chars", 42, MetricType.SET);
+
+ // Act
+ var json = serializer.serialize(metricEntry);
+
+ // Assert
+ assertThat(json).contains("metric/with\\\"special\\\\chars");
+ }
+
+ @Test
+ void should_handle_null_metric_name_gracefully() {
+ // Validation happens in the MetricEntry constructor, before serialization.
+ assertThatThrownBy(() -> new MetricEntry(null, 42, MetricType.SET))
+ .isInstanceOf(IllegalArgumentException.class)
+ .hasMessageContaining("Metric name cannot be null");
+ }
+
+ @Test
+ void should_serialize_log_with_unicode_characters() {
+ // Arrange
+ var logEntry = new LogEntry("Message with émojis 🚀 and ñáéíóú", LogLevel.INFO, 1L);
+
+ // Act
+ var json = serializer.serialize(logEntry);
+
+ // Assert: non-ASCII characters pass through unescaped.
+ assertThat(json).contains("émojis 🚀 and ñáéíóú");
+ }
+
+ @Test
+ void should_handle_empty_and_single_character_strings() {
+ // Arrange
+ var emptyEntry = new LogEntry("", LogLevel.INFO, 1L);
+ var singleCharEntry = new LogEntry("x", LogLevel.INFO, 2L);
+
+ // Act
+ var emptyJson = serializer.serialize(emptyEntry);
+ var singleCharJson = serializer.serialize(singleCharEntry);
+
+ // Assert
+ assertThat(emptyJson).contains("\"message\":\"\"");
+ assertThat(singleCharJson).contains("\"message\":\"x\"");
+ }
+
+ @Test
+ void should_handle_all_control_characters_properly() {
+ // Arrange
+ var message = "Test\b\f\r\t\u0001\u001F";
+ var logEntry = new LogEntry(message, LogLevel.INFO, 1L);
+
+ // Act
+ var json = serializer.serialize(logEntry);
+
+ // Assert
+ // NOTE(review): the doesNotContain checks pin the CURRENT behavior — \b, \f
+ // and other low control characters are NOT escaped by the serializer. For
+ // such inputs the output is technically invalid JSON (RFC 8259 requires
+ // escaping U+0000..U+001F); confirm this is intentional.
+ assertThat(json).contains("\\r");
+ assertThat(json).contains("\\t");
+ assertThat(json).doesNotContain("\\b");
+ assertThat(json).doesNotContain("\\f");
+ assertThat(json).doesNotContain("\\u0001");
+ assertThat(json).doesNotContain("\\u001F");
+ }
+
+ @Test
+ void should_handle_negative_numbers() {
+ // Arrange
+ var negativeInt = new MetricEntry("negative", -42, MetricType.SET);
+ var negativeDouble = new MetricEntry("negative_double", -3.14, MetricType.MUTATE);
+
+ // Act
+ var intJson = serializer.serialize(negativeInt);
+ var doubleJson = serializer.serialize(negativeDouble);
+
+ // Assert
+ assertThat(intJson).contains("-42");
+ assertThat(doubleJson).contains("-3.14");
+ }
+
+ @Test
+ void should_handle_zero_values() {
+ // Arrange
+ var zeroInt = new MetricEntry("zero", 0, MetricType.SET);
+ var zeroDouble = new MetricEntry("zero_double", 0.0, MetricType.SET);
+
+ // Act
+ var intJson = serializer.serialize(zeroInt);
+ var doubleJson = serializer.serialize(zeroDouble);
+
+ // Assert
+ assertThat(intJson).contains("\"value\":0");
+ assertThat(doubleJson).contains("\"value\":0");
+ }
+
+ @Test
+ void should_handle_very_long_strings() {
+ // Arrange: 10k characters to exercise buffer growth in the serializer.
+ var longMessage = "A".repeat(10000);
+ var logEntry = new LogEntry(longMessage, LogLevel.INFO, 1L);
+
+ // Act
+ var json = serializer.serialize(logEntry);
+
+ // Assert
+ assertThat(json).contains(longMessage);
+ assertThat(json.length()).isGreaterThan(10000);
+ }
+
+ @Test
+ void should_handle_nan_and_infinite_double_values() {
+ // Arrange
+ var nanMetric = new MetricEntry("nan_value", Double.NaN, MetricType.SET);
+ var infiniteMetric =
+ new MetricEntry("infinite_value", Double.POSITIVE_INFINITY, MetricType.SET);
+ var negInfiniteMetric =
+ new MetricEntry("neg_infinite", Double.NEGATIVE_INFINITY, MetricType.SET);
+
+ // Act
+ var nanJson = serializer.serialize(nanMetric);
+ var infiniteJson = serializer.serialize(infiniteMetric);
+ var negInfiniteJson = serializer.serialize(negInfiniteMetric);
+
+ // Assert: JSON has no NaN/Infinity literals, so the serializer is expected
+ // to normalize non-finite doubles to 0.0.
+ assertThat(nanJson).contains("\"value\":0.0");
+ assertThat(infiniteJson).contains("\"value\":0.0");
+ assertThat(negInfiniteJson).contains("\"value\":0.0");
+ }
+
+ @Test
+ void should_format_large_integer_values_correctly() {
+ // Arrange
+ var largeInt = new MetricEntry("large_int", Long.MAX_VALUE, MetricType.SET);
+ var largeDouble = new MetricEntry("large_double", 1.23456789012345e14, MetricType.SET);
+
+ // Act
+ var intJson = serializer.serialize(largeInt);
+ var doubleJson = serializer.serialize(largeDouble);
+
+ // Assert
+ assertThat(intJson).contains(String.valueOf(Long.MAX_VALUE));
+ // For large doubles, accept scientific notation as valid
+ assertThat(doubleJson)
+ .satisfiesAnyOf(
+ json -> assertThat(json).contains("123456789012345"),
+ json -> assertThat(json).contains("1.23456789012345E14"),
+ json -> assertThat(json).contains("1.23456789012345e14"));
+ }
+
+ @Test
+ void should_handle_log_entries_with_timestamp_edge_cases() {
+ // Arrange
+ var epoch = Instant.EPOCH;
+ var farFuture = Instant.parse("2099-12-31T23:59:59Z");
+
+ var epochEntry = new LogEntry("Epoch", LogLevel.INFO, epoch, 1L, Map.of());
+ var futureEntry = new LogEntry("Future", LogLevel.INFO, farFuture, 2L, Map.of());
+
+ // Act
+ var epochJson = serializer.serialize(epochEntry);
+ var futureJson = serializer.serialize(futureEntry);
+
+ // Assert
+ assertThat(epochJson).contains("\"createdAt\":\"1970-01-01T00:00:00Z\"");
+ assertThat(futureJson).contains("\"createdAt\":\"2099-12-31T23:59:59Z\"");
+ }
+
+ @Test
+ void should_handle_different_number_types() {
+ // Arrange: one metric per boxed numeric type accepted by MetricEntry.
+ var intMetric = new MetricEntry("int_metric", 42, MetricType.SET);
+ var longMetric = new MetricEntry("long_metric", 123456789L, MetricType.SET);
+ var floatMetric = new MetricEntry("float_metric", 3.14f, MetricType.SET);
+ var doubleMetric = new MetricEntry("double_metric", 2.718281828, MetricType.SET);
+
+ // Act & Assert
+ assertThat(serializer.serialize(intMetric)).contains("\"value\":42");
+ assertThat(serializer.serialize(longMetric)).contains("\"value\":123456789");
+ assertThat(serializer.serialize(floatMetric)).contains("\"value\":3.14");
+ assertThat(serializer.serialize(doubleMetric)).contains("\"value\":2.718281828");
+ }
+
+ @Test
+ void should_escape_all_json_control_characters() {
+ // Arrange
+ var message = "Test\"\\\b\f\n\r\t\u0000\u001F";
+ var logEntry = new LogEntry(message, LogLevel.INFO, 1L);
+
+ // Act
+ var json = serializer.serialize(logEntry);
+
+ // Assert
+ // NOTE(review): despite the test name, the trailing doesNotContain checks
+ // assert that \b, \f, \u0000 and \u001F are NOT escaped — same caveat as
+ // should_handle_all_control_characters_properly above; confirm intended.
+ assertThat(json).contains("\\\""); // quote
+ assertThat(json).contains("\\\\"); // backslash
+ assertThat(json).contains("\\n"); // newline
+ assertThat(json).contains("\\r"); // carriage return
+ assertThat(json).contains("\\t"); // tab
+ assertThat(json).doesNotContain("\\b"); // backspace
+ assertThat(json).doesNotContain("\\f"); // form feed
+ assertThat(json).doesNotContain("\\u0000"); // null character
+ assertThat(json).doesNotContain("\\u001F"); // unit separator
+ }
+
+ @Test
+ void should_handle_empty_strings_and_contexts() {
+ // Arrange
+ var emptyMessage = new LogEntry("", LogLevel.INFO, 1L);
+
+ // Act
+ var logJson = serializer.serialize(emptyMessage);
+
+ // Assert
+ assertThat(logJson).contains("\"message\":\"\"");
+
+ // MetricEntry with empty name should throw
+ assertThatThrownBy(() -> new MetricEntry("", 0, MetricType.SET))
+ .isInstanceOf(IllegalArgumentException.class)
+ .hasMessageContaining("Metric name cannot be null or blank");
+ }
+}