Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .gitmodules
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
[submodule "providers/flagd/test-harness"]
path = providers/flagd/test-harness
url = https://github.com/open-feature/test-harness.git
branch = v3.0.1
branch = feat/add-env-var-tag
[submodule "providers/flagd/spec"]
path = providers/flagd/spec
url = https://github.com/open-feature/spec.git
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
package dev.openfeature.contrib.providers.flagd;

import static io.cucumber.junit.platform.engine.Constants.GLUE_PROPERTY_NAME;
import static io.cucumber.junit.platform.engine.Constants.PARALLEL_EXECUTION_ENABLED_PROPERTY_NAME;
import static io.cucumber.junit.platform.engine.Constants.PLUGIN_PROPERTY_NAME;

import org.junit.jupiter.api.Order;
Expand All @@ -17,6 +18,11 @@
@Suite
@IncludeEngines("cucumber")
@SelectFile("test-harness/gherkin/config.feature")
// JUnit Platform configuration parameters form a key/value map, so declaring
// PLUGIN_PROPERTY_NAME twice makes one value silently shadow the other (the "pretty"
// plugin was being dropped). Cucumber's plugin property accepts a comma-separated list.
@ConfigurationParameter(key = PLUGIN_PROPERTY_NAME, value = "pretty, summary")
@ConfigurationParameter(key = GLUE_PROPERTY_NAME, value = "dev.openfeature.contrib.providers.flagd.e2e.steps.config")
// Config scenarios read System env vars in FlagdOptions.build() and some scenarios also
// mutate them. Parallel execution causes env-var races (e.g. FLAGD_PORT=3456 leaking into
// a "Default Config" scenario that expects 8015). Since the entire suite runs in <0.4s,
// parallelism offers no benefit here — run sequentially for correctness.
@ConfigurationParameter(key = PARALLEL_EXECUTION_ENABLED_PROPERTY_NAME, value = "false")
public class ConfigCucumberTest {}
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
package dev.openfeature.contrib.providers.flagd.e2e;

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Duration;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.testcontainers.containers.ComposeContainer;
import org.testcontainers.containers.wait.strategy.Wait;

/** A single pre-warmed Docker Compose stack (flagd + envoy) and its associated temp directory. */
public class ContainerEntry {

    /** Envoy port used by scenarios that need a distinct, separately exposed endpoint. */
    public static final int FORBIDDEN_PORT = 9212;

    public final ComposeContainer container;
    public final Path tempDir;

    private ContainerEntry(ComposeContainer container, Path tempDir) {
        this.container = container;
        this.tempDir = tempDir;
    }

    /**
     * Start a new container entry. Blocks until all services are ready.
     *
     * @return a started entry whose exposed ports are accepting connections
     * @throws IOException if the per-entry flags directory cannot be created
     */
    public static ContainerEntry start() throws IOException {
        // Each entry gets its own flags directory so parallel scenarios writing flag files
        // cannot interfere with each other. Kept under ./tmp rather than the system temp
        // dir — presumably so Docker can bind-mount it reliably; TODO confirm.
        Path tempDir = Files.createDirectories(
                Paths.get("tmp/" + RandomStringUtils.randomAlphanumeric(8).toLowerCase() + "/"));

        ComposeContainer container = new ComposeContainer(new File("test-harness/docker-compose.yaml"))
                .withEnv("FLAGS_DIR", tempDir.toAbsolutePath().toString())
                .withExposedService("flagd", 8013, Wait.forListeningPort())
                .withExposedService("flagd", 8015, Wait.forListeningPort())
                .withExposedService("flagd", 8080, Wait.forListeningPort())
                .withExposedService("envoy", 9211, Wait.forListeningPort())
                .withExposedService("envoy", FORBIDDEN_PORT, Wait.forListeningPort())
                .withStartupTimeout(Duration.ofSeconds(45));
        container.start();

        return new ContainerEntry(container, tempDir);
    }

    /**
     * Stop the container and clean up the temp directory.
     *
     * @throws IOException if the temp directory cannot be deleted
     */
    public void stop() throws IOException {
        try {
            container.stop();
        } finally {
            // Always remove the temp dir, even if stopping the compose stack throws —
            // otherwise repeated failures accumulate stale directories under ./tmp.
            FileUtils.deleteDirectory(tempDir.toFile());
        }
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,134 @@
package dev.openfeature.contrib.providers.flagd.e2e;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicBoolean;
import lombok.extern.slf4j.Slf4j;

/**
 * A pool of pre-warmed {@link ContainerEntry} instances.
 *
 * <p>All containers are started in parallel on the first {@link #acquire()} call, paying the
 * Docker Compose startup cost only once per JVM. Scenarios borrow a container via
 * {@link #acquire()} and return it via {@link #release(ContainerEntry)} after teardown.
 *
 * <p>Cleanup is handled automatically via a JVM shutdown hook — no explicit lifecycle calls are
 * needed from test classes. This means multiple test classes (e.g. several {@code @Suite} runners
 * or {@code @TestFactory} methods) share the same pool across the entire JVM lifetime without
 * redundant container startups.
 *
 * <p>Pool size is controlled by the system property {@code flagd.e2e.pool.size}
 * (default: min(availableProcessors, 4)).
 */
@Slf4j
public class ContainerPool {

    private static final int POOL_SIZE = Integer.getInteger(
            "flagd.e2e.pool.size", Math.min(Runtime.getRuntime().availableProcessors(), 4));

    /** Idle containers ready to be borrowed; {@link #acquire()} blocks on this queue. */
    private static final BlockingQueue<ContainerEntry> pool = new LinkedBlockingQueue<>();

    /** Every successfully started container, borrowed or idle — stopped by the shutdown hook. */
    private static final List<ContainerEntry> all = new ArrayList<>();

    private static final AtomicBoolean initialized = new AtomicBoolean(false);

    /**
     * JVM-wide semaphore that serializes disruptive container operations (stop/restart) across all
     * parallel Cucumber engines. Only one scenario at a time may bring a container down, preventing
     * cascading initialization timeouts in sibling scenarios that are waiting for a container slot.
     */
    private static final Semaphore restartSlot = new Semaphore(1);

    static {
        Runtime.getRuntime().addShutdownHook(new Thread(ContainerPool::stopAll, "container-pool-shutdown"));
    }

    /**
     * Borrow a container from the pool, blocking until one becomes available.
     * Initializes the pool on the first call. The caller MUST call
     * {@link #release(ContainerEntry)} when done.
     *
     * @return a started container entry, exclusively owned until released
     * @throws Exception if pool initialization fails
     */
    public static ContainerEntry acquire() throws Exception {
        ensureInitialized();
        return pool.take();
    }

    /** Return a container to the pool so the next scenario can use it. */
    public static void release(ContainerEntry entry) {
        pool.add(entry);
    }

    /**
     * Acquires the JVM-wide restart slot before stopping or restarting a container.
     * Must be paired with {@link #releaseRestartSlot()} in the scenario {@code @After} hook.
     *
     * @throws InterruptedException if interrupted while waiting for the slot
     */
    public static void acquireRestartSlot() throws InterruptedException {
        log.debug("Acquiring restart slot...");
        restartSlot.acquire();
        log.debug("Restart slot acquired.");
    }

    /** Releases the JVM-wide restart slot acquired by {@link #acquireRestartSlot()}. */
    public static void releaseRestartSlot() {
        restartSlot.release();
        log.debug("Restart slot released.");
    }

    /** Starts all containers in parallel on first use; no-op on subsequent calls. */
    private static void ensureInitialized() throws Exception {
        if (initialized.get()) {
            return;
        }
        synchronized (ContainerPool.class) {
            if (!initialized.compareAndSet(false, true)) {
                // Another thread won the race; its containers will land in `pool`,
                // where acquire() picks them up.
                return;
            }
            log.info("Starting container pool of size {}...", POOL_SIZE);
            ExecutorService executor = Executors.newFixedThreadPool(POOL_SIZE);
            List<Future<ContainerEntry>> futures = new ArrayList<>();
            try {
                for (int i = 0; i < POOL_SIZE; i++) {
                    futures.add(executor.submit(ContainerEntry::start));
                }
                for (Future<ContainerEntry> future : futures) {
                    ContainerEntry entry = future.get();
                    pool.add(entry);
                    all.add(entry);
                }
            } catch (Exception e) {
                // future.get() throws on the FIRST failure, but sibling tasks may still be
                // starting their containers. Await every future and stop whatever actually
                // came up, so a partial failure does not leak Docker containers or temp dirs.
                for (Future<ContainerEntry> future : futures) {
                    try {
                        future.get().stop();
                    } catch (Exception suppressed) {
                        e.addSuppressed(suppressed);
                    }
                }
                pool.clear();
                all.clear();
                // Allow a later call to retry initialization. NOTE(review): a thread that
                // observed initialized==true before this reset will block in pool.take()
                // until a retry succeeds — confirm this is acceptable for the test runner.
                initialized.set(false);
                throw e;
            } finally {
                executor.shutdown();
            }
            log.info("Container pool ready ({} containers).", POOL_SIZE);
        }
    }

    /** Shutdown-hook target: best-effort stop of every container ever started. */
    private static void stopAll() {
        if (all.isEmpty()) return;
        log.info("Shutdown hook — stopping all containers.");
        all.forEach(entry -> {
            try {
                entry.stop();
            } catch (IOException e) {
                log.warn("Error stopping container entry", e);
            }
        });
        pool.clear();
        all.clear();
    }
}
Original file line number Diff line number Diff line change
@@ -1,6 +1,9 @@
package dev.openfeature.contrib.providers.flagd.e2e;

import dev.openfeature.contrib.providers.flagd.Config;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.util.Optional;
import org.testcontainers.containers.ComposeContainer;
import org.testcontainers.containers.ContainerState;
Expand Down Expand Up @@ -29,4 +32,39 @@ public static String getLaunchpadUrl(ComposeContainer container) {
})
.orElseThrow(() -> new RuntimeException("Could not find launchpad url"));
}

/**
* Blocks until the given flagd service port accepts TCP connections, or the timeout elapses.
* The launchpad's {@code /start} endpoint polls flagd's HTTP {@code /readyz} before returning,
* but the gRPC ports (8013, 8015) may become available slightly later. Waiting here prevents
* {@code setProviderAndWait} from timing out under parallel load.
*/
public static void waitForGrpcPort(ComposeContainer container, Config.Resolver resolver, long timeoutMs)
throws InterruptedException {
int internalPort;
switch (resolver) {
case RPC:
internalPort = 8013;
break;
case IN_PROCESS:
internalPort = 8015;
break;
default:
return;
}
ContainerState state = container
.getContainerByServiceName("flagd")
.orElseThrow(() -> new RuntimeException("Could not find flagd container"));
String host = state.getHost();
int mappedPort = state.getMappedPort(internalPort);
long deadline = System.currentTimeMillis() + timeoutMs;
while (System.currentTimeMillis() < deadline) {
try (Socket s = new Socket()) {
s.connect(new InetSocketAddress(host, mappedPort), 100);
return;
} catch (IOException ignored) {
Thread.sleep(50);
}
}
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
package dev.openfeature.contrib.providers.flagd.e2e;

import java.util.Collections;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import lombok.extern.slf4j.Slf4j;
import org.junit.platform.engine.TestExecutionResult;
import org.junit.platform.engine.reporting.ReportEntry;
import org.junit.platform.launcher.TestExecutionListener;
import org.junit.platform.launcher.TestIdentifier;
import org.junit.platform.launcher.TestPlan;

/**
 * Captures the full lifecycle of a JUnit Platform test execution, tracking start, finish, and skip
 * events for every node in the test plan (both containers and tests). Results are later replayed as
 * JUnit Jupiter {@link org.junit.jupiter.api.DynamicTest} instances to expose the Cucumber scenario
 * tree in IDEs.
 */
@Slf4j
class CucumberResultListener implements TestExecutionListener {

    // Unique IDs of every node for which executionStarted fired.
    private final Set<String> startedIds = ConcurrentHashMap.newKeySet();
    // Final result per unique ID; absence means the node never finished.
    private final Map<String, TestExecutionResult> finishedResults = new ConcurrentHashMap<>();
    // Skip reason per unique ID for nodes skipped before starting.
    private final Map<String, String> skipReasons = new ConcurrentHashMap<>();

    @Override
    public void testPlanExecutionStarted(TestPlan testPlan) {
        log.debug("Cucumber execution started");
    }

    @Override
    public void testPlanExecutionFinished(TestPlan testPlan) {
        log.debug(
                "Cucumber execution finished — started={}, finished={}, skipped={}",
                startedIds.size(),
                finishedResults.size(),
                skipReasons.size());
    }

    @Override
    public void executionStarted(TestIdentifier id) {
        log.debug(" START {}", id.getDisplayName());
        startedIds.add(id.getUniqueId());
    }

    @Override
    public void executionFinished(TestIdentifier id, TestExecutionResult result) {
        finishedResults.put(id.getUniqueId(), result);
        if (result.getStatus() != TestExecutionResult.Status.FAILED) {
            log.debug(" {} {}", result.getStatus(), id.getDisplayName());
            return;
        }
        String message = result.getThrowable().map(Throwable::getMessage).orElse("(no message)");
        log.debug(" FAIL {} — {}", id.getDisplayName(), message);
    }

    @Override
    public void executionSkipped(TestIdentifier id, String reason) {
        skipReasons.put(id.getUniqueId(), reason);
        log.debug(" SKIP {} — {}", id.getDisplayName(), reason);
    }

    @Override
    public void dynamicTestRegistered(TestIdentifier id) {
        log.debug(" DYN {}", id.getDisplayName());
    }

    @Override
    public void reportingEntryPublished(TestIdentifier id, ReportEntry entry) {
        log.debug(" REPORT {} — {}", id.getDisplayName(), entry);
    }

    /** Whether the node with the given unique ID had {@code executionStarted} called. */
    boolean wasStarted(String uniqueId) {
        return startedIds.contains(uniqueId);
    }

    /** Whether the node was skipped before starting. */
    boolean wasSkipped(String uniqueId) {
        return skipReasons.containsKey(uniqueId);
    }

    /** The skip reason for a skipped node, or {@code null} if not skipped. */
    String getSkipReason(String uniqueId) {
        return skipReasons.get(uniqueId);
    }

    /** Whether a finished result was recorded for the given node. */
    boolean hasResult(String uniqueId) {
        return finishedResults.containsKey(uniqueId);
    }

    /**
     * The recorded {@link TestExecutionResult}, or {@code null} if the node never finished.
     * Use {@link #hasResult} to distinguish "finished with success" from "never finished".
     */
    TestExecutionResult getResult(String uniqueId) {
        return finishedResults.get(uniqueId);
    }
}
Loading
Loading