From e8649c6f61d5e42c61cf56c3f7737bf61e34ab65 Mon Sep 17 00:00:00 2001 From: Jingyu Date: Sun, 4 Jan 2026 18:50:59 +0800 Subject: [PATCH 1/3] Add SHA-256 checksum validation to VersionedSchema migrations --- .../database/VersionedSchema.java | 267 +++++++++++------- .../database/VersionedSchemaTests.java | 4 +- .../kademlia/storage/DatabaseStorage.java | 2 +- 3 files changed, 172 insertions(+), 101 deletions(-) diff --git a/api/src/main/java/io/bosonnetwork/database/VersionedSchema.java b/api/src/main/java/io/bosonnetwork/database/VersionedSchema.java index 1d621f9..e86e775 100644 --- a/api/src/main/java/io/bosonnetwork/database/VersionedSchema.java +++ b/api/src/main/java/io/bosonnetwork/database/VersionedSchema.java @@ -26,16 +26,17 @@ import java.io.File; import java.io.FileReader; import java.io.IOException; -import java.nio.file.FileVisitResult; -import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.SimpleFileVisitor; -import java.nio.file.attribute.BasicFileAttributes; +import java.security.MessageDigest; import java.util.ArrayList; import java.util.List; import io.vertx.core.Future; import io.vertx.core.Promise; +import io.vertx.core.Vertx; +import io.vertx.core.file.FileProps; +import io.vertx.core.file.FileSystem; +import io.vertx.core.file.OpenOptions; import io.vertx.sqlclient.Row; import io.vertx.sqlclient.RowSet; import io.vertx.sqlclient.SqlClient; @@ -43,6 +44,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import io.bosonnetwork.utils.Hex; + /** * Simple, file-based schema migration helper for Vert.x SQL clients. *

@@ -51,7 +54,8 @@ *

*/ public class VersionedSchema implements VertxDatabase { - private static final SchemaVersion EMPTY_VERSION = new SchemaVersion(0, "", "", 0, 0, true); + private static final SchemaVersion EMPTY_VERSION = new SchemaVersion(0, "", null,"", 0, 0, true); + private final Vertx vertx; private final SqlClient client; private final Path schemaPath; private String databaseProductName; @@ -64,17 +68,19 @@ public class VersionedSchema implements VertxDatabase { * * @param version schema version number * @param description human-readable description + * @param hash SHA-256 hash of the migration script * @param appliedBy user/process that applied the migration * @param appliedAt timestamp (ms) when started * @param consumedTime duration (ms) spent applying * @param success whether migration succeeded */ - public record SchemaVersion(int version, String description, String appliedBy, long appliedAt, + public record SchemaVersion(int version, String description, String hash, String appliedBy, long appliedAt, long consumedTime, boolean success) {} private static class Migration { private final int version; private String description; + private final String hash; private final Path path; /** @@ -82,11 +88,13 @@ private static class Migration { * * @param version numeric version * @param description textual description + * @param hash SHA-256 hash of the migration script * @param path file path to the SQL script */ - public Migration(int version, String description, Path path) { + public Migration(int version, String description, String hash, Path path) { this.version = version; this.description = description; + this.hash = hash; this.path = path; } @@ -109,7 +117,8 @@ public Path path() { */ } - private VersionedSchema(SqlClient client, Path schemaPath) { + private VersionedSchema(Vertx vertx, SqlClient client, Path schemaPath) { + this.vertx = vertx; this.client = client; this.schemaPath = schemaPath; this.currentVersion = EMPTY_VERSION; @@ -118,12 +127,13 @@ private 
VersionedSchema(SqlClient client, Path schemaPath) { /** * Initializes a new {@link VersionedSchema} instance. * + * @param vertx Vert.x instance * @param client Vert.x SQL client * @param schemaPath directory containing migration SQL files * @return a new versioned schema helper */ - public static VersionedSchema init(SqlClient client, Path schemaPath) { - return new VersionedSchema(client, schemaPath); + public static VersionedSchema init(Vertx vertx, SqlClient client, Path schemaPath) { + return new VersionedSchema(vertx, client, schemaPath); } /** @@ -161,48 +171,77 @@ public Future migrate() { databaseProductName = name; log.debug("Migration check: target database product {}", name); return query(createSchemaVersionTable()).execute(); - }).compose(v -> - getSchemaVersion() - ).compose(v -> { - int version = 0; - if (v != null) { - this.currentVersion = v; - version = this.currentVersion.version(); + }).compose(v -> { + Future> versionsFuture = getSchemaVersions().andThen(ar -> { + if (ar.succeeded()) { + List versions = ar.result(); + if (!versions.isEmpty()) + this.currentVersion = versions.get(versions.size() - 1); + } else { + log.warn("Migration check: error reading schema versions - {}", ar.cause().getMessage()); + } + }); + + Future> migrationsFuture = getMigrations().andThen(ar -> { + if (ar.failed()) + log.warn("Migration check: error reading migrations - {}", ar.cause().getMessage()); + }); + + return Future.all(versionsFuture, migrationsFuture); + }).compose(cf -> { + List versions = cf.resultAt(0); + List migrations = cf.resultAt(1); + + if (migrations.size() < versions.size()) { + log.error("Migration check: database schema version mismatch: {} migrations found, {} recorded", + migrations.size(), versions.size()); + return Future.failedFuture(new IllegalStateException("Database schema version mismatch")); } - try { - return Future.succeededFuture(getNewMigrations(version)); - } catch (IOException | IllegalStateException e) { - return 
Future.failedFuture(new IllegalStateException("Migration check failed", e)); + for (int i = 0; i < versions.size(); i++) { + SchemaVersion v = versions.get(i); + Migration m = migrations.get(i); + if (v.version != m.version) { + log.error("Migration check: database schema version mismatch: {} recorded, {} found - {}", + v.version, m.version, m.fileName()); + return Future.failedFuture(new IllegalStateException("Database schema version mismatch")); + } + if (!v.hash.equals(m.hash)) { + log.error("Migration check: database schema version {} hash mismatch: {} recorded, {} found - {}", + v.version, v.hash, m.hash, m.fileName()); + return Future.failedFuture(new IllegalStateException("Database schema version mismatch")); + } } - }).compose(migrations -> { - if (migrations.isEmpty()) + + if (versions.size() == migrations.size()) { + log.info("Migration check: no new migrations found"); return Future.succeededFuture(); + } - Promise promise = Promise.promise(); Future chain = Future.succeededFuture(); - for (Migration migration : migrations) + for (int i = versions.size(); i < migrations.size(); i++) { + Migration migration = migrations.get(i); chain = chain.compose(na -> applyMigration(migration).map(v -> { this.currentVersion = v; return null; }) ); + } - chain.onComplete(promise); - return promise.future(); + return chain; }); } /** - * Reads the latest successful schema version from the database. + * Reads the current applied schema versions from the database. 
* - * @return a future with the last applied {@link SchemaVersion} or {@code null} + * @return a future with the list of applied {@link SchemaVersion}, empty list if none */ - private Future getSchemaVersion() { - return query(selectSchemaVersion()) + private Future> getSchemaVersions() { + return query(selectSchemaVersions()) .execute() - .map(VersionedSchema::mapToSchemaVersion); + .map(VersionedSchema::mapToSchemaVersions); } private static boolean getBoolean(Row row, String columnName) { @@ -212,112 +251,143 @@ private static boolean getBoolean(Row row, String columnName) { (value instanceof String s && Boolean.parseBoolean(s))); } - private static SchemaVersion mapToSchemaVersion(RowSet rowSet) { + private static List mapToSchemaVersions(RowSet rowSet) { if (rowSet.size() == 0) - return null; + return List.of(); - // first row only - Row row = rowSet.iterator().next(); - int version = row.getInteger("version"); - String description = row.getString("description"); - String appliedBy = row.getString("applied_by"); - long appliedAt = row.getLong("applied_at"); - long consumedTime = row.getLong("consumed_time"); - boolean success = getBoolean(row, "success"); + List versions = new ArrayList<>(rowSet.size()); + for (Row row : rowSet) { + int version = row.getInteger("version"); + String description = row.getString("description"); + String hash = row.getString("hash"); + String appliedBy = row.getString("applied_by"); + long appliedAt = row.getLong("applied_at"); + long consumedTime = row.getLong("consumed_time"); + boolean success = getBoolean(row, "success"); + + SchemaVersion v = new SchemaVersion(version, description, hash, appliedBy, appliedAt, consumedTime, success); + versions.add(v); + } - return new SchemaVersion(version, description, appliedBy, appliedAt, consumedTime, success); + return versions; } /** - * Scans {@code schemaPath} and returns migrations with the version greater than {@code currentVersion}. 
+ * Scans {@code schemaPath} and returns all the migrations. * File names must follow: {@code _.sql}. * - * @param currentVersion the latest applied version - * @return sorted list of pending migrations - * @throws IOException when reading the directory fails - * @throws IllegalStateException on duplicate versions or malformed names + * @return a future with the list of migrations, empty list if none */ - private List getNewMigrations(int currentVersion) throws IOException, IllegalStateException { + private Future> getMigrations() { if (schemaPath == null) { log.warn("Migration check: skipping, no schema migration path set"); - return List.of(); + return Future.succeededFuture(List.of()); } log.info("Migration check: checking for new migrations from {} ...", schemaPath); - List migrations = new ArrayList<>(); - Files.walkFileTree(schemaPath, new SimpleFileVisitor<>() { - @Override - @SuppressWarnings("NullableProblems") - public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) { + FileSystem fs = vertx.fileSystem(); + return fs.readDir(schemaPath.toString()).compose(files -> { + List migrations = new ArrayList<>(); + Future future = Future.succeededFuture(); + + for (String f : files) { + FileProps props = fs.propsBlocking(f); + if (!props.isRegularFile()) { + log.warn("Migration check: ignore non-regular file {}", f); + continue; + } + + Path file = Path.of(f); String name = file.getFileName().toString(); if (!name.endsWith(".sql")) { log.warn("Migration check: ignore non-SQL file {}", name); - return FileVisitResult.CONTINUE; + continue; } - Migration migration; - try { - migration = parseFileName(file); - if (migration.version <= currentVersion) - return FileVisitResult.CONTINUE; - } catch (IllegalStateException e) { - log.warn("Migration check: ignore malformed file name {} - {}", name, e.getMessage()); - return FileVisitResult.CONTINUE; - } + future = future.compose(v -> buildMigration(file).andThen(ar -> { + if (ar.succeeded()) + 
migrations.add(ar.result()); + else + log.warn("Migration check: error build migration from {} - {}", name, ar.cause().getMessage()); + }).mapEmpty()); + } - migrations.add(migration); - return FileVisitResult.CONTINUE; + return future.map(migrations); + }).map(migrations -> { + if (migrations.isEmpty()) { + log.warn("Migration check: no any migrations found"); + return List.of(); } + + migrations.sort((m1, m2) -> { + if (m1.version == m2.version) { + log.error("Migration check: Migration file version must be unique. File names: {} and {}", + m1.fileName(), m2.fileName()); + throw new IllegalStateException("Migration file version must be unique"); + } + + // noinspection ComparatorMethodParameterNotUsed + return Integer.compare(m1.version, m2.version); + }); + + return migrations; }); + } - if (migrations.isEmpty()) { - log.info("Migration check: no new migrations found"); - return List.of(); + private Future sha256(Path path) { + MessageDigest digest; + try { + digest = MessageDigest.getInstance("SHA-256"); + } catch (Exception e) { + return Future.failedFuture(e); } - migrations.sort((m1, m2) -> { - if (m1.version == m2.version) { - log.error("Migration check: Migration file version must be unique. File names: {} and {}", - m1.fileName(), m2.fileName()); - throw new IllegalStateException("Migration file version must be unique"); - } + return vertx.fileSystem().open(path.toString(), new OpenOptions().setRead(true).setWrite(false)).compose(file -> { + Promise promise = Promise.promise(); + + file.handler(buffer -> digest.update(buffer.getBytes())) + .exceptionHandler(e -> { + file.close(); + promise.fail(e); + }) + .endHandler(v -> { + String hash = Hex.encode(digest.digest()); + file.close(); + promise.complete(hash); + }); - // noinspection ComparatorMethodParameterNotUsed - return Integer.compare(m1.version, m2.version); + return promise.future(); }); - - return migrations; } /** - * Parses a migration file name into a {@link Migration}. 
+ * Build a migration file name into a {@link Migration}. * Expected format: {@code _.sql} * * @param file path to the migration file - * @return parsed {@link Migration} - * @throws IllegalStateException if the name does not match the expected pattern + * @return a future with the parsed migration file */ - private static Migration parseFileName(Path file) { + private Future buildMigration(Path file) { String fileName = file.getFileName().toString(); String[] parts = fileName.split("_", 2); if (parts.length != 2) - throw new IllegalStateException("Migration file name must be in format _.sql"); + return Future.failedFuture(new IllegalStateException("Migration file name must be in format _.sql")); int version; try { version = Integer.parseInt(parts[0]); } catch (NumberFormatException e) { - throw new IllegalStateException("Migration file name must be in format _.sql"); + return Future.failedFuture(new IllegalStateException("Migration file name must be in format _.sql")); } int dotIndex = parts[1].lastIndexOf('.'); String baseName = (dotIndex == -1) ? 
parts[1] : parts[1].substring(0, dotIndex); if (baseName.isEmpty()) - throw new IllegalStateException("Migration file name must be in format _.sql"); + return Future.failedFuture(new IllegalStateException("Migration file name must be in format _.sql")); String description = baseName.replace('_', ' '); - return new Migration(version, description, file); + return sha256(file).map(hash -> new Migration(version, description, hash, file)); } /** @@ -520,11 +590,12 @@ private Future applyMigration(Migration migration) { log.info("Migration: applied migration file {} in {} ms", migration.fileName(), duration); log.debug("Migration: updating schema version..."); SchemaVersion newVersion = new SchemaVersion(migration.version, migration.description, - "", begin, duration, true); + migration.hash, "", begin, duration, true); return connection.preparedQuery(insertSchemaVersion()) .execute( Tuple.of(newVersion.version, newVersion.description, + newVersion.hash, newVersion.appliedBy, newVersion.appliedAt, newVersion.consumedTime, @@ -555,8 +626,8 @@ protected String createSchemaVersionTable() { * * @return SQL for selecting the latest successful schema version */ - protected String selectSchemaVersion() { - return selectSchemaVersion; + protected String selectSchemaVersions() { + return selectSchemaVersions; } /** @@ -576,26 +647,26 @@ protected String insertSchemaVersion() { CREATE TABLE IF NOT EXISTS schema_versions( version INTEGER PRIMARY KEY, description VARCHAR(512) UNIQUE DEFAULT NULL, + hash VARCHAR(128) NOT NULL, applied_by VARCHAR(128), applied_at BIGINT NOT NULL, consumed_time BIGINT DEFAULT 0, success BOOLEAN NOT NULL) """; - private static final String selectSchemaVersion = """ + private static final String selectSchemaVersions = """ SELECT * FROM schema_versions WHERE success = TRUE - ORDER BY version DESC - LIMIT 1 + ORDER BY version ASC """; private static final String insertSchemaVersionWithQuestionMarks = """ - INSERT INTO schema_versions(version, description, 
applied_by, applied_at, consumed_time, success) - VALUES(?, ?, ?, ?, ?, ?) + INSERT INTO schema_versions(version, description, hash, applied_by, applied_at, consumed_time, success) + VALUES(?, ?, ?, ?, ?, ?, ?) """; private static final String insertSchemaVersionWithIndexedParameters = """ - INSERT INTO schema_versions(version, description, applied_by, applied_at, consumed_time, success) - VALUES($1, $2, $3, $4, $5, $6) + INSERT INTO schema_versions(version, description, hash, applied_by, applied_at, consumed_time, success) + VALUES($1, $2, $3, $4, $5, $6, $7) """; } \ No newline at end of file diff --git a/api/src/test/java/io/bosonnetwork/database/VersionedSchemaTests.java b/api/src/test/java/io/bosonnetwork/database/VersionedSchemaTests.java index 050ad85..e8abf1b 100644 --- a/api/src/test/java/io/bosonnetwork/database/VersionedSchemaTests.java +++ b/api/src/test/java/io/bosonnetwork/database/VersionedSchemaTests.java @@ -97,10 +97,10 @@ static Stream testDatabaseProvider() { @ParameterizedTest(name = "{0}") @MethodSource("testDatabaseProvider") @Timeout(value = 2, timeUnit = TimeUnit.MINUTES) - void testMigrate(String name, SqlClient client, VertxTestContext context) { + void testMigrate(String name, SqlClient client, Vertx vertx, VertxTestContext context) { Path schemaPath = Path.of(getClass().getResource("/db/schema_test/" + name).getPath()); - VersionedSchema schema = VersionedSchema.init(client, schemaPath); + VersionedSchema schema = VersionedSchema.init(vertx, client, schemaPath); schema.migrate().onComplete(context.succeeding(v -> { context.verify(() -> { var sv = schema.getCurrentVersion(); diff --git a/dht/src/main/java/io/bosonnetwork/kademlia/storage/DatabaseStorage.java b/dht/src/main/java/io/bosonnetwork/kademlia/storage/DatabaseStorage.java index 6638f01..5d12f2b 100644 --- a/dht/src/main/java/io/bosonnetwork/kademlia/storage/DatabaseStorage.java +++ b/dht/src/main/java/io/bosonnetwork/kademlia/storage/DatabaseStorage.java @@ -64,7 +64,7 @@ public 
Future initialize(Vertx vertx, long valueExpiration, long peerIn this.valueExpiration = valueExpiration; this.peerInfoExpiration = peerInfoExpiration; - VersionedSchema schema = VersionedSchema.init(getClient(), getSchemaPath()); + VersionedSchema schema = VersionedSchema.init(vertx, getClient(), getSchemaPath()); return schema.migrate().andThen(ar -> { if (ar.succeeded()) { schemaVersion = schema.getCurrentVersion().version(); From c5364def6da6d41245392e2d2d2378928a5d13fe Mon Sep 17 00:00:00 2001 From: Jingyu Date: Sun, 4 Jan 2026 22:24:00 +0800 Subject: [PATCH 2/3] Add multi-schema (multi-tenant) support and javadoc improvements to VersionedSchema --- .../database/VersionedSchema.java | 296 ++++++++++++------ .../database/VersionedSchemaTests.java | 40 +++ 2 files changed, 237 insertions(+), 99 deletions(-) diff --git a/api/src/main/java/io/bosonnetwork/database/VersionedSchema.java b/api/src/main/java/io/bosonnetwork/database/VersionedSchema.java index e86e775..5923eb9 100644 --- a/api/src/main/java/io/bosonnetwork/database/VersionedSchema.java +++ b/api/src/main/java/io/bosonnetwork/database/VersionedSchema.java @@ -30,6 +30,7 @@ import java.security.MessageDigest; import java.util.ArrayList; import java.util.List; +import java.util.function.Function; import io.vertx.core.Future; import io.vertx.core.Promise; @@ -40,6 +41,7 @@ import io.vertx.sqlclient.Row; import io.vertx.sqlclient.RowSet; import io.vertx.sqlclient.SqlClient; +import io.vertx.sqlclient.SqlConnection; import io.vertx.sqlclient.Tuple; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,16 +49,36 @@ import io.bosonnetwork.utils.Hex; /** - * Simple, file-based schema migration helper for Vert.x SQL clients. - *

- * Reads migration SQL files from a directory, detects the database flavor, - * and applies pending migrations transactionally, recording versions in a schema_versions table. + * A lightweight, file-based schema migration helper for Vert.x SQL clients. + * + *

This component applies versioned SQL migrations located in a directory, + * records applied versions in a {@code schema_versions} table, and ensures + * migration integrity via SHA-256 checksum validation.

+ * + *

Features: + *

    + *
  • Versioned migrations using {@code <version>_<description>.sql} naming
  • + *
  • Transactional execution of migrations
  • + *
  • Checksum verification to detect script tampering
  • + *
  • Optional PostgreSQL schema isolation via {@code SET search_path}
  • + *
  • Compatible with PostgreSQL and SQLite (Vert.x SQL clients)
  • + *
+ * + *

This class is designed for application-managed schema migrations and + * intentionally avoids external JDBC-based migration frameworks.

+ * + *

Future extensions: + *

    + *
  • Baseline support for existing schemas
  • + *
  • Repair support for checksum mismatch recovery
  • + *
*

*/ public class VersionedSchema implements VertxDatabase { private static final SchemaVersion EMPTY_VERSION = new SchemaVersion(0, "", null,"", 0, 0, true); private final Vertx vertx; private final SqlClient client; + private final String schema; private final Path schemaPath; private String databaseProductName; private SchemaVersion currentVersion; @@ -64,15 +86,15 @@ public class VersionedSchema implements VertxDatabase { private static final Logger log = LoggerFactory.getLogger(VersionedSchema.class); /** - * Immutable record of a migration application state. + * Immutable record representing an applied schema migration. * - * @param version schema version number - * @param description human-readable description - * @param hash SHA-256 hash of the migration script - * @param appliedBy user/process that applied the migration - * @param appliedAt timestamp (ms) when started - * @param consumedTime duration (ms) spent applying - * @param success whether migration succeeded + * @param version numeric migration version + * @param description human-readable description (from file name or SQL comment) + * @param hash SHA-256 checksum of the migration script + * @param appliedBy identifier of the user or process applying the migration + * @param appliedAt timestamp in milliseconds when migration started + * @param consumedTime duration in milliseconds spent applying the migration + * @param success whether the migration completed successfully */ public record SchemaVersion(int version, String description, String hash, String appliedBy, long appliedAt, long consumedTime, boolean success) {} @@ -117,23 +139,43 @@ public Path path() { */ } - private VersionedSchema(Vertx vertx, SqlClient client, Path schemaPath) { + private VersionedSchema(Vertx vertx, SqlClient client, String schema, Path schemaPath) { this.vertx = vertx; this.client = client; + this.schema = schema; this.schemaPath = schemaPath; this.currentVersion = EMPTY_VERSION; } /** - * Initializes a new {@link 
VersionedSchema} instance. + * Initializes a {@link VersionedSchema} using the database default schema. + * + *

Migrations will be applied using the database's default schema + * (for example {@code public} in PostgreSQL).

* * @param vertx Vert.x instance * @param client Vert.x SQL client * @param schemaPath directory containing migration SQL files - * @return a new versioned schema helper + * @return a new {@link VersionedSchema} instance */ public static VersionedSchema init(Vertx vertx, SqlClient client, Path schemaPath) { - return new VersionedSchema(vertx, client, schemaPath); + return new VersionedSchema(vertx, client, null, schemaPath); + } + + /** + * Initializes a new instance of {@link VersionedSchema}. + * + * @param vertx the Vert.x instance used for database operations and event loops + * @param client the SQL client used for executing migrations + * @param schema the schema name where migrations will be applied + * @param schemaPath the path to the directory containing migration SQL files + * @return a new {@link VersionedSchema} instance configured for the provided parameters + */ + public static VersionedSchema init(Vertx vertx, SqlClient client, String schema, Path schemaPath) { + if (schema != null && !schema.matches("[a-z][a-z0-9_]{0,31}")) + throw new IllegalArgumentException("Invalid schema name"); + + return new VersionedSchema(vertx, client, schema, schemaPath); } /** @@ -156,31 +198,49 @@ public SchemaVersion getCurrentVersion() { } /** - * Discovers and applies pending migrations found under {@code schemaPath}. + * Discovers and applies pending schema migrations. + * + *

The migration process consists of: *

    - *
  1. Ensures the schema_versions table exists.
  2. - *
  3. Reads the latest applied version.
  4. - *
  5. Parses and sorts new migration files.
  6. - *
  7. Applies each migration in order, transactionally.
  8. + *
  9. Detecting the database product
  10. + *
  11. Creating the target schema (PostgreSQL only, if configured)
  12. + *
  13. Ensuring the {@code schema_versions} table exists
  14. + *
  15. Loading applied migration history
  16. + *
  17. Validating migration order and checksums
  18. + *
  19. Applying new migrations transactionally
  20. *
* - * @return a future completed when all pending migrations are applied + *

If an already-applied migration differs in version or checksum, + * the migration process fails immediately.

+ * + * @return a future that completes when all pending migrations have been applied, + * or fails if validation or execution fails */ public Future migrate() { return getDatabaseProductName().compose(name -> { databaseProductName = name; log.debug("Migration check: target database product {}", name); - return query(createSchemaVersionTable()).execute(); - }).compose(v -> { - Future> versionsFuture = getSchemaVersions().andThen(ar -> { - if (ar.succeeded()) { - List versions = ar.result(); - if (!versions.isEmpty()) - this.currentVersion = versions.get(versions.size() - 1); - } else { - log.warn("Migration check: error reading schema versions - {}", ar.cause().getMessage()); - } - }); + + if (!databaseProductName.toLowerCase().contains("postgres") && schema != null) + return Future.failedFuture(new IllegalStateException("Schema migration with custom schema is not supported for " + databaseProductName)); + + return Future.succeededFuture(); + }).compose(na -> { + Future> versionsFuture = withTransaction(c -> + createSchema(c) + .compose(v -> setSchema(c)) + .compose(v -> createSchemaVersionTable(c)) + .compose(v -> getSchemaVersions(c)) + .andThen(ar -> { + if (ar.succeeded()) { + List versions = ar.result(); + if (!versions.isEmpty()) + this.currentVersion = versions.get(versions.size() - 1); + } else { + log.warn("Migration check: error init or reading schema versions - {}", ar.cause().getMessage()); + } + }) + ); Future> migrationsFuture = getMigrations().andThen(ar -> { if (ar.failed()) @@ -233,50 +293,55 @@ public Future migrate() { }); } + private Future withSchemaTransaction(Function> function) { + return withTransaction(c -> setSchema(c).compose(v -> function.apply(c))); + } + /** * Reads the current applied schema versions from the database. 
* * @return a future with the list of applied {@link SchemaVersion}, empty list if none */ - private Future> getSchemaVersions() { - return query(selectSchemaVersions()) - .execute() - .map(VersionedSchema::mapToSchemaVersions); + private Future> getSchemaVersions(SqlClient client) { + return client.query(selectSchemaVersions()) + .execute() + .map(VersionedSchema::mapToSchemaVersions); } - private static boolean getBoolean(Row row, String columnName) { - Object value = row.getValue(columnName); - return value instanceof Boolean b ? b : - (value instanceof Number n ? n.intValue() != 0 : - (value instanceof String s && Boolean.parseBoolean(s))); + private Future createSchemaVersionTable(SqlClient client) { + return client.query(createSchemaVersion()) + .execute() + .mapEmpty(); } - private static List mapToSchemaVersions(RowSet rowSet) { - if (rowSet.size() == 0) - return List.of(); - - List versions = new ArrayList<>(rowSet.size()); - for (Row row : rowSet) { - int version = row.getInteger("version"); - String description = row.getString("description"); - String hash = row.getString("hash"); - String appliedBy = row.getString("applied_by"); - long appliedAt = row.getLong("applied_at"); - long consumedTime = row.getLong("consumed_time"); - boolean success = getBoolean(row, "success"); - - SchemaVersion v = new SchemaVersion(version, description, hash, appliedBy, appliedAt, consumedTime, success); - versions.add(v); - } + private Future createSchema(SqlClient client) { + if (schema == null) + return Future.succeededFuture(); + else + return client.query("CREATE SCHEMA IF NOT EXISTS " + schema) + .execute() + .mapEmpty(); + } - return versions; + private Future setSchema(SqlClient client) { + if (schema == null) + return Future.succeededFuture(); + else + return client.query("SET search_path TO " + schema/* + ", public" */) + .execute() + .mapEmpty(); } + /** - * Scans {@code schemaPath} and returns all the migrations. - * File names must follow: {@code _.sql}. 
+ * Scans the migration directory and parses all migration scripts. + * + *

Migration files must follow the naming convention + * {@code <version>_<description>.sql}. Files are sorted by + * version in ascending order, and each file is checksummed using SHA-256.

* - * @return a future with the list of migrations, empty list if none + * @return a future containing the ordered list of migrations, + * or an empty list if none are found */ private Future> getMigrations() { if (schemaPath == null) { @@ -335,38 +400,13 @@ private Future> getMigrations() { }); } - private Future sha256(Path path) { - MessageDigest digest; - try { - digest = MessageDigest.getInstance("SHA-256"); - } catch (Exception e) { - return Future.failedFuture(e); - } - - return vertx.fileSystem().open(path.toString(), new OpenOptions().setRead(true).setWrite(false)).compose(file -> { - Promise promise = Promise.promise(); - - file.handler(buffer -> digest.update(buffer.getBytes())) - .exceptionHandler(e -> { - file.close(); - promise.fail(e); - }) - .endHandler(v -> { - String hash = Hex.encode(digest.digest()); - file.close(); - promise.complete(hash); - }); - - return promise.future(); - }); - } - /** - * Build a migration file name into a {@link Migration}. - * Expected format: {@code _.sql} + * Parses a migration file name and computes its checksum. * - * @param file path to the migration file - * @return a future with the parsed migration file + *

Expected file name format: {@code <version>_<description>.sql}

+ * + * @param file path to the migration SQL file + * @return a future containing the parsed {@link Migration} */ private Future buildMigration(Path file) { String fileName = file.getFileName().toString(); @@ -551,16 +591,20 @@ private static boolean startsKeyword(String line, int pos, String keyword) { } /** - * Applies a single migration inside a transaction and persists the new schema version. + * Applies a single migration inside a database transaction. + * + *

The migration SQL file is split into individual statements, which are + * executed sequentially. Upon successful execution, a new entry is recorded + * in the {@code schema_versions} table.

* * @param migration the migration to apply - * @return a future completing with the new {@link SchemaVersion} when done + * @return a future completing with the applied {@link SchemaVersion} */ private Future applyMigration(Migration migration) { log.info("Migration: applying migration version {} from {}...", migration.version, migration.fileName()); long begin = System.currentTimeMillis(); - return withTransaction(connection -> { + return withSchemaTransaction(connection -> { Promise promise = Promise.promise(); Future chain = Future.succeededFuture(); try (BufferedReader reader = new BufferedReader(new FileReader(migration.file()))) { @@ -572,7 +616,7 @@ private Future applyMigration(Migration migration) { while ((statement = nextStatement(reader)) != null) { final String sql = statement; - chain = chain.compose(v -> { + chain = chain.compose(vv -> { log.trace("Migration: executing statement {}", sql); return connection.query(sql).execute() .andThen(ar -> { @@ -585,7 +629,7 @@ private Future applyMigration(Migration migration) { return Future.failedFuture(new IllegalStateException("Failed to read migration file", e)); } - chain.compose(v -> { + chain.compose(vv -> { long duration = System.currentTimeMillis() - begin; log.info("Migration: applied migration file {} in {} ms", migration.fileName(), duration); log.debug("Migration: updating schema version..."); @@ -612,12 +656,66 @@ private Future applyMigration(Migration migration) { }); } + private static List mapToSchemaVersions(RowSet rowSet) { + if (rowSet.size() == 0) + return List.of(); + + List versions = new ArrayList<>(rowSet.size()); + for (Row row : rowSet) { + int version = row.getInteger("version"); + String description = row.getString("description"); + String hash = row.getString("hash"); + String appliedBy = row.getString("applied_by"); + long appliedAt = row.getLong("applied_at"); + long consumedTime = row.getLong("consumed_time"); + boolean success = getBoolean(row, "success"); + + SchemaVersion v 
= new SchemaVersion(version, description, hash, appliedBy, appliedAt, consumedTime, success); + versions.add(v); + } + + return versions; + } + + private static boolean getBoolean(Row row, String columnName) { + Object value = row.getValue(columnName); + return value instanceof Boolean b ? b : + (value instanceof Number n ? n.intValue() != 0 : + (value instanceof String s && Boolean.parseBoolean(s))); + } + + private Future sha256(Path path) { + MessageDigest digest; + try { + digest = MessageDigest.getInstance("SHA-256"); + } catch (Exception e) { + return Future.failedFuture(e); + } + + return vertx.fileSystem().open(path.toString(), new OpenOptions().setRead(true).setWrite(false)).compose(file -> { + Promise promise = Promise.promise(); + + file.handler(buffer -> digest.update(buffer.getBytes())) + .exceptionHandler(e -> { + file.close(); + promise.fail(e); + }) + .endHandler(v -> { + String hash = Hex.encode(digest.digest()); + file.close(); + promise.complete(hash); + }); + + return promise.future(); + }); + } + /** * Creates schema_versions table if it does not exist. 
* * @return DDL for creating the schema_versions table if it does not exist */ - protected String createSchemaVersionTable() { + protected String createSchemaVersion() { return createSchemaVersionTable; } diff --git a/api/src/test/java/io/bosonnetwork/database/VersionedSchemaTests.java b/api/src/test/java/io/bosonnetwork/database/VersionedSchemaTests.java index e8abf1b..fe1cf2d 100644 --- a/api/src/test/java/io/bosonnetwork/database/VersionedSchemaTests.java +++ b/api/src/test/java/io/bosonnetwork/database/VersionedSchemaTests.java @@ -20,6 +20,10 @@ import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Order; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestMethodOrder; import org.junit.jupiter.api.extension.ExtendWith; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; @@ -31,6 +35,7 @@ import io.bosonnetwork.utils.FileUtils; @ExtendWith(VertxExtension.class) +@TestMethodOrder(MethodOrderer.OrderAnnotation.class) public class VersionedSchemaTests { private static final Path testRoot = Path.of(System.getProperty("java.io.tmpdir"), "boson"); private static final Path testDir = Path.of(testRoot.toString(), "utils", "VersionedSchemaTests"); @@ -97,6 +102,7 @@ static Stream testDatabaseProvider() { @ParameterizedTest(name = "{0}") @MethodSource("testDatabaseProvider") @Timeout(value = 2, timeUnit = TimeUnit.MINUTES) + @Order(1) void testMigrate(String name, SqlClient client, Vertx vertx, VertxTestContext context) { Path schemaPath = Path.of(getClass().getResource("/db/schema_test/" + name).getPath()); @@ -111,4 +117,38 @@ void testMigrate(String name, SqlClient client, Vertx vertx, VertxTestContext co context.completeNow(); })); } + + @Test + @Order(2) + void testMigrateWithSchemaFoo(Vertx vertx, VertxTestContext context) { + Path schemaPath = 
Path.of(getClass().getResource("/db/schema_test/postgres").getPath()); + + VersionedSchema schema = VersionedSchema.init(vertx, postgres, "foo", schemaPath); + schema.migrate().onComplete(context.succeeding(v -> { + context.verify(() -> { + var sv = schema.getCurrentVersion(); + assertEquals(10, sv.version()); + assertEquals("Trigger: log message insertions into audit_log", sv.description()); + }); + + context.completeNow(); + })); + } + + @Test + @Order(3) + void testMigrateWithSchemaBar(Vertx vertx, VertxTestContext context) { + Path schemaPath = Path.of(getClass().getResource("/db/schema_test/postgres").getPath()); + + VersionedSchema schema = VersionedSchema.init(vertx, postgres, "bar", schemaPath); + schema.migrate().onComplete(context.succeeding(v -> { + context.verify(() -> { + var sv = schema.getCurrentVersion(); + assertEquals(10, sv.version()); + assertEquals("Trigger: log message insertions into audit_log", sv.description()); + }); + + context.completeNow(); + })); + } } \ No newline at end of file From e0bbfd0471cc2b2c1add2dd5f8157da47c9d193e Mon Sep 17 00:00:00 2001 From: Jingyu Date: Tue, 6 Jan 2026 22:23:02 +0800 Subject: [PATCH 3/3] Add multi-tenant (schema-based) support to the database layer and Kademlia node --- .../DefaultNodeConfiguration.java | 178 ++++++++----- .../io/bosonnetwork/NodeConfiguration.java | 28 ++- .../database/VersionedSchema.java | 41 ++- .../bosonnetwork/database/VertxDatabase.java | 57 +++-- .../database/VersionedSchemaTests.java | 12 +- .../main/java/io/bosonnetwork/shell/Main.java | 4 +- .../io/bosonnetwork/kademlia/KadNode.java | 12 +- .../impl/SimpleNodeConfiguration.java | 22 +- .../kademlia/storage/DataStorage.java | 6 +- .../kademlia/storage/DatabaseStorage.java | 236 ++++++++++-------- .../kademlia/storage/PostgresStorage.java | 37 ++- .../kademlia/storage/SQLiteStorage.java | 18 +- dht/src/main/resources/node.yaml | 26 +- .../bosonnetwork/kademlia/InstantTests.java | 236 ------------------ 
.../bosonnetwork/kademlia/NodeAsyncTests.java | 4 +- .../bosonnetwork/kademlia/NodeSyncTests.java | 4 +- .../io/bosonnetwork/kademlia/SybilTests.java | 2 +- .../kademlia/TestNodeLauncher.java | 90 +++++++ dht/src/test/resources/testNode.yaml | 11 + 19 files changed, 533 insertions(+), 491 deletions(-) delete mode 100644 dht/src/test/java/io/bosonnetwork/kademlia/InstantTests.java create mode 100644 dht/src/test/java/io/bosonnetwork/kademlia/TestNodeLauncher.java create mode 100644 dht/src/test/resources/testNode.yaml diff --git a/api/src/main/java/io/bosonnetwork/DefaultNodeConfiguration.java b/api/src/main/java/io/bosonnetwork/DefaultNodeConfiguration.java index fc52039..e4fd2fe 100644 --- a/api/src/main/java/io/bosonnetwork/DefaultNodeConfiguration.java +++ b/api/src/main/java/io/bosonnetwork/DefaultNodeConfiguration.java @@ -36,6 +36,7 @@ import java.util.Collection; import java.util.HashMap; import java.util.HashSet; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; @@ -62,7 +63,9 @@ public class DefaultNodeConfiguration implements NodeConfiguration { /** * The default port for the DHT node, chosen from the IANA unassigned range (38866-39062). - * See: https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml + * See: + * IANA unassigned range (38866-39062) + * */ private static final int DEFAULT_DHT_PORT = 39001; @@ -72,59 +75,43 @@ public class DefaultNodeConfiguration implements NodeConfiguration { */ private Vertx vertx; - /** - * IPv4 address string for the DHT node. If null or empty, disables DHT on IPv4. - */ + /** IPv4 address string for the DHT node. If null or empty, disables DHT on IPv4. */ private String host4; - /** - * IPv6 address string for the DHT node. If null or empty, disables DHT on IPv6. - */ + /** IPv6 address string for the DHT node. If null or empty, disables DHT on IPv6.*/ private String host6; - /** - * The port number for the DHT node. 
- */ + /** The port number for the DHT node. */ private int port; - /** - * The node's private key, encoded in Base58. - */ + /** The node's private key, encoded in Base58. */ private Signature.PrivateKey privateKey; - /** - * Path to the directory for persistent DHT data storage. disables persistence if null. - */ + /** Path to the directory for persistent DHT data storage. disables persistence if null. */ private Path dataDir; - /** - * Optional external storage URI for the node. - */ - private String storageURI; + /** Database storage URI for the node. */ + private String databaseUri; - /** - * Set of bootstrap nodes for joining the DHT network. - */ + /** Database connection pool size. */ + private int databasePoolSize; + + /** Database schema name. Available for PostgreSQL only*/ + private String databaseSchemaName; + + /** Set of bootstrap nodes for joining the DHT network. */ private final Set bootstraps; - /** - * Whether spam throttling is enabled for this node. - */ + /** Whether spam throttling is enabled for this node. */ private boolean enableSpamThrottling; - /** - * Whether suspicious node detection is enabled for this node. - */ + /** Whether suspicious node detection is enabled for this node. */ private boolean enableSuspiciousNodeDetector; - /** - * Whether developer mode is enabled for this node. - */ + /** Whether developer mode is enabled for this node. */ private boolean enableDeveloperMode; - /** - * Whether metrics is enabled for this node. - */ + /** Whether metrics is enabled for this node. 
*/ private boolean enableMetrics; /** @@ -134,7 +121,9 @@ public class DefaultNodeConfiguration implements NodeConfiguration { */ private DefaultNodeConfiguration() { this.port = DEFAULT_DHT_PORT; - this.storageURI = "jdbc:sqlite:node.db"; + this.databaseUri = "jdbc:sqlite:node.db"; + this.databasePoolSize = 0; + this.databaseSchemaName = null; this.enableSpamThrottling = true; this.enableSuspiciousNodeDetector = true; this.enableDeveloperMode = false; @@ -212,11 +201,21 @@ public Path dataDir() { /** * {@inheritDoc} - * @return the external storage URL, or null if not set. + * @return the database storage URL, or null if not set. */ @Override - public String storageURI() { - return storageURI; + public String databaseUri() { + return databaseUri; + } + + @Override + public int databasePoolSize() { + return databasePoolSize; + } + + @Override + public String databaseSchemaName() { + return databaseSchemaName; } /** @@ -275,18 +274,20 @@ public boolean enableMetrics() { *
  • {@code port} (Integer, optional) - DHT port (defaults to 39001)
  • *
  • {@code privateKey} (String, required) - Base58 or hex-encoded private key
  • *
  • {@code dataDir} (String, optional) - Path to persistent data directory
  • - *
  • {@code storageURI} (String, required) - Storage URI (defaults to "jdbc:sqlite:node.db")
  • - *
  • {@code bootstraps} (List<List<Object>>, optional) - Bootstrap nodes as [id, host, port] triplets
  • + *
  • {@code databaseUri} (String, optional) - Database URI (defaults to "jdbc:sqlite:node.db")
  • + *
  • {@code databasePoolSize} (int, optional) - Database pool size (defaults to 0)
  • + *
  • {@code databaseSchemaName} (String, optional) - Database schema name (defaults to null)
  • + *
  • {@code bootstraps} (List<List<Object>>, optional) - Bootstrap nodes as [id, host, port] triplets
  • *
  • {@code enableSpamThrottling} (Boolean, optional) - Enable spam throttling (default: true)
  • *
  • {@code enableSuspiciousNodeDetector} (Boolean, optional) - Enable suspicious node detection (default: true)
  • *
  • {@code enableDeveloperMode} (Boolean, optional) - Enable developer mode (default: false)
  • *
  • {@code enableMetrics} (Boolean, optional) - Enable metrics (default: false)
  • * * - * @param map the map containing configuration data, must not be null or empty + * @param map the map containing configuration data, the map must not be null or empty * @return a new DefaultNodeConfiguration instance - * @throws NullPointerException if map is null - * @throws IllegalArgumentException if map is empty, required fields are missing, or values are invalid + * @throws NullPointerException if the map is null + * @throws IllegalArgumentException if the map is empty, required fields are missing, or values are invalid */ public static DefaultNodeConfiguration fromMap(Map map) { Objects.requireNonNull(map, "map"); @@ -317,9 +318,23 @@ public static DefaultNodeConfiguration fromMap(Map map) { if (dir != null && !dir.isEmpty()) config.dataDir = Path.of(dir); - config.storageURI = m.getString("storageURI", config.storageURI); - if (config.storageURI == null || config.storageURI.isEmpty()) - throw new IllegalArgumentException("Missing storageURI"); + ConfigMap db = m.getObject("database"); + if (db != null && !db.isEmpty()) { + config.databaseUri = db.getString("uri", config.databaseUri); + if (config.databaseUri == null || config.databaseUri.isEmpty()) + throw new IllegalArgumentException("Missing database URI"); + config.databasePoolSize = db.getInteger("poolSize", config.databasePoolSize); + if (config.databasePoolSize < 0) + throw new IllegalArgumentException("Invalid database pool size: " + config.databasePoolSize); + String schemaName = db.getString("schema", config.databaseSchemaName); + if (schemaName != null && !schemaName.isEmpty()) { + if (!schemaName.matches("[a-z][a-z0-9_]{0,31}")) + throw new IllegalArgumentException("Invalid schema name"); + config.databaseSchemaName = schemaName; + } else { + config.databaseSchemaName = null; + } + } List> lst = m.getList("bootstraps"); if (lst != null && !lst.isEmpty()) { @@ -358,7 +373,7 @@ public static DefaultNodeConfiguration fromMap(Map map) { * @return a Map containing the configuration data */ 
public Map toMap() { - HashMap map = new HashMap<>(); + HashMap map = new LinkedHashMap<>(); if (host4 != null) map.put("host4", host4); @@ -372,7 +387,13 @@ public Map toMap() { if (dataDir != null) map.put("dataDir", dataDir); - map.put("storageURI", storageURI); + HashMap db = new LinkedHashMap<>(); + db.put("uri", databaseUri); + if (databasePoolSize > 0) + db.put("poolSize", databasePoolSize); + if (databaseSchemaName != null) + db.put("schema", databaseSchemaName); + map.put("database", db); if (!bootstraps.isEmpty()) { List> lst = new ArrayList<>(); @@ -427,7 +448,7 @@ private DefaultNodeConfiguration config() { * * @param template the template map containing configuration data, must not be null * @return this Builder for chaining - * @throws NullPointerException if template is null + * @throws NullPointerException if the template is null * @throws IllegalArgumentException if the template is invalid * @see DefaultNodeConfiguration#fromMap(Map) */ @@ -688,16 +709,59 @@ public Path dataDir() { } /** - * Set the external storage URL for the node. - * @param storageURI the storage URL (must not be null) + * Set the database URI for the node. 
+ * @param uri the database URI (must not be null) + * @param poolSize the database connection pool size * @return this Builder for chaining * @throws NullPointerException if storageURI is null + * @throws IllegalArgumentException if the URI is not supported or the pool size is invalid */ - public Builder storageURI(String storageURI) { - Objects.requireNonNull(storageURI, "storageURI"); - if (!storageURI.startsWith("postgresql://") && !storageURI.startsWith("jdbc:sqlite:")) - throw new IllegalArgumentException("Unsupported storage URL: " + storageURI); - config().storageURI = storageURI; + public Builder database(String uri, int poolSize) { + Objects.requireNonNull(uri, "uri"); + if (poolSize < 0) + throw new IllegalArgumentException("Invalid pool size: " + poolSize); + + if (!uri.startsWith("postgresql://") && !uri.startsWith("jdbc:sqlite:")) + throw new IllegalArgumentException("Unsupported storage URL: " + uri); + config().databaseUri = uri; + config().databasePoolSize = poolSize; + return this; + } + + /** + * Set the database URI for the node. + * @param uri the database URI (must not be null) + * @return this Builder for chaining + * @throws NullPointerException if storageURI is null + * @throws IllegalArgumentException if the URI is not supported or the pool size is invalid + */ + public Builder database(String uri) { + return database(uri, 0); + } + + /** + * Sets the database schema name to be used. The schema name must start with a + * lowercase letter and may contain lowercase letters, digits, and underscores + * with a maximum length of 32 characters. If the provided schema is null or + * empty, the schema name will be set to null. + *

    + * NOTICE: the schema only available to PostgreSQL databases. + * It will be ignored for SQLite databases. + * + * @param schema the name of the database schema + * @return the builder instance for method chaining + * @throws IllegalArgumentException if the schema name does not match the + * required pattern or exceeds the maximum length + */ + public Builder databaseSchemaName(String schema) { + if (schema == null || schema.isEmpty()) { + config().databaseSchemaName = null; + } else { + if (!schema.matches("[a-z][a-z0-9_]{0,31}")) + throw new IllegalArgumentException("Invalid schema name"); + config().databaseSchemaName = schema; + } + return this; } diff --git a/api/src/main/java/io/bosonnetwork/NodeConfiguration.java b/api/src/main/java/io/bosonnetwork/NodeConfiguration.java index 9cafe7e..933d8a5 100644 --- a/api/src/main/java/io/bosonnetwork/NodeConfiguration.java +++ b/api/src/main/java/io/bosonnetwork/NodeConfiguration.java @@ -120,11 +120,33 @@ default Path dataDir() { } /** - * Provides the URL for external storage used by the DHT node. + * Provides the URL for database storage used by the DHT node. * - * @return the external storage URL as a string, or {@code null} if not configured. + * @return the external database URL as a string, or {@code null} if not configured. */ - default String storageURI() { + default String databaseUri() { + return "jdbc:sqlite:node.db"; + } + + /** + * Provides the configured size of the database connection pool. + * + * This value determines the maximum number of database connections that can be + * simultaneously maintained by the application for performing database operations. + * + * @return the size of the database connection pool, or {@code 0} if no specific pool size is defined. + */ + default int databasePoolSize() { + return 0; + } + + /** + * Returns the database schema name. + * This typically corresponds to a namespace, such as the PostgreSQL search path. 
+ * + * @return the name of the database schema as a string, or {@code null} if no schema is specified. + */ + default String databaseSchemaName() { return null; } diff --git a/api/src/main/java/io/bosonnetwork/database/VersionedSchema.java b/api/src/main/java/io/bosonnetwork/database/VersionedSchema.java index 5923eb9..e02d37e 100644 --- a/api/src/main/java/io/bosonnetwork/database/VersionedSchema.java +++ b/api/src/main/java/io/bosonnetwork/database/VersionedSchema.java @@ -54,8 +54,8 @@ *

    This component applies versioned SQL migrations located in a directory, * records applied versions in a {@code schema_versions} table, and ensures * migration integrity via SHA-256 checksum validation.

    - * - *

    Features: + *

    + * Features: *

      *
    • Versioned migrations using {@code <version>_<description>.sql} naming
    • *
    • Transactional execution of migrations
    • @@ -63,23 +63,22 @@ *
    • Optional PostgreSQL schema isolation via {@code SET search_path}
    • *
    • Compatible with PostgreSQL and SQLite (Vert.x SQL clients)
    • *
    - * - *

    This class is designed for application-managed schema migrations and - * intentionally avoids external JDBC-based migration frameworks.

    - * - *

    Future extensions: + *

    + * This class is designed for application-managed schema migrations and + * intentionally avoids external JDBC-based migration frameworks. + *

    + * Future extensions: *

      *
    • Baseline support for existing schemas
    • *
    • Repair support for checksum mismatch recovery
    • *
    - *

    */ public class VersionedSchema implements VertxDatabase { private static final SchemaVersion EMPTY_VERSION = new SchemaVersion(0, "", null,"", 0, 0, true); private final Vertx vertx; private final SqlClient client; private final String schema; - private final Path schemaPath; + private final Path migrationPath; private String databaseProductName; private SchemaVersion currentVersion; @@ -139,11 +138,11 @@ public Path path() { */ } - private VersionedSchema(Vertx vertx, SqlClient client, String schema, Path schemaPath) { + private VersionedSchema(Vertx vertx, SqlClient client, String schema, Path migrationPath) { this.vertx = vertx; this.client = client; this.schema = schema; - this.schemaPath = schemaPath; + this.migrationPath = migrationPath; this.currentVersion = EMPTY_VERSION; } @@ -155,11 +154,11 @@ private VersionedSchema(Vertx vertx, SqlClient client, String schema, Path schem * * @param vertx Vert.x instance * @param client Vert.x SQL client - * @param schemaPath directory containing migration SQL files + * @param migrationPath directory containing migration SQL files * @return a new {@link VersionedSchema} instance */ - public static VersionedSchema init(Vertx vertx, SqlClient client, Path schemaPath) { - return new VersionedSchema(vertx, client, null, schemaPath); + public static VersionedSchema init(Vertx vertx, SqlClient client, Path migrationPath) { + return new VersionedSchema(vertx, client, null, migrationPath); } /** @@ -168,14 +167,14 @@ public static VersionedSchema init(Vertx vertx, SqlClient client, Path schemaPat * @param vertx the Vert.x instance used for database operations and event loops * @param client the SQL client used for executing migrations * @param schema the schema name where migrations will be applied - * @param schemaPath the path to the directory containing migration SQL files + * @param migrationPath the path to the directory containing migration SQL files * @return a new {@link VersionedSchema} instance configured for the 
provided parameters */ - public static VersionedSchema init(Vertx vertx, SqlClient client, String schema, Path schemaPath) { + public static VersionedSchema init(Vertx vertx, SqlClient client, String schema, Path migrationPath) { if (schema != null && !schema.matches("[a-z][a-z0-9_]{0,31}")) throw new IllegalArgumentException("Invalid schema name"); - return new VersionedSchema(vertx, client, schema, schemaPath); + return new VersionedSchema(vertx, client, schema, migrationPath); } /** @@ -327,7 +326,7 @@ private Future setSchema(SqlClient client) { if (schema == null) return Future.succeededFuture(); else - return client.query("SET search_path TO " + schema/* + ", public" */) + return client.query("SET search_path TO " + schema) .execute() .mapEmpty(); } @@ -344,15 +343,15 @@ private Future setSchema(SqlClient client) { * or an empty list if none are found */ private Future> getMigrations() { - if (schemaPath == null) { + if (migrationPath == null) { log.warn("Migration check: skipping, no schema migration path set"); return Future.succeededFuture(List.of()); } - log.info("Migration check: checking for new migrations from {} ...", schemaPath); + log.info("Migration check: checking for new migrations from {} ...", migrationPath); FileSystem fs = vertx.fileSystem(); - return fs.readDir(schemaPath.toString()).compose(files -> { + return fs.readDir(migrationPath.toString()).compose(files -> { List migrations = new ArrayList<>(); Future future = Future.succeededFuture(); diff --git a/api/src/main/java/io/bosonnetwork/database/VertxDatabase.java b/api/src/main/java/io/bosonnetwork/database/VertxDatabase.java index 555661f..ddd760d 100644 --- a/api/src/main/java/io/bosonnetwork/database/VertxDatabase.java +++ b/api/src/main/java/io/bosonnetwork/database/VertxDatabase.java @@ -23,12 +23,11 @@ package io.bosonnetwork.database; import java.util.List; +import java.util.function.BiFunction; import java.util.function.Function; import io.vertx.core.Future; import 
io.vertx.sqlclient.Pool; -import io.vertx.sqlclient.PreparedQuery; -import io.vertx.sqlclient.Query; import io.vertx.sqlclient.Row; import io.vertx.sqlclient.RowSet; import io.vertx.sqlclient.SqlClient; @@ -72,23 +71,37 @@ default Future getDatabaseProductName() { } /** - * Creates a simple text query using the underlying client. + * Performs per-connection initialization before executing any SQL. + *

    + * This method is invoked every time a {@link SqlConnection} is acquired + * (both for pooled and non-pooled clients), before any queries or + * transactions are executed. + *

    + * Typical use cases include: + *

      + *
    • Setting PostgreSQL {@code search_path}
    • + *
    • Configuring session variables
    • + *
    • Applying tenant- or schema-specific settings
    • + *
    + *

    + * If this method fails, the associated operation or transaction will fail. + * Implementations should not suppress errors. + *

    + * Implementations must be idempotent. * - * @param sql SQL text to execute - * @return a Vert.x {@link Query} for the provided SQL + * @param connection the connection to prepare + * @return a future completing when initialization is complete */ - default Query> query(String sql) { - return getClient().query(sql); + default Future prepareConnection(SqlConnection connection) { + return Future.succeededFuture(); } - /** - * Creates a prepared query using the underlying client. - * - * @param sql SQL text with placeholders - * @return a Vert.x {@link PreparedQuery} for the provided SQL - */ - default PreparedQuery> preparedQuery(String sql) { - return getClient().preparedQuery(sql); + private BiFunction> wrapped(Function function) { + return (c, t) -> prepareConnection(c).map(v -> function.apply(t)); + } + + private Function> wrappedAsync(Function> function) { + return c -> prepareConnection(c).compose(v -> function.apply(c)); } /** @@ -105,9 +118,9 @@ default PreparedQuery> preparedQuery(String sql) { */ default Future withTransaction(Function> function) { if (getClient() instanceof Pool p) { - return p.withTransaction(function); - } else if (getClient() instanceof SqlConnection c) { - return withTransaction(c, function); + return p.withTransaction(c -> wrappedAsync(function).apply(c)); + } else if (getClient() instanceof SqlConnection connection) { + return withTransaction(connection, c -> wrappedAsync(function).apply(c)); } else { return Future.failedFuture(new IllegalStateException("Client must be an instance of SqlConnection or Pool")); } @@ -141,11 +154,9 @@ private Future withTransaction(SqlConnection connection, Function Future withConnection(Function> function) { if (getClient() instanceof SqlConnection c) { - return function.apply(c); + return wrappedAsync(function).apply(c); } else if (getClient() instanceof Pool p) { - return p.getConnection().compose(c -> - function.apply(c).onComplete(ar -> c.close()) - ); + return p.withConnection(c -> 
wrappedAsync(function).apply(c)); } else { return Future.failedFuture(new IllegalStateException("Client must be an instance of SqlConnection or Pool")); } @@ -264,7 +275,7 @@ default List findMany(RowSet rowSet, Function mapper) { * @param result the SQL result to check * @return true if at least one row was affected, false otherwise */ - default boolean hasEffectedRows(SqlResult result) { + default boolean hasAffectedRows(SqlResult result) { return result.rowCount() > 0; } diff --git a/api/src/test/java/io/bosonnetwork/database/VersionedSchemaTests.java b/api/src/test/java/io/bosonnetwork/database/VersionedSchemaTests.java index fe1cf2d..de7329f 100644 --- a/api/src/test/java/io/bosonnetwork/database/VersionedSchemaTests.java +++ b/api/src/test/java/io/bosonnetwork/database/VersionedSchemaTests.java @@ -104,9 +104,9 @@ static Stream testDatabaseProvider() { @Timeout(value = 2, timeUnit = TimeUnit.MINUTES) @Order(1) void testMigrate(String name, SqlClient client, Vertx vertx, VertxTestContext context) { - Path schemaPath = Path.of(getClass().getResource("/db/schema_test/" + name).getPath()); + Path migrationPath = Path.of(getClass().getResource("/db/schema_test/" + name).getPath()); - VersionedSchema schema = VersionedSchema.init(vertx, client, schemaPath); + VersionedSchema schema = VersionedSchema.init(vertx, client, migrationPath); schema.migrate().onComplete(context.succeeding(v -> { context.verify(() -> { var sv = schema.getCurrentVersion(); @@ -121,9 +121,9 @@ void testMigrate(String name, SqlClient client, Vertx vertx, VertxTestContext co @Test @Order(2) void testMigrateWithSchemaFoo(Vertx vertx, VertxTestContext context) { - Path schemaPath = Path.of(getClass().getResource("/db/schema_test/postgres").getPath()); + Path migrationPath = Path.of(getClass().getResource("/db/schema_test/postgres").getPath()); - VersionedSchema schema = VersionedSchema.init(vertx, postgres, "foo", schemaPath); + VersionedSchema schema = VersionedSchema.init(vertx, postgres, 
"foo", migrationPath); schema.migrate().onComplete(context.succeeding(v -> { context.verify(() -> { var sv = schema.getCurrentVersion(); @@ -138,9 +138,9 @@ void testMigrateWithSchemaFoo(Vertx vertx, VertxTestContext context) { @Test @Order(3) void testMigrateWithSchemaBar(Vertx vertx, VertxTestContext context) { - Path schemaPath = Path.of(getClass().getResource("/db/schema_test/postgres").getPath()); + Path migrationPath = Path.of(getClass().getResource("/db/schema_test/postgres").getPath()); - VersionedSchema schema = VersionedSchema.init(vertx, postgres, "bar", schemaPath); + VersionedSchema schema = VersionedSchema.init(vertx, postgres, "bar", migrationPath); schema.migrate().onComplete(context.succeeding(v -> { context.verify(() -> { var sv = schema.getCurrentVersion(); diff --git a/cmds/src/main/java/io/bosonnetwork/shell/Main.java b/cmds/src/main/java/io/bosonnetwork/shell/Main.java index d73005c..939b742 100644 --- a/cmds/src/main/java/io/bosonnetwork/shell/Main.java +++ b/cmds/src/main/java/io/bosonnetwork/shell/Main.java @@ -232,10 +232,10 @@ private void parseArgs() throws IOException { } if (storageURL != null) { - builder.storageURI(storageURL); + builder.database(storageURL); } else { if (builder.hasDataDir()) - builder.storageURI("jdbc:sqlite:" + builder.dataDir().resolve("node.db")); + builder.database("jdbc:sqlite:" + builder.dataDir().resolve("node.db")); } if (!builder.hasPrivateKey()) diff --git a/dht/src/main/java/io/bosonnetwork/kademlia/KadNode.java b/dht/src/main/java/io/bosonnetwork/kademlia/KadNode.java index 5f40eb5..431621b 100644 --- a/dht/src/main/java/io/bosonnetwork/kademlia/KadNode.java +++ b/dht/src/main/java/io/bosonnetwork/kademlia/KadNode.java @@ -133,11 +133,11 @@ private void checkConfig(NodeConfiguration config) { } } - if (config.storageURI() != null) { - if (!DataStorage.supports(config.storageURI())) - throw new IllegalArgumentException("unsupported storage URL: " + config.storageURI()); + if (config.databaseUri() != 
null) { + if (!DataStorage.supports(config.databaseUri())) + throw new IllegalArgumentException("unsupported storage URL: " + config.databaseUri()); } else { - log.warn("No storage URL is configured, in-memory storage is used"); + throw new IllegalArgumentException("No database is configured"); } } @@ -229,14 +229,14 @@ public void prepare(Vertx vertx, Context context) { public Future deploy() { tokenManager = new TokenManager(); - String storageURI = config.storageURI(); + String storageURI = config.databaseUri(); // fix the sqlite database file location if (storageURI.startsWith("jdbc:sqlite:")) { Path dbFile = Path.of(storageURI.substring("jdbc:sqlite:".length())); if (!dbFile.isAbsolute()) storageURI = "jdbc:sqlite:" + config.dataDir().resolve(dbFile).toAbsolutePath(); } - storage = DataStorage.create(storageURI); + storage = DataStorage.create(storageURI, config.databasePoolSize(), config.databaseSchemaName()); // TODO: empty blacklist for now blacklist = Blacklist.empty(); diff --git a/dht/src/main/java/io/bosonnetwork/kademlia/impl/SimpleNodeConfiguration.java b/dht/src/main/java/io/bosonnetwork/kademlia/impl/SimpleNodeConfiguration.java index 3f04a98..6dd6544 100644 --- a/dht/src/main/java/io/bosonnetwork/kademlia/impl/SimpleNodeConfiguration.java +++ b/dht/src/main/java/io/bosonnetwork/kademlia/impl/SimpleNodeConfiguration.java @@ -23,7 +23,9 @@ public class SimpleNodeConfiguration implements NodeConfiguration { private final int port; private final Signature.PrivateKey privateKey; private final Path dataDir; - private final String storageURL; + private final String databaseUri; + private final int databasePoolSize; + private final String databaseSchemaName; private final ArrayList bootstrapNodes; private final boolean enableSpamThrottling; private final boolean enableSuspiciousNodeDetector; @@ -37,7 +39,9 @@ public SimpleNodeConfiguration(NodeConfiguration config) { this.privateKey = config.privateKey(); this.dataDir = config.dataDir() != null ? 
config.dataDir().toAbsolutePath() : Path.of(System.getProperty("user.dir")).resolve("node"); - this.storageURL = config.storageURI() != null ? config.storageURI() : InMemoryStorage.STORAGE_URI; + this.databaseUri = config.databaseUri() != null ? config.databaseUri() : InMemoryStorage.STORAGE_URI; + this.databasePoolSize = config.databasePoolSize(); + this.databaseSchemaName = config.databaseSchemaName(); this.bootstrapNodes = new ArrayList<>(config.bootstrapNodes() != null ? config.bootstrapNodes() : Collections.emptyList()); this.enableSpamThrottling = config.enableSpamThrottling(); this.enableSuspiciousNodeDetector = config.enableSuspiciousNodeDetector(); @@ -95,8 +99,18 @@ public Path dataDir() { } @Override - public String storageURI() { - return storageURL; + public String databaseUri() { + return databaseUri; + } + + @Override + public int databasePoolSize() { + return databasePoolSize; + } + + @Override + public String databaseSchemaName() { + return databaseSchemaName; } @Override diff --git a/dht/src/main/java/io/bosonnetwork/kademlia/storage/DataStorage.java b/dht/src/main/java/io/bosonnetwork/kademlia/storage/DataStorage.java index 1b53787..61fe856 100644 --- a/dht/src/main/java/io/bosonnetwork/kademlia/storage/DataStorage.java +++ b/dht/src/main/java/io/bosonnetwork/kademlia/storage/DataStorage.java @@ -272,15 +272,15 @@ static boolean supports(String uri) { uri.startsWith(PostgresStorage.STORAGE_URI_PREFIX); } - static DataStorage create(String uri) { + static DataStorage create(String uri, int poolSize, String schema) { Objects.requireNonNull(uri, "url"); if (uri.equals(InMemoryStorage.STORAGE_URI)) return new InMemoryStorage(); if (uri.startsWith(SQLiteStorage.STORAGE_URI_PREFIX)) - return new SQLiteStorage(uri); + return new SQLiteStorage(uri, poolSize); if (uri.startsWith(PostgresStorage.STORAGE_URI_PREFIX)) - return new PostgresStorage(uri); + return new PostgresStorage(uri, poolSize, schema); throw new IllegalArgumentException("Unsupported 
storage: " + uri); } diff --git a/dht/src/main/java/io/bosonnetwork/kademlia/storage/DatabaseStorage.java b/dht/src/main/java/io/bosonnetwork/kademlia/storage/DatabaseStorage.java index 5d12f2b..914d870 100644 --- a/dht/src/main/java/io/bosonnetwork/kademlia/storage/DatabaseStorage.java +++ b/dht/src/main/java/io/bosonnetwork/kademlia/storage/DatabaseStorage.java @@ -53,7 +53,11 @@ public abstract class DatabaseStorage implements DataStorage, VertxDatabase { protected abstract void init(Vertx vertx); - protected abstract Path getSchemaPath(); + protected abstract Path getMigrationPath(); + + protected String getSchema() { + return null; + } protected abstract SqlDialect getDialect(); @@ -64,7 +68,7 @@ public Future initialize(Vertx vertx, long valueExpiration, long peerIn this.valueExpiration = valueExpiration; this.peerInfoExpiration = peerInfoExpiration; - VersionedSchema schema = VersionedSchema.init(vertx, getClient(), getSchemaPath()); + VersionedSchema schema = VersionedSchema.init(vertx, getClient(), getSchema(), getMigrationPath()); return schema.migrate().andThen(ar -> { if (ar.succeeded()) { schemaVersion = schema.getCurrentVersion().version(); @@ -165,66 +169,72 @@ public Future putValue(Value value, boolean persistent, int expectedSeque @Override public Future getValue(Id id) { getLogger().debug("Getting value with id: {}", id); - return SqlTemplate.forQuery(getClient(), getDialect().selectValueById()) - .execute(Map.of("id", id.bytes())) - .map(rows -> findUnique(rows, DatabaseStorage::rowToValue)) - .andThen(ar -> { - if (ar.succeeded()) { - if (ar.result() != null) - getLogger().debug("Got value with id: {}", id); - else - //noinspection LoggingSimilarMessage - getLogger().debug("No value found with id: {}", id); - } else { - getLogger().error("Failed to get value with id: {}", id, ar.cause()); - } - }).recover(cause -> - Future.failedFuture(new DataStorageException("Database operation failed", cause)) - ); + return withConnection(c -> + 
SqlTemplate.forQuery(c, getDialect().selectValueById()) + .execute(Map.of("id", id.bytes())) + .map(rows -> findUnique(rows, DatabaseStorage::rowToValue)) + .andThen(ar -> { + if (ar.succeeded()) { + if (ar.result() != null) + getLogger().debug("Got value with id: {}", id); + else + //noinspection LoggingSimilarMessage + getLogger().debug("No value found with id: {}", id); + } else { + getLogger().error("Failed to get value with id: {}", id, ar.cause()); + } + }) + ).recover(cause -> + Future.failedFuture(new DataStorageException("Database operation failed", cause)) + ); } @Override public Future> getValues() { - return query(getDialect().selectAllValues()) - .execute() - .map(rows -> findMany(rows, DatabaseStorage::rowToValue)) - .recover(cause -> - Future.failedFuture(new DataStorageException("Database operation failed", cause)) - ); + return withConnection(c -> + c.query(getDialect().selectAllValues()) + .execute() + .map(rows -> findMany(rows, DatabaseStorage::rowToValue)) + ).recover(cause -> + Future.failedFuture(new DataStorageException("Database operation failed", cause)) + ); } @Override public Future> getValues(int offset, int limit) { - return SqlTemplate.forQuery(getClient(), getDialect().selectAllValuesPaginated()) - .execute(Map.of("limit", limit, "offset", offset)) - .map(rows -> findMany(rows, DatabaseStorage::rowToValue)) - .recover(cause -> - Future.failedFuture(new DataStorageException("Database operation failed", cause)) - ); + return withConnection(c -> + SqlTemplate.forQuery(c, getDialect().selectAllValuesPaginated()) + .execute(Map.of("limit", limit, "offset", offset)) + .map(rows -> findMany(rows, DatabaseStorage::rowToValue)) + ).recover(cause -> + Future.failedFuture(new DataStorageException("Database operation failed", cause)) + ); } @Override public Future> getValues(boolean persistent, long announcedBefore) { - return SqlTemplate.forQuery(getClient(), getDialect().selectValuesByPersistentAndAnnouncedBefore()) - 
.execute(Map.of("persistent", persistent, "updatedBefore", announcedBefore)) - .map(rows -> findMany(rows, DatabaseStorage::rowToValue)) - .recover(cause -> - Future.failedFuture(new DataStorageException("Database operation failed", cause)) - ); + return withConnection(c -> + SqlTemplate.forQuery(c, getDialect().selectValuesByPersistentAndAnnouncedBefore()) + .execute(Map.of("persistent", persistent, "updatedBefore", announcedBefore)) + .map(rows -> findMany(rows, DatabaseStorage::rowToValue)) + ).recover(cause -> + Future.failedFuture(new DataStorageException("Database operation failed", cause)) + ); } @Override public Future> getValues(boolean persistent, long announcedBefore, int offset, int limit) { - return SqlTemplate.forQuery(getClient(), getDialect().selectValuesByPersistentAndAnnouncedBeforePaginated()) - .execute(Map.of( - "persistent", persistent, - "updatedBefore", announcedBefore, - "limit", limit, - "offset", offset)) - .map(rows -> findMany(rows, DatabaseStorage::rowToValue)) - .recover(cause -> - Future.failedFuture(new DataStorageException("Database operation failed", cause)) - ); + return withConnection(c -> + SqlTemplate.forQuery(c, getDialect().selectValuesByPersistentAndAnnouncedBeforePaginated()) + .execute(Map.of( + "persistent", persistent, + "updatedBefore", announcedBefore, + "limit", limit, + "offset", offset)) + .map(rows -> findMany(rows, DatabaseStorage::rowToValue)) + ).recover(cause -> + Future.failedFuture(new DataStorageException("Database operation failed", cause)) + ); } @Override @@ -256,7 +266,7 @@ public Future removeValue(Id id) { return withTransaction(c -> SqlTemplate.forUpdate(c, getDialect().deleteValueById()) .execute(Map.of("id", id.bytes())) - .map(this::hasEffectedRows) + .map(this::hasAffectedRows) ).andThen(ar -> { if (ar.succeeded()) { if (ar.result()) @@ -326,87 +336,95 @@ public Future> putPeers(List peerInfos) { @Override public Future getPeer(Id id, Id nodeId) { getLogger().debug("Getting peer with id: {} @ 
{}", id, nodeId); - return SqlTemplate.forQuery(getClient(), getDialect().selectPeerByIdAndNodeId()) - .execute(Map.of("id", id.bytes(), "nodeId", nodeId.bytes())) - .map(rows -> findUnique(rows, DatabaseStorage::rowToPeer)) - .andThen(ar -> { - if (ar.succeeded()) { - if (ar.result() != null) - getLogger().debug("Got peer with id: {} @ {}", id, nodeId); - else - //noinspection LoggingSimilarMessage - getLogger().debug("No peer found with id: {} @ {}", id, nodeId); - } else { - getLogger().error("Failed to get peer with id: {} @ {}", id, nodeId, ar.cause()); - } - }).recover(cause -> - Future.failedFuture(new DataStorageException("Database operation failed", cause)) - ); + return withConnection(c -> + SqlTemplate.forQuery(c, getDialect().selectPeerByIdAndNodeId()) + .execute(Map.of("id", id.bytes(), "nodeId", nodeId.bytes())) + .map(rows -> findUnique(rows, DatabaseStorage::rowToPeer)) + .andThen(ar -> { + if (ar.succeeded()) { + if (ar.result() != null) + getLogger().debug("Got peer with id: {} @ {}", id, nodeId); + else + //noinspection LoggingSimilarMessage + getLogger().debug("No peer found with id: {} @ {}", id, nodeId); + } else { + getLogger().error("Failed to get peer with id: {} @ {}", id, nodeId, ar.cause()); + } + }) + ).recover(cause -> + Future.failedFuture(new DataStorageException("Database operation failed", cause)) + ); } @Override public Future> getPeers(Id id) { getLogger().debug("Getting peers with id: {}", id); - return SqlTemplate.forQuery(getClient(), getDialect().selectPeersById()) - .execute(Map.of("id", id.bytes())) - .map(rows -> findMany(rows, DatabaseStorage::rowToPeer)) - .andThen(ar -> { - if (ar.succeeded()) { - if (!ar.result().isEmpty()) - getLogger().debug("Got peers with id: {}", id); - else - //noinspection LoggingSimilarMessage - getLogger().debug("No peers found with id: {}", id); - } else { - getLogger().error("Failed to get peers with id: {}", id, ar.cause()); - } - }).recover(cause -> - Future.failedFuture(new 
DataStorageException("Database operation failed", cause)) - ); + return withConnection(c -> + SqlTemplate.forQuery(c, getDialect().selectPeersById()) + .execute(Map.of("id", id.bytes())) + .map(rows -> findMany(rows, DatabaseStorage::rowToPeer)) + .andThen(ar -> { + if (ar.succeeded()) { + if (!ar.result().isEmpty()) + getLogger().debug("Got peers with id: {}", id); + else + //noinspection LoggingSimilarMessage + getLogger().debug("No peers found with id: {}", id); + } else { + getLogger().error("Failed to get peers with id: {}", id, ar.cause()); + } + }) + ).recover(cause -> + Future.failedFuture(new DataStorageException("Database operation failed", cause)) + ); } @Override public Future> getPeers() { - return query(getDialect().selectAllPeers()) - .execute() - .map(rows -> findMany(rows, DatabaseStorage::rowToPeer)) - .recover(cause -> - Future.failedFuture(new DataStorageException("Database operation failed", cause)) - ); + return withConnection(c -> + c.query(getDialect().selectAllPeers()) + .execute() + .map(rows -> findMany(rows, DatabaseStorage::rowToPeer)) + ).recover(cause -> + Future.failedFuture(new DataStorageException("Database operation failed", cause)) + ); } @Override public Future> getPeers(int offset, int limit) { - return SqlTemplate.forQuery(getClient(), getDialect().selectAllPeersPaginated()) - .execute(Map.of("limit", limit, "offset", offset)) - .map(rows -> findMany(rows, DatabaseStorage::rowToPeer)) - .recover(cause -> - Future.failedFuture(new DataStorageException("Database operation failed", cause)) - ); + return withConnection(c -> + SqlTemplate.forQuery(c, getDialect().selectAllPeersPaginated()) + .execute(Map.of("limit", limit, "offset", offset)) + .map(rows -> findMany(rows, DatabaseStorage::rowToPeer)) + ).recover(cause -> + Future.failedFuture(new DataStorageException("Database operation failed", cause)) + ); } @Override public Future> getPeers(boolean persistent, long announcedBefore) { - return SqlTemplate.forQuery(getClient(), 
getDialect().selectPeersByPersistentAndAnnouncedBefore()) - .execute(Map.of("persistent", persistent, "updatedBefore", announcedBefore)) - .map(rows -> findMany(rows, DatabaseStorage::rowToPeer)) - .recover(cause -> - Future.failedFuture(new DataStorageException("Database operation failed", cause)) - ); + return withConnection(c -> + SqlTemplate.forQuery(c, getDialect().selectPeersByPersistentAndAnnouncedBefore()) + .execute(Map.of("persistent", persistent, "updatedBefore", announcedBefore)) + .map(rows -> findMany(rows, DatabaseStorage::rowToPeer)) + ).recover(cause -> + Future.failedFuture(new DataStorageException("Database operation failed", cause)) + ); } @Override public Future> getPeers(boolean persistent, long announcedBefore, int offset, int limit) { - return SqlTemplate.forQuery(getClient(), getDialect().selectPeersByPersistentAndAnnouncedBeforePaginated()) - .execute(Map.of( - "persistent", persistent, - "updatedBefore", announcedBefore, - "limit", limit, - "offset", offset)) - .map(rows -> findMany(rows, DatabaseStorage::rowToPeer)) - .recover(cause -> - Future.failedFuture(new DataStorageException("Database operation failed", cause)) - ); + return withConnection(c -> + SqlTemplate.forQuery(c, getDialect().selectPeersByPersistentAndAnnouncedBeforePaginated()) + .execute(Map.of( + "persistent", persistent, + "updatedBefore", announcedBefore, + "limit", limit, + "offset", offset)) + .map(rows -> findMany(rows, DatabaseStorage::rowToPeer)) + ).recover(cause -> + Future.failedFuture(new DataStorageException("Database operation failed", cause)) + ); } @Override @@ -437,7 +455,7 @@ public Future removePeer(Id id, Id nodeId) { return withTransaction(c -> SqlTemplate.forUpdate(c, getDialect().deletePeerByIdAndNodeId()) .execute(Map.of("id", id.bytes(), "nodeId", nodeId.bytes())) - .map(this::hasEffectedRows) + .map(this::hasAffectedRows) ).andThen(ar -> { if (ar.succeeded()) { if (ar.result()) @@ -459,7 +477,7 @@ public Future removePeers(Id id) { return 
withTransaction(c -> SqlTemplate.forUpdate(c, getDialect().deletePeersById()) .execute(Map.of("id", id.bytes())) - .map(this::hasEffectedRows) + .map(this::hasAffectedRows) ).andThen(ar -> { if (ar.succeeded()) { if (ar.result()) diff --git a/dht/src/main/java/io/bosonnetwork/kademlia/storage/PostgresStorage.java b/dht/src/main/java/io/bosonnetwork/kademlia/storage/PostgresStorage.java index 6dca088..9421a26 100644 --- a/dht/src/main/java/io/bosonnetwork/kademlia/storage/PostgresStorage.java +++ b/dht/src/main/java/io/bosonnetwork/kademlia/storage/PostgresStorage.java @@ -25,12 +25,14 @@ import java.net.URL; import java.nio.file.Path; +import io.vertx.core.Future; import io.vertx.core.Vertx; import io.vertx.pgclient.PgBuilder; import io.vertx.pgclient.PgConnectOptions; import io.vertx.sqlclient.Pool; import io.vertx.sqlclient.PoolOptions; import io.vertx.sqlclient.SqlClient; +import io.vertx.sqlclient.SqlConnection; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,20 +40,28 @@ public class PostgresStorage extends DatabaseStorage implements DataStorage { protected static final String STORAGE_URI_PREFIX = "postgresql://"; private final String connectionUri; + private final int poolSize; + private final String schema; private Pool client; private SqlDialect sqlDialect; private static final Logger log = LoggerFactory.getLogger(PostgresStorage.class); - protected PostgresStorage(String connectionUri) { + protected PostgresStorage(String connectionUri, int poolSize, String schema) { this.connectionUri = connectionUri; + this.poolSize = poolSize > 0 ? poolSize : 8; + this.schema = schema; + } + + protected PostgresStorage(String connectionUri) { + this(connectionUri, 0, null); } // postgresql://[user[:password]@][host][:port][,...][/dbname][?param1=value1&...] 
@Override protected void init(Vertx vertx) { PgConnectOptions connectOptions = PgConnectOptions.fromUri(connectionUri); - PoolOptions poolOptions = new PoolOptions().setMaxSize(8); + PoolOptions poolOptions = new PoolOptions().setMaxSize(poolSize); client = PgBuilder.pool() .with(poolOptions) .connectingTo(connectOptions) @@ -61,12 +71,17 @@ protected void init(Vertx vertx) { } @Override - protected Path getSchemaPath() { - URL schemaPath = getClass().getResource("/db/kadnode/postgres"); - if (schemaPath == null || schemaPath.getPath() == null) + protected Path getMigrationPath() { + URL migrationResource = getClass().getResource("/db/kadnode/postgres"); + if (migrationResource == null || migrationResource.getPath() == null) throw new IllegalStateException("Migration path not exists"); - return Path.of(schemaPath.getPath()); + return Path.of(migrationResource.getPath()); + } + + @Override + protected String getSchema() { + return schema; } @Override @@ -74,6 +89,16 @@ public SqlClient getClient() { return client; } + @Override + public Future prepareConnection(SqlConnection connection) { + if (schema == null) + return Future.succeededFuture(); + else + return connection.query("SET search_path TO " + schema) + .execute() + .mapEmpty(); + } + @Override protected SqlDialect getDialect() { return sqlDialect; diff --git a/dht/src/main/java/io/bosonnetwork/kademlia/storage/SQLiteStorage.java b/dht/src/main/java/io/bosonnetwork/kademlia/storage/SQLiteStorage.java index 1b2f2a7..55e6313 100644 --- a/dht/src/main/java/io/bosonnetwork/kademlia/storage/SQLiteStorage.java +++ b/dht/src/main/java/io/bosonnetwork/kademlia/storage/SQLiteStorage.java @@ -38,13 +38,19 @@ public class SQLiteStorage extends DatabaseStorage implements DataStorage { protected static final String STORAGE_URI_PREFIX = "jdbc:sqlite:"; private final String connectionUri; + private final int poolSize; private Pool client; private SqlDialect sqlDialect; private static final Logger log = 
LoggerFactory.getLogger(SQLiteStorage.class); - protected SQLiteStorage(String connectionUri) { + protected SQLiteStorage(String connectionUri, int poolSize) { this.connectionUri = connectionUri; + this.poolSize = poolSize > 0 ? poolSize : 1; + } + + protected SQLiteStorage(String connectionUri) { + this(connectionUri, 0); } @Override @@ -60,7 +66,7 @@ protected void init(Vertx vertx) { dataSource.setFullSync(true); // Single connection recommended for SQLite - PoolOptions poolOptions = new PoolOptions().setMaxSize(1); + PoolOptions poolOptions = new PoolOptions().setMaxSize(poolSize); client = JDBCPool.pool(vertx, dataSource, poolOptions); sqlDialect = new SqlDialect() {}; @@ -73,12 +79,12 @@ protected void init(Vertx vertx) { } @Override - protected Path getSchemaPath() { - URL schemaPath = getClass().getResource("/db/kadnode/sqlite"); - if (schemaPath == null || schemaPath.getPath() == null) + protected Path getMigrationPath() { + URL migrationResource = getClass().getResource("/db/kadnode/sqlite"); + if (migrationResource == null || migrationResource.getPath() == null) throw new IllegalStateException("Migration path not exists"); - return Path.of(schemaPath.getPath()); + return Path.of(migrationResource.getPath()); } @Override diff --git a/dht/src/main/resources/node.yaml b/dht/src/main/resources/node.yaml index f759105..d93a40d 100644 --- a/dht/src/main/resources/node.yaml +++ b/dht/src/main/resources/node.yaml @@ -62,14 +62,32 @@ privateKey: 0x751a9612f9bd80e6e37a77a704dc2a99dbfb162c35cb138ca46eaacd656de9bedf # - Windows: %ProgramData%\boson\node dataDir: ~/.local/share/boson/node -# Storage backend used by the node. -# Supported values: +# Database configuration. +# +# Supported database URIs: # - PostgreSQL: postgresql://user:password@host:port/database # - SQLite: jdbc:sqlite:/path/to/sqlite.db # # For lightweight or embedded deployments, SQLite is recommended. -# For super node deployments, prefer PostgreSQL. 
-storageURL: jdbc:sqlite:node.db +# For super node or high-concurrency deployments, prefer PostgreSQL. +# +# Configuration fields: +# uri: +# Database connection URI. +# +# poolSize: +# Size of the database connection pool. +# A value of 0 means "use the database implementation default". +# +# schema: +# Database schema (namespace) name. +# This option is only applicable to PostgreSQL. +# For other databases (e.g., SQLite), this field is ignored. +# +database: + uri: jdbc:sqlite:node.db + # poolSize: 0 + # schema: kademlia # Bootstrap nodes used when joining the DHT. # Each bootstrap node is a node info triple: diff --git a/dht/src/test/java/io/bosonnetwork/kademlia/InstantTests.java b/dht/src/test/java/io/bosonnetwork/kademlia/InstantTests.java deleted file mode 100644 index 830d1e8..0000000 --- a/dht/src/test/java/io/bosonnetwork/kademlia/InstantTests.java +++ /dev/null @@ -1,236 +0,0 @@ -package io.bosonnetwork.kademlia; - -import java.util.concurrent.TimeUnit; - -import io.micrometer.core.instrument.DistributionSummary; -import io.micrometer.core.instrument.MeterRegistry; -import io.micrometer.core.instrument.Timer; -import io.vertx.core.AbstractVerticle; -import io.vertx.core.DeploymentOptions; -import io.vertx.core.Promise; -import io.vertx.core.Vertx; -import io.vertx.core.VertxOptions; -import io.vertx.core.buffer.Buffer; -import io.vertx.core.datagram.DatagramSocket; -import io.vertx.core.datagram.DatagramSocketOptions; -import io.vertx.core.http.HttpServerOptions; -import io.vertx.core.net.SocketAddress; -import io.vertx.micrometer.MicrometerMetricsOptions; -import io.vertx.micrometer.VertxPrometheusOptions; -import io.vertx.micrometer.backends.BackendRegistries; -import org.slf4j.Logger; - -import io.bosonnetwork.crypto.Random; - -public class InstantTests { - private static final int CLIENT_INSTANCES = 30; - private static final int TOTAL_MESSAGES = 10000; - - static class EchoServer extends AbstractVerticle { - private static final int PORT = 1234; - 
private static final String HOST = "0.0.0.0"; - - private static final Logger log = org.slf4j.LoggerFactory.getLogger(EchoServer.class); - - private DatagramSocket socket; - - private MeterRegistry registry; - private Timer sendTimer; - private DistributionSummary packetSizeSummary; - - private int count = 0; - private long begin = 0; - private long end = 0; - - @Override - public void start(Promise startPromise) throws Exception { - registry = BackendRegistries.getDefaultNow(); - sendTimer = Timer.builder("boson_datagram_send_time") - .description("Time to send a packet") - .tag("module", "DHT") - .publishPercentiles(0.5, 0.95, 0.99) - .register(registry); - packetSizeSummary = DistributionSummary.builder("boson_datagram_packet_size_bytes") - .description("Size of sent packets") - .tag("module", "DHT") - .publishPercentiles(0.5, 0.95, 0.99) - .register(registry); - - socket = vertx.createDatagramSocket(new DatagramSocketOptions() - .setSendBufferSize(1024 * 1024) - .setReceiveBufferSize(1024 * 1024) - .setTrafficClass(0x10)); - - // Set up the packet handler - socket.handler(packet -> { - if (count == 0) - begin = System.currentTimeMillis(); - - SocketAddress sender = packet.sender(); - echo(packet.data(), packet.sender()); - }); - - socket.exceptionHandler(e -> log.error("Socket exception", e)); - - // Bind the socket to the specified host and port - socket.listen(PORT, HOST).onComplete(ar -> { - if (ar.succeeded()) { - log.info("UDP Echo Server listening on {}:{}", HOST, PORT); - startPromise.complete(); - } else { - log.error("Failed to bind server on {}:{}", HOST, PORT, ar.cause()); - startPromise.fail(ar.cause()); - } - }); - } - - @Override - public void stop(Promise stopPromise) throws Exception { - socket.close().onComplete(ar -> { - log.info("UDP Echo Server stopped"); - stopPromise.complete(); - }); - } - - private void echo(Buffer data, SocketAddress addr) { - context.runOnContext(v -> { - long startTime = System.nanoTime(); - 
packetSizeSummary.record(data.length()); - - socket.send(data, addr.port(), addr.host()).onComplete(ar -> { - sendTimer.record(System.nanoTime() - startTime, TimeUnit.NANOSECONDS); - - if (ar.succeeded()) { - ++count; - //log.info("Echoed packet to {}, total echoed: {}", addr, count); - //if (count % 1000 == 0) - // log.info("Echoed {} packets", count); - - if (count == CLIENT_INSTANCES * TOTAL_MESSAGES) { - end = System.currentTimeMillis(); - System.out.println(">>>>>>>>>>>>>>>> " + (end - begin) + " ms"); - } - } else { - log.error("Failed to send packet to {}", addr, ar.cause()); - } - }); - }); - } - - public static void main(String[] args) { - VertxOptions options = new VertxOptions().setMetricsOptions( - new MicrometerMetricsOptions() - .setPrometheusOptions(new VertxPrometheusOptions() - .setEnabled(true) - //.setPublishQuantiles(true) - .setStartEmbeddedServer(true) - .setEmbeddedServerOptions(new HttpServerOptions().setPort(8080)) - .setEmbeddedServerEndpoint("/metrics")) - .setEnabled(true)); - - Vertx vertx = Vertx.vertx(options); - vertx.deployVerticle(new EchoServer()).onComplete(ar -> { - if (ar.succeeded()) { - log.info("Echo server deployed successfully"); - } else { - log.error("Failed to deploy Echo server", ar.cause()); - } - }); - } - } - - public static class EchoClient extends AbstractVerticle { - private static final int SEND_DELAY_MS = 1; - - private static final String SERVER_HOST = "127.0.0.1"; - private static final int SERVER_PORT = 1234; - private static final Logger log = org.slf4j.LoggerFactory.getLogger(EchoClient.class); - - private DatagramSocket socket; - private int totalSent = 0; - private int totalReceived = 0; - - public EchoClient() { - } - - @Override - public void start(Promise startPromise) throws Exception { - socket = vertx.createDatagramSocket(new DatagramSocketOptions() - .setSendBufferSize(1024 * 1024) - .setReceiveBufferSize(1024 * 1024)); - - socket.handler(packet -> { - ++totalReceived; - //log.info("Received 
response from {}, total received: {}/{}", packet.sender(), totalReceived, TOTAL_MESSAGES); - if (totalReceived == TOTAL_MESSAGES) { - log.info("Finished receiving messages! Total received {} messages", totalReceived); - undeployIfFinished(); - } - }); - - socket.exceptionHandler(e -> log.error("Socket exception", e)); - - log.info("UDP Echo client {} started", deploymentID()); - - context.runOnContext(this::sendMessage); - startPromise.complete(); - } - - @Override - public void stop(Promise stopPromise) throws Exception { - socket.close().onComplete(ar -> { - vertx.close(); - log.info("UDP Echo client stopped"); - stopPromise.complete(); - }); - } - - private void sendMessage(Void arg) { - byte[] message = Random.randomBytes(Random.random().nextInt(32, 1024)); - - socket.send(Buffer.buffer(message), SERVER_PORT, SERVER_HOST).onComplete(ar -> { - if (ar.succeeded()) { - ++totalSent; - //log.info("Message sent successfully to server {}/{}", totalSent, TOTAL_MESSAGES); - - if (totalSent < TOTAL_MESSAGES) { - vertx.setTimer(SEND_DELAY_MS, id -> context.runOnContext(this::sendMessage)); - } else { - log.info("Finished sending messages! 
Total sent {} messages", totalSent); - } - } else { - log.error("Failed to send message", ar.cause()); - } - }); - } - - private void undeployIfFinished() { - vertx.sharedData().getLocalCounter("ECHO_CLIENT_FINISHED").onSuccess(counter -> { - counter.incrementAndGet().onSuccess(v -> { - if (v == CLIENT_INSTANCES) - vertx.undeploy(deploymentID()); - }).onFailure(e -> { - log.error("Failed to increment counter", e); - }); - }).onFailure(e -> { - log.error("Failed to get shared counter", e); - }); - } - - public static void main(String[] args) { - Vertx vertx = Vertx.vertx(); - - DeploymentOptions options = new DeploymentOptions() - .setInstances(CLIENT_INSTANCES); - - vertx.deployVerticle(EchoClient.class, options).onComplete(ar -> { - if (ar.succeeded()) { - log.info("Echo client[{} instances] deployed successfully", CLIENT_INSTANCES); - } else { - log.error("Failed to deploy Echo client", ar.cause()); - vertx.close(); - } - }); - } - } -} \ No newline at end of file diff --git a/dht/src/test/java/io/bosonnetwork/kademlia/NodeAsyncTests.java b/dht/src/test/java/io/bosonnetwork/kademlia/NodeAsyncTests.java index 78f8834..18ae575 100644 --- a/dht/src/test/java/io/bosonnetwork/kademlia/NodeAsyncTests.java +++ b/dht/src/test/java/io/bosonnetwork/kademlia/NodeAsyncTests.java @@ -75,7 +75,7 @@ private static VertxFuture startBootstrap() { .address4(localAddr) .port(TEST_NODES_PORT_START - 1) .dataDir(testDir.resolve("nodes" + File.separator + "node-bootstrap")) - .storageURI("jdbc:sqlite:" + testDir.resolve("nodes" + File.separator + "node-bootstrap" + File.separator + "storage.db")) + .database("jdbc:sqlite:" + testDir.resolve("nodes" + File.separator + "node-bootstrap" + File.separator + "storage.db")) .enableDeveloperMode() .build(); @@ -128,7 +128,7 @@ private static VertxFuture createTestNode(int index) { .address4(localAddr) .port(TEST_NODES_PORT_START + index) .dataDir(testDir.resolve("nodes" + File.separator + "node-" + index)) - .storageURI("jdbc:sqlite:" + 
testDir.resolve("nodes" + File.separator + "node-" + index + File.separator + "storage.db")) + .database("jdbc:sqlite:" + testDir.resolve("nodes" + File.separator + "node-" + index + File.separator + "storage.db")) .addBootstrap(bootstrap.getNodeInfo().getV4()) .enableDeveloperMode() .build(); diff --git a/dht/src/test/java/io/bosonnetwork/kademlia/NodeSyncTests.java b/dht/src/test/java/io/bosonnetwork/kademlia/NodeSyncTests.java index e36f235..bbdcc48 100644 --- a/dht/src/test/java/io/bosonnetwork/kademlia/NodeSyncTests.java +++ b/dht/src/test/java/io/bosonnetwork/kademlia/NodeSyncTests.java @@ -59,7 +59,7 @@ private static void startBootstrap() throws Exception { .address4(localAddr) .port(TEST_NODES_PORT_START - 1) .dataDir(testDir.resolve("nodes" + File.separator + "node-bootstrap")) - .storageURI("jdbc:sqlite:" + testDir.resolve("nodes" + File.separator + "node-bootstrap" + File.separator + "storage.db")) + .database("jdbc:sqlite:" + testDir.resolve("nodes" + File.separator + "node-bootstrap" + File.separator + "storage.db")) .enableDeveloperMode() .build(); @@ -81,7 +81,7 @@ private static void startTestNodes() throws Exception { .address4(localAddr) .port(TEST_NODES_PORT_START + i) .dataDir(testDir.resolve("nodes" + File.separator + "node-" + i)) - .storageURI("jdbc:sqlite:" + testDir.resolve("nodes" + File.separator + "node-" + i + File.separator + "storage.db")) + .database("jdbc:sqlite:" + testDir.resolve("nodes" + File.separator + "node-" + i + File.separator + "storage.db")) .addBootstrap(bootstrap.getNodeInfo().getV4()) .enableDeveloperMode() .build(); diff --git a/dht/src/test/java/io/bosonnetwork/kademlia/SybilTests.java b/dht/src/test/java/io/bosonnetwork/kademlia/SybilTests.java index 2fd6afa..a9a6641 100644 --- a/dht/src/test/java/io/bosonnetwork/kademlia/SybilTests.java +++ b/dht/src/test/java/io/bosonnetwork/kademlia/SybilTests.java @@ -80,7 +80,7 @@ void setUp() throws Exception { .port(39001) .generatePrivateKey() 
.dataDir(testDir.resolve("nodes" + File.separator + "node-target")) - .storageURI("jdbc:sqlite:" + testDir.resolve("nodes" + File.separator + "node-target" + File.separator + "storage.db")) + .database("jdbc:sqlite:" + testDir.resolve("nodes" + File.separator + "node-target" + File.separator + "storage.db")) .enableDeveloperMode() .build()); target.start().get(); diff --git a/dht/src/test/java/io/bosonnetwork/kademlia/TestNodeLauncher.java b/dht/src/test/java/io/bosonnetwork/kademlia/TestNodeLauncher.java new file mode 100644 index 0000000..2a63e0b --- /dev/null +++ b/dht/src/test/java/io/bosonnetwork/kademlia/TestNodeLauncher.java @@ -0,0 +1,90 @@ +package io.bosonnetwork.kademlia; + +import java.io.InputStream; +import java.net.Inet4Address; +import java.nio.file.Path; +import java.util.Map; +import java.util.Objects; + +import io.vertx.core.Vertx; +import io.vertx.core.VertxOptions; + +import io.bosonnetwork.ConnectionStatusListener; +import io.bosonnetwork.DefaultNodeConfiguration; +import io.bosonnetwork.Network; +import io.bosonnetwork.Node; +import io.bosonnetwork.NodeConfiguration; +import io.bosonnetwork.crypto.Signature; +import io.bosonnetwork.utils.AddressUtils; +import io.bosonnetwork.utils.Json; + +public class TestNodeLauncher { + private static final Path dataPath = Path.of(System.getProperty("java.io.tmpdir"), "boson", "KademliaNode"); + private static Vertx vertx; + private static Signature.KeyPair nodeKey; + private static Node node; + + private static NodeConfiguration loadConfiguration() throws Exception { + try (InputStream s = TestNodeLauncher.class.getResourceAsStream("/testNode.yaml")) { + Map map = Json.yamlMapper().readValue(s, Json.mapType()); + // fix the host + if (map.containsKey("host4")) + map.put("host4", Objects.requireNonNull(AddressUtils.getDefaultRouteAddress(Inet4Address.class)).getHostAddress()); + + // fix the dataDir + map.put("dataDir", dataPath.toAbsolutePath().toString()); + + return 
DefaultNodeConfiguration.fromMap(map); + } catch (Exception e) { + System.err.println("Failed to load configuration file: " + e.getMessage()); + throw e; + } + } + + public static void main(String[] args) { + Runtime.getRuntime().addShutdownHook(new Thread(() -> { + if (node != null) { + System.out.println("Shutting down the Boson Kademlia node ..."); + node.stop().thenRun(() -> + System.out.println("Boson node stopped.") + ).join(); + + // Cannot chain vertx.close() to the above future because closing Vert.x will terminate its event loop, + // preventing any pending future handlers from executing. + System.out.print("Shutting down Vert.x gracefully..."); + vertx.close().toCompletionStage().toCompletableFuture().join(); + System.out.println("Done!"); + } + })); + + vertx = Vertx.vertx(new VertxOptions() + .setWorkerPoolSize(4) + .setEventLoopPoolSize(4) + .setPreferNativeTransport(true)); + + try { + NodeConfiguration config = loadConfiguration(); + node = Node.kadNode(config); + node.addConnectionStatusListener(new ConnectionStatusListener() { + @Override + public void connected(Network network) { + System.out.println("Kademlia node connected to " + network); + } + + @Override + public void disconnected(Network network) { + System.out.println("Kademlia node disconnected from " + network); + } + }); + + System.out.println("Starting the Boson Kademlia node ..."); + node.start().thenRun(() -> { + System.out.printf("Started the Boson Kademlia node %s at %s:%d\n", + node.getId(), config.host4(), config.port()); + }).join(); + } catch (Exception e) { + e.printStackTrace(System.err); + vertx.close(); + } + } +} \ No newline at end of file diff --git a/dht/src/test/resources/testNode.yaml b/dht/src/test/resources/testNode.yaml new file mode 100644 index 0000000..b63bfef --- /dev/null +++ b/dht/src/test/resources/testNode.yaml @@ -0,0 +1,11 @@ +host4: 192.168.8.10 +port: 39001 +# node id: GMDVFJ5zdvS88Do5TbPAHnMHMsZt282A84KjfptVXQAb +privateKey: 
253vmGfhqrrGrzqzxu9NBPGgZFA1iHqyeEJMMZem7ebBn8nApYgFX8diYpLFdb34vdPMutt1eAeW2tvWEWwJH9nP +dataDir: . +database: + uri: jdbc:sqlite:node.db +enableSpamThrottling: true +enableSuspiciousNodeDetector: true +enableDeveloperMode: true +enableMetrics: false \ No newline at end of file