From 8cf8cdf06637b0592fa0b371193daeaded126737 Mon Sep 17 00:00:00 2001
From: Jingyu
Date: Sat, 11 Oct 2025 16:37:42 +0800
Subject: [PATCH 1/4] rename PeerInfo.alternativeURL to alternativeURI to
support various protocol schemes
---
.../main/java/io/bosonnetwork/PeerInfo.java | 130 +++++++++---------
.../main/java/io/bosonnetwork/utils/Json.java | 18 +--
.../io/bosonnetwork/utils/JsonPerfTests.java | 18 +--
.../io/bosonnetwork/kademlia/KadNode.java | 7 +-
.../protocol/AnnouncePeerRequest.java | 10 +-
.../kademlia/storage/DatabaseStorage.java | 10 +-
.../kademlia/storage/PostgresStorage.java | 6 +-
.../kademlia/storage/SQLiteStorage.java | 6 +-
8 files changed, 101 insertions(+), 104 deletions(-)
diff --git a/api/src/main/java/io/bosonnetwork/PeerInfo.java b/api/src/main/java/io/bosonnetwork/PeerInfo.java
index 08a7b49..8bb0503 100644
--- a/api/src/main/java/io/bosonnetwork/PeerInfo.java
+++ b/api/src/main/java/io/bosonnetwork/PeerInfo.java
@@ -51,9 +51,9 @@ public class PeerInfo {
private final Id publicKey; // Peer ID
private final byte[] privateKey; // Private key to sign the peer info
private final Id nodeId; // The node that provide the service peer
- private final Id origin; // The node that announce the peer
+ private final Id origin; // The node that announces the peer
private final int port;
- private final String alternativeURL;
+ private final String alternativeURI;
private final byte[] signature;
/**
@@ -63,14 +63,14 @@ public class PeerInfo {
* @param privateKey the private key associated with the peer; should be of length {@link Signature.PrivateKey#BYTES}, or null if not provided
* @param nodeId the node identifier of the peer; must not be null
* @param origin the origin identifier of the peer; can be null or the same as nodeId
- * @param port the port number associated with the peer; must be greater than 0 and less than or equal to 65535
- * @param alternativeURL an optional alternative URL associated with the peer; may be null or an empty string
+ * @param port the port number associated with the peer; must be greater than 0 and less than or equal to 65535
+ * @param alternativeURI an optional alternative URI associated with the peer; may be null or an empty string
* @param signature the signature associated with the peer; must not be null and should be of length {@link Signature#BYTES}
* @throws IllegalArgumentException if peerId is null, the privateKey length is invalid, nodeId is null,
* the port is out of the valid range, or the signature is invalid
*/
private PeerInfo(Id peerId, byte[] privateKey, Id nodeId, Id origin, int port,
- String alternativeURL, byte[] signature) {
+ String alternativeURI, byte[] signature) {
if (peerId == null)
throw new IllegalArgumentException("Invalid peer id");
@@ -91,10 +91,10 @@ private PeerInfo(Id peerId, byte[] privateKey, Id nodeId, Id origin, int port,
this.nodeId = nodeId;
this.origin = origin == null || origin.equals(nodeId) ? null : origin;
this.port = port;
- if (alternativeURL != null && !alternativeURL.isEmpty())
- this.alternativeURL = Normalizer.normalize(alternativeURL, Normalizer.Form.NFC);
+ if (alternativeURI != null && !alternativeURI.isEmpty())
+ this.alternativeURI = Normalizer.normalize(alternativeURI, Normalizer.Form.NFC);
else
- this.alternativeURL = null;
+ this.alternativeURI = null;
this.signature = signature;
}
@@ -105,11 +105,11 @@ private PeerInfo(Id peerId, byte[] privateKey, Id nodeId, Id origin, int port,
* @param nodeId The unique identifier for the node.
* @param origin The origin identifier associated with the peer.
* @param port The port number used by the peer.
- * @param alternativeURL An alternative URL for accessing the peer.
+ * @param alternativeURI An alternative URI for accessing the peer.
* @param signature A byte array representing the signature for security validation.
*/
- protected PeerInfo(Id peerId, Id nodeId, Id origin, int port, String alternativeURL, byte[] signature) {
- this(peerId, null, nodeId, origin, port, alternativeURL, signature);
+ protected PeerInfo(Id peerId, Id nodeId, Id origin, int port, String alternativeURI, byte[] signature) {
+ this(peerId, null, nodeId, origin, port, alternativeURI, signature);
}
/**
@@ -119,10 +119,10 @@ protected PeerInfo(Id peerId, Id nodeId, Id origin, int port, String alternative
* @param nodeId the unique identifier of the node; must not be null
* @param origin the origin node's identifier; can be null or equal to nodeId
* @param port the port number for the peer; must be between 1 and 65535
- * @param alternativeURL an optional alternative URL for the peer; can be null or empty
+ * @param alternativeURI an optional alternative URI for the peer; can be null or empty
* @throws IllegalArgumentException if the keypair is null, nodeId is null, or port is invalid
*/
- private PeerInfo(Signature.KeyPair keypair, Id nodeId, Id origin, int port, String alternativeURL) {
+ private PeerInfo(Signature.KeyPair keypair, Id nodeId, Id origin, int port, String alternativeURI) {
if (keypair == null)
throw new IllegalArgumentException("Invalid keypair");
@@ -137,10 +137,10 @@ private PeerInfo(Signature.KeyPair keypair, Id nodeId, Id origin, int port, Stri
this.nodeId = nodeId;
this.origin = origin == null || origin.equals(nodeId) ? null : origin;
this.port = port;
- if (alternativeURL != null && !alternativeURL.isEmpty())
- this.alternativeURL = Normalizer.normalize(alternativeURL, Normalizer.Form.NFC);
+ if (alternativeURI != null && !alternativeURI.isEmpty())
+ this.alternativeURI = Normalizer.normalize(alternativeURI, Normalizer.Form.NFC);
else
- this.alternativeURL = null;
+ this.alternativeURI = null;
this.signature = Signature.sign(getSignData(), keypair.privateKey());
}
@@ -177,12 +177,12 @@ public static PeerInfo of(Id peerId, byte[] privateKey, Id nodeId, int port, byt
* @param peerId the peer ID.
* @param nodeId the ID of the node providing the service peer.
* @param port the port on which the peer is available.
- * @param alternativeURL an alternative URL for the peer.
+ * @param alternativeURI an alternative URI for the peer.
* @param signature the signature of the peer info.
* @return a created PeerInfo object.
*/
- public static PeerInfo of(Id peerId, Id nodeId, int port, String alternativeURL, byte[] signature) {
- return new PeerInfo(peerId, null, nodeId, null, port, alternativeURL, signature);
+ public static PeerInfo of(Id peerId, Id nodeId, int port, String alternativeURI, byte[] signature) {
+ return new PeerInfo(peerId, null, nodeId, null, port, alternativeURI, signature);
}
/**
@@ -192,13 +192,13 @@ public static PeerInfo of(Id peerId, Id nodeId, int port, String alternativeURL,
* @param privateKey the private key associated with the peer.
* @param nodeId the ID of the node providing the service peer.
* @param port the port on which the peer is available.
- * @param alternativeURL an alternative URL for the peer.
+ * @param alternativeURI an alternative URI for the peer.
* @param signature the signature of the peer info.
* @return a created PeerInfo object.
*/
public static PeerInfo of(Id peerId, byte[] privateKey, Id nodeId, int port,
- String alternativeURL, byte[] signature) {
- return new PeerInfo(peerId, privateKey, nodeId, null, port, alternativeURL, signature);
+ String alternativeURI, byte[] signature) {
+ return new PeerInfo(peerId, privateKey, nodeId, null, port, alternativeURI, signature);
}
/**
@@ -237,12 +237,12 @@ public static PeerInfo of(Id peerId, byte[] privateKey, Id nodeId, Id origin, i
* @param nodeId the ID of the node providing the service peer.
* @param origin the node that announces the peer.
* @param port the port on which the peer is available.
- * @param alternativeURL an alternative URL for the peer.
+ * @param alternativeURI an alternative URI for the peer.
* @param signature the signature of the peer info.
* @return a created PeerInfo object.
*/
- public static PeerInfo of(Id peerId, Id nodeId, Id origin, int port, String alternativeURL, byte[] signature) {
- return new PeerInfo(peerId, null, nodeId, origin, port, alternativeURL, signature);
+ public static PeerInfo of(Id peerId, Id nodeId, Id origin, int port, String alternativeURI, byte[] signature) {
+ return new PeerInfo(peerId, null, nodeId, origin, port, alternativeURI, signature);
}
/**
@@ -253,18 +253,18 @@ public static PeerInfo of(Id peerId, Id nodeId, Id origin, int port, String alte
* @param nodeId the ID of the node providing the service peer.
* @param origin the node that announces the peer.
* @param port the port on which the peer is available.
- * @param alternativeURL an alternative URL for the peer.
+ * @param alternativeURI an alternative URI for the peer.
* @param signature the signature of the peer info.
* @return a created PeerInfo object.
*/
public static PeerInfo of(Id peerId, byte[] privateKey, Id nodeId, Id origin, int port,
- String alternativeURL, byte[] signature) {
- return new PeerInfo(peerId, privateKey, nodeId, origin, port, alternativeURL, signature);
+ String alternativeURI, byte[] signature) {
+ return new PeerInfo(peerId, privateKey, nodeId, origin, port, alternativeURI, signature);
}
/**
- * Creates a PeerInfo object with specified information. the new created PeerInfo will
- * be signed by a new generated random key pair.
+ * Creates a PeerInfo object with the specified information. The newly created
+ * PeerInfo will be signed by a newly generated random key pair.
*
* @param nodeId the ID of the node providing the service peer.
* @param port the port on which the peer is available.
@@ -287,8 +287,8 @@ public static PeerInfo create(Signature.KeyPair keypair, Id nodeId, int port) {
}
/**
- * Creates a PeerInfo object with specified information. the new created PeerInfo will
- * be signed by a new generated random key pair.
+ * Creates a PeerInfo object with the specified information. The newly created
+ * PeerInfo will be signed by a newly generated random key pair.
*
* @param nodeId the ID of the node providing the service peer.
* @param origin the node that announces the peer.
@@ -313,16 +313,16 @@ public static PeerInfo create(Signature.KeyPair keypair, Id nodeId, Id origin, i
}
/**
- * Creates a PeerInfo object with specified information. the new created PeerInfo will
- * be signed by a new generated random key pair.
+ * Creates a PeerInfo object with the specified information. The newly created
+ * PeerInfo will be signed by a newly generated random key pair.
*
* @param nodeId the ID of the node providing the service peer.
* @param port the port on which the peer is available.
- * @param alternativeURL an alternative URL for the peer.
+ * @param alternativeURI an alternative URI for the peer.
* @return a created PeerInfo object.
*/
- public static PeerInfo create(Id nodeId, int port, String alternativeURL) {
- return create(null, nodeId, null, port, alternativeURL);
+ public static PeerInfo create(Id nodeId, int port, String alternativeURI) {
+ return create(null, nodeId, null, port, alternativeURI);
}
/**
@@ -331,25 +331,25 @@ public static PeerInfo create(Id nodeId, int port, String alternativeURL) {
* @param keypair the key pair key to sign the peer information.
* @param nodeId the ID of the node providing the service peer.
* @param port the port on which the peer is available.
- * @param alternativeURL an alternative URL for the peer.
+ * @param alternativeURI an alternative URI for the peer.
* @return a created PeerInfo object.
*/
- public static PeerInfo create(Signature.KeyPair keypair, Id nodeId, int port, String alternativeURL) {
- return create(keypair, nodeId, null, port, alternativeURL);
+ public static PeerInfo create(Signature.KeyPair keypair, Id nodeId, int port, String alternativeURI) {
+ return create(keypair, nodeId, null, port, alternativeURI);
}
/**
- * Creates a PeerInfo object with specified information. the new created PeerInfo will
+ * Creates a PeerInfo object with the specified information. The newly created PeerInfo will
* be signed by a new generated random key pair.
*
* @param nodeId the ID of the node providing the service peer.
* @param origin the node that announces the peer.
* @param port the port on which the peer is available.
- * @param alternativeURL an alternative URL for the peer.
+ * @param alternativeURI an alternative URI for the peer.
* @return a created PeerInfo object.
*/
- public static PeerInfo create(Id nodeId, Id origin, int port, String alternativeURL) {
- return create(null, nodeId, origin, port, alternativeURL);
+ public static PeerInfo create(Id nodeId, Id origin, int port, String alternativeURI) {
+ return create(null, nodeId, origin, port, alternativeURI);
}
/**
@@ -359,15 +359,15 @@ public static PeerInfo create(Id nodeId, Id origin, int port, String alternative
* @param nodeId the ID of the node providing the service peer.
* @param origin the node that announces the peer.
* @param port the port on which the peer is available.
- * @param alternativeURL an alternative URL for the peer.
+ * @param alternativeURI an alternative URI for the peer.
* @return a created PeerInfo object.
*/
public static PeerInfo create(Signature.KeyPair keypair, Id nodeId, Id origin,
- int port, String alternativeURL) {
+ int port, String alternativeURI) {
if (keypair == null)
keypair = Signature.KeyPair.random();
- return new PeerInfo(keypair, nodeId, origin, port, alternativeURL);
+ return new PeerInfo(keypair, nodeId, origin, port, alternativeURI);
}
/**
@@ -434,21 +434,21 @@ public int getPort() {
}
/**
- * Gets the alternative URL for the peer.
+ * Gets the alternative URI for the peer.
*
- * @return The alternative URL.
+ * @return The alternative URI.
*/
- public String getAlternativeURL() {
- return alternativeURL;
+ public String getAlternativeURI() {
+ return alternativeURI;
}
/**
- * Checks if the peer has an alternative URL.
+ * Checks if the peer has an alternative URI.
*
- * @return {@code true} if the peer has an alternative URL, {@code false} otherwise.
+ * @return {@code true} if the peer has an alternative URI, {@code false} otherwise.
*/
- public boolean hasAlternativeURL() {
- return alternativeURL != null && !alternativeURL.isEmpty();
+ public boolean hasAlternativeURI() {
+ return alternativeURI != null && !alternativeURI.isEmpty();
}
/**
@@ -462,9 +462,9 @@ public byte[] getSignature() {
private byte[] getSignData() {
// TODO: optimize with incremental digest, and return sha256 hash as sign input
- /*
- byte[] alt = alternativeURL == null || alternativeURL.isEmpty() ?
- null : alternativeURL.getBytes(UTF_8);
+ /*
+ byte[] alt = alternativeURI == null || alternativeURI.isEmpty() ?
+ null : alternativeURI.getBytes(UTF_8);
byte[] toSign = new byte[Id.BYTES * 2 + Short.BYTES + (alt == null ? 0 : alt.length)];
ByteBuffer buf = ByteBuffer.wrap(toSign);
@@ -483,8 +483,8 @@ private byte[] getSignData() {
if (origin != null)
sha.update(origin.bytes());
sha.update(ByteBuffer.allocate(Short.BYTES).putShort((short)port).array());
- if (alternativeURL != null)
- sha.update(alternativeURL.getBytes(UTF_8));
+ if (alternativeURI != null)
+ sha.update(alternativeURI.getBytes(UTF_8));
return sha.digest();
}
@@ -515,12 +515,12 @@ public PeerInfo withoutPrivateKey() {
if (privateKey == null)
return this;
- return new PeerInfo(publicKey, null, nodeId, origin, port, alternativeURL, signature);
+ return new PeerInfo(publicKey, null, nodeId, origin, port, alternativeURI, signature);
}
@Override
public int hashCode() {
- return 0x6030A + Objects.hash(publicKey, nodeId, origin, port, alternativeURL, Arrays.hashCode(signature));
+ return 0x6030A + Objects.hash(publicKey, nodeId, origin, port, alternativeURI, Arrays.hashCode(signature));
}
@Override
@@ -533,7 +533,7 @@ public boolean equals(Object o) {
Objects.equals(this.nodeId, that.nodeId) &&
Objects.equals(this.origin, that.origin) &&
this.port == that.port &&
- Objects.equals(this.alternativeURL, that.alternativeURL) &&
+ Objects.equals(this.alternativeURI, that.alternativeURI) &&
Arrays.equals(this.signature, that.signature);
}
@@ -549,8 +549,8 @@ public String toString() {
if (isDelegated())
sb.append(getOrigin().toString()).append(',');
sb.append(port);
- if (hasAlternativeURL())
- sb.append(",").append(alternativeURL);
+ if (hasAlternativeURI())
+ sb.append(",").append(alternativeURI);
sb.append(">");
return sb.toString();
}
diff --git a/api/src/main/java/io/bosonnetwork/utils/Json.java b/api/src/main/java/io/bosonnetwork/utils/Json.java
index 01a20bd..f93be4f 100644
--- a/api/src/main/java/io/bosonnetwork/utils/Json.java
+++ b/api/src/main/java/io/bosonnetwork/utils/Json.java
@@ -426,7 +426,7 @@ public NodeInfo deserialize(JsonParser p, DeserializationContext ctx) throws IOE
/**
* Serializer for {@link PeerInfo} objects.
*
- * Encodes PeerInfo as a 6-element array: [peerId, nodeId, originNodeId, port, alternativeURL, signature].
+ * Encodes PeerInfo as a 6-element array: [peerId, nodeId, originNodeId, port, alternativeURI, signature].
* In binary formats, ids and signature are written as Base64-encoded binary; in text formats, ids are Base58 strings.
* Special behavior: the peerId can be omitted if the context attribute
* {@link io.bosonnetwork.PeerInfo#ATTRIBUTE_OMIT_PEER_ID} is set.
@@ -447,9 +447,9 @@ public void serialize(PeerInfo value, JsonGenerator gen, SerializerProvider prov
final boolean binaryFormat = isBinaryFormat(gen);
// Format: 6-tuple
- // [peerId, nodeId, originNodeId, port, alternativeURL, signature]
+ // [peerId, nodeId, originNodeId, port, alternativeURI, signature]
// If omit the peer id, format:
- // [null, nodeId, originNodeId, port, alternativeURL, signature]
+ // [null, nodeId, originNodeId, port, alternativeURI, signature]
gen.writeStartArray();
@@ -486,8 +486,8 @@ public void serialize(PeerInfo value, JsonGenerator gen, SerializerProvider prov
gen.writeNumber(value.getPort());
// alternative url
- if (value.hasAlternativeURL())
- gen.writeString(value.getAlternativeURL());
+ if (value.hasAlternativeURI())
+ gen.writeString(value.getAlternativeURI());
else
gen.writeNull();
@@ -502,7 +502,7 @@ public void serialize(PeerInfo value, JsonGenerator gen, SerializerProvider prov
/**
* Deserializer for {@link PeerInfo} objects.
*
- * Expects a 6-element array: [peerId, nodeId, originNodeId, port, alternativeURL, signature].
+ * Expects a 6-element array: [peerId, nodeId, originNodeId, port, alternativeURI, signature].
* In binary formats, ids and signature are decoded from Base64-encoded binary; in text formats, ids are Base58 strings.
* Special behavior: if peerId is omitted (null), it is taken from the context attribute
* {@link io.bosonnetwork.PeerInfo#ATTRIBUTE_PEER_ID}.
@@ -529,7 +529,7 @@ public PeerInfo deserialize(JsonParser p, DeserializationContext ctx) throws IOE
Id nodeId = null;
Id origin = null;
int port = 0;
- String alternativeURL = null;
+ String alternativeURI = null;
byte[] signature = null;
// peer id
@@ -556,7 +556,7 @@ public PeerInfo deserialize(JsonParser p, DeserializationContext ctx) throws IOE
// alternative url
if (p.nextToken() != JsonToken.VALUE_NULL)
- alternativeURL = p.getText();
+ alternativeURI = p.getText();
// signature
if (p.nextToken() != JsonToken.VALUE_NULL)
@@ -565,7 +565,7 @@ public PeerInfo deserialize(JsonParser p, DeserializationContext ctx) throws IOE
if (p.nextToken() != JsonToken.END_ARRAY)
throw MismatchedInputException.from(p, PeerInfo.class, "Invalid PeerInfo: too many elements in array");
- return PeerInfo.of(peerId, nodeId, origin, port, alternativeURL, signature);
+ return PeerInfo.of(peerId, nodeId, origin, port, alternativeURI, signature);
}
}
diff --git a/api/src/test/java/io/bosonnetwork/utils/JsonPerfTests.java b/api/src/test/java/io/bosonnetwork/utils/JsonPerfTests.java
index 0658817..c189b13 100644
--- a/api/src/test/java/io/bosonnetwork/utils/JsonPerfTests.java
+++ b/api/src/test/java/io/bosonnetwork/utils/JsonPerfTests.java
@@ -459,9 +459,9 @@ static void serializePeerInfo(JsonGenerator gen, PeerInfo value, JsonContext con
boolean binaryFormat = isBinaryFormat(gen);
// Format: 6-tuple
- // [peerId, nodeId, originNodeId, port, alternativeURL, signature]
+ // [peerId, nodeId, originNodeId, port, alternativeURI, signature]
// If omit the peer id, format:
- // [null, nodeId, originNodeId, port, alternativeURL, signature]
+ // [null, nodeId, originNodeId, port, alternativeURI, signature]
gen.writeStartArray();
@@ -497,8 +497,8 @@ static void serializePeerInfo(JsonGenerator gen, PeerInfo value, JsonContext con
gen.writeNumber(value.getPort());
// alternative url
- if (value.hasAlternativeURL())
- gen.writeString(value.getAlternativeURL());
+ if (value.hasAlternativeURI())
+ gen.writeString(value.getAlternativeURI());
else
gen.writeNull();
@@ -519,7 +519,7 @@ static PeerInfo deserializePeerInfo(JsonParser p, JsonContext context) throws IO
Id nodeId;
Id origin = null;
int port;
- String alternativeURL;
+ String alternativeURI;
byte[] signature;
// peer id
@@ -553,7 +553,7 @@ static PeerInfo deserializePeerInfo(JsonParser p, JsonContext context) throws IO
// alternative url
p.nextToken();
- alternativeURL = p.currentToken() == JsonToken.VALUE_NULL ? null : p.getText();
+ alternativeURI = p.currentToken() == JsonToken.VALUE_NULL ? null : p.getText();
// signature
p.nextToken();
@@ -565,7 +565,7 @@ static PeerInfo deserializePeerInfo(JsonParser p, JsonContext context) throws IO
if (p.nextToken() != JsonToken.END_ARRAY)
throw MismatchedInputException.from(p, PeerInfo.class, "Invalid PeerInfo: too many elements in array");
- return PeerInfo.of(peerId, nodeId, origin, port, alternativeURL, signature);
+ return PeerInfo.of(peerId, nodeId, origin, port, alternativeURI, signature);
}
static String toString(PeerInfo value, JsonContext context) throws IOException {
@@ -630,7 +630,7 @@ public PeerInfoMixin(@JsonProperty(value = "id", required = true) Id peerId,
@JsonProperty(value = "n", required = true) Id nodeId,
@JsonProperty(value = "o") Id origin,
@JsonProperty(value = "p", required = true) int port,
- @JsonProperty(value = "alt") String alternativeURL,
+ @JsonProperty(value = "alt") String alternativeURI,
@JsonProperty(value = "sig", required = true) byte[] signature) { }
@JsonProperty("id")
@@ -646,7 +646,7 @@ public PeerInfoMixin(@JsonProperty(value = "id", required = true) Id peerId,
public abstract int getPort();
@JsonProperty("alt")
- public abstract String getAlternativeURL();
+ public abstract String getAlternativeURI();
@JsonProperty("sig")
public abstract byte[] getSignature();
diff --git a/dht/src/main/java/io/bosonnetwork/kademlia/KadNode.java b/dht/src/main/java/io/bosonnetwork/kademlia/KadNode.java
index ade10e0..432f458 100644
--- a/dht/src/main/java/io/bosonnetwork/kademlia/KadNode.java
+++ b/dht/src/main/java/io/bosonnetwork/kademlia/KadNode.java
@@ -13,7 +13,6 @@
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
-import com.github.benmanes.caffeine.cache.Caffeine;
import io.vertx.core.CompositeFuture;
import io.vertx.core.Context;
import io.vertx.core.Future;
@@ -229,10 +228,8 @@ public VertxFuture shutdown() {
@Override
public void init(Vertx vertx, Context context) {
super.init(vertx, context);
-
- Caffeine caffeine = VertxCaffeine.newBuilder(vertx)
- .expireAfterAccess(KBucketEntry.OLD_AND_STALE_TIME, TimeUnit.MILLISECONDS);
- identity.initCache(caffeine);
+ identity.initCache(VertxCaffeine.newBuilder(vertx)
+ .expireAfterAccess(KBucketEntry.OLD_AND_STALE_TIME, TimeUnit.MILLISECONDS));
}
@Override
diff --git a/dht/src/main/java/io/bosonnetwork/kademlia/protocol/AnnouncePeerRequest.java b/dht/src/main/java/io/bosonnetwork/kademlia/protocol/AnnouncePeerRequest.java
index f188aac..e34538d 100644
--- a/dht/src/main/java/io/bosonnetwork/kademlia/protocol/AnnouncePeerRequest.java
+++ b/dht/src/main/java/io/bosonnetwork/kademlia/protocol/AnnouncePeerRequest.java
@@ -112,8 +112,8 @@ public void serialize(AnnouncePeerRequest value, JsonGenerator gen, SerializerPr
gen.writeNumberField("p", value.peer.getPort());
- if (value.peer.getAlternativeURL() != null)
- gen.writeStringField("alt", value.peer.getAlternativeURL());
+ if (value.peer.getAlternativeURI() != null)
+ gen.writeStringField("alt", value.peer.getAlternativeURI());
byte[] sig = value.peer.getSignature();
gen.writeFieldName("sig");
@@ -146,7 +146,7 @@ public AnnouncePeerRequest deserialize(JsonParser p, DeserializationContext ctxt
Id peerId = null;
Id origin = null;
int port = 0;
- String alternativeURL = null;
+ String alternativeURI = null;
byte[] signature = null;
Id nodeId = (Id) ctxt.getAttribute(Message.ATTR_NODE_ID);
@@ -172,7 +172,7 @@ public AnnouncePeerRequest deserialize(JsonParser p, DeserializationContext ctxt
break;
case "alt":
if (token != JsonToken.VALUE_NULL)
- alternativeURL = p.getText();
+ alternativeURI = p.getText();
break;
case "sig":
signature = p.getBinaryValue(Base64Variants.MODIFIED_FOR_URL);
@@ -182,7 +182,7 @@ public AnnouncePeerRequest deserialize(JsonParser p, DeserializationContext ctxt
}
}
- return new AnnouncePeerRequest(PeerInfo.of(peerId, nodeId, origin, port, alternativeURL, signature), tok);
+ return new AnnouncePeerRequest(PeerInfo.of(peerId, nodeId, origin, port, alternativeURI, signature), tok);
}
}
}
\ No newline at end of file
diff --git a/dht/src/main/java/io/bosonnetwork/kademlia/storage/DatabaseStorage.java b/dht/src/main/java/io/bosonnetwork/kademlia/storage/DatabaseStorage.java
index e75ef55..3fd83ca 100644
--- a/dht/src/main/java/io/bosonnetwork/kademlia/storage/DatabaseStorage.java
+++ b/dht/src/main/java/io/bosonnetwork/kademlia/storage/DatabaseStorage.java
@@ -432,7 +432,7 @@ protected Future putPeer(SqlClient sqlClient, PeerInfo peerInfo, boole
peerInfo.getPrivateKey(),
peerInfo.getOrigin() != null ? peerInfo.getOrigin().bytes() : null,
peerInfo.getPort(),
- peerInfo.getAlternativeURL(),
+ peerInfo.getAlternativeURI(),
peerInfo.getSignature(),
now,
now))
@@ -481,7 +481,7 @@ public Future> putPeers(List peerInfos) {
peerInfo.getPrivateKey(),
peerInfo.getOrigin() != null ? peerInfo.getOrigin().bytes() : null,
peerInfo.getPort(),
- peerInfo.getAlternativeURL(),
+ peerInfo.getAlternativeURI(),
peerInfo.getSignature(),
now,
now
@@ -705,7 +705,7 @@ protected static PeerInfo rowToPeer(Row row) {
// 3: privateKey (BLOB, nullable)
// 4: origin (BLOB, nullable)
// 5: port (INTEGER, NOT NULL)
- // 6: alternativeURL (TEXT, nullable)
+ // 6: alternativeURI (TEXT, nullable)
// 7: signature (BLOB, nullable)
// 8: created (BIGINT, NOT NULL)
// 9: updated (BIGINT, NOT NULL)
@@ -717,10 +717,10 @@ protected static PeerInfo rowToPeer(Row row) {
buffer = row.getBuffer(4);
Id origin = buffer == null ? null : Id.of(buffer.getBytes());
int port = row.getInteger(5); // NOT NULL
- String alternativeURL = row.getString(6); // Nullable
+ String alternativeURI = row.getString(6); // Nullable
buffer = row.getBuffer(7);
byte[] signature = buffer == null ? null : buffer.getBytes();
- return PeerInfo.of(id, privateKey, nodeId, origin, port, alternativeURL, signature);
+ return PeerInfo.of(id, privateKey, nodeId, origin, port, alternativeURI, signature);
}
}
\ No newline at end of file
diff --git a/dht/src/main/java/io/bosonnetwork/kademlia/storage/PostgresStorage.java b/dht/src/main/java/io/bosonnetwork/kademlia/storage/PostgresStorage.java
index 03e89d1..8eac3fe 100644
--- a/dht/src/main/java/io/bosonnetwork/kademlia/storage/PostgresStorage.java
+++ b/dht/src/main/java/io/bosonnetwork/kademlia/storage/PostgresStorage.java
@@ -69,7 +69,7 @@ CREATE TABLE IF NOT EXISTS peers (
privateKey BYTEA,
origin BYTEA,
port INTEGER NOT NULL,
- alternativeURL VARCHAR(512),
+ alternativeURI VARCHAR(512),
signature BYTEA NOT NULL,
created BIGINT NOT NULL DEFAULT EXTRACT(EPOCH FROM NOW()) * 1000,
updated BIGINT NOT NULL DEFAULT EXTRACT(EPOCH FROM NOW()) * 1000,
@@ -220,14 +220,14 @@ protected String upsertPeer() {
return """
INSERT INTO peers (
id, nodeId, persistent, privateKey, origin, port,
- alternativeURL, signature, created, updated
+ alternativeURI, signature, created, updated
) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
ON CONFLICT(id, nodeId) DO UPDATE SET
persistent = excluded.persistent,
privateKey = excluded.privateKey,
origin = excluded.origin,
port = excluded.port,
- alternativeURL = excluded.alternativeURL,
+ alternativeURI = excluded.alternativeURI,
signature = excluded.signature,
updated = excluded.updated
""";
diff --git a/dht/src/main/java/io/bosonnetwork/kademlia/storage/SQLiteStorage.java b/dht/src/main/java/io/bosonnetwork/kademlia/storage/SQLiteStorage.java
index b57df71..ac7a906 100644
--- a/dht/src/main/java/io/bosonnetwork/kademlia/storage/SQLiteStorage.java
+++ b/dht/src/main/java/io/bosonnetwork/kademlia/storage/SQLiteStorage.java
@@ -71,7 +71,7 @@ CREATE TABLE IF NOT EXISTS peers (
privateKey BLOB,
origin BLOB,
port INTEGER NOT NULL,
- alternativeURL TEXT,
+ alternativeURI TEXT,
signature BLOB NOT NULL,
created INTEGER NOT NULL DEFAULT (CAST(unixepoch('subsec') * 1000 AS INTEGER)),
updated INTEGER NOT NULL DEFAULT (CAST(unixepoch('subsec') * 1000 AS INTEGER)),
@@ -218,14 +218,14 @@ protected String upsertPeer() {
return """
INSERT INTO peers (
id, nodeId, persistent, privateKey, origin, port,
- alternativeURL, signature, created, updated
+ alternativeURI, signature, created, updated
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(id, nodeId) DO UPDATE SET
persistent = excluded.persistent,
privateKey = excluded.privateKey,
origin = excluded.origin,
port = excluded.port,
- alternativeURL = excluded.alternativeURL,
+ alternativeURI = excluded.alternativeURI,
signature = excluded.signature,
updated = excluded.updated
""";
From e7a7752771500da2bdc6bbfaec35b88a3e408bab Mon Sep 17 00:00:00 2001
From: Jingyu
Date: Thu, 16 Oct 2025 15:08:22 +0800
Subject: [PATCH 2/4] Downgrade Vert.x from 5.0.x to 4.5.x due to critical
NetSocket bug - Downgraded Vert.x from 5.0.x to 4.5.x to avoid a critical
issue where NetSocket.endHandler is called too early. - Introduced
BosonVerticle abstract base class to simplify future migration between Vert.x
4.x and 5.x. - Hide Vert.x defaults lifecycle method names (init, start,
stop). - Fixed SQLite database compatibility issues after reverting to Vert.x
4.5.x.
---
.../access/impl/AccessManagerTests.java | 4 +-
api/src/main/java/io/bosonnetwork/Node.java | 4 +-
.../identifier/AbstractResolver.java | 4 +-
.../bosonnetwork/identifier/DHTRegistry.java | 2 +-
.../service/DefaultServiceContext.java | 1 +
.../io/bosonnetwork/vertx/BosonVerticle.java | 259 ++++++++++++++++++
.../{utils => }/vertx/VertxCaffeine.java | 2 +-
.../{utils => }/vertx/VertxFuture.java | 28 +-
.../identifier/DHTRegistryTest.java | 6 +-
.../{utils => }/vertx/VertxCaffeineTests.java | 2 +-
.../{utils => }/vertx/VertxFutureTests.java | 2 +-
.../java/io/bosonnetwork/launcher/Main.java | 4 +-
.../shell/AnnouncePeerCommand.java | 2 +-
.../main/java/io/bosonnetwork/shell/Main.java | 2 +-
.../shell/RoutingTableCommand.java | 2 +-
.../io/bosonnetwork/shell/StopCommand.java | 2 +-
.../io/bosonnetwork/shell/StorageCommand.java | 2 +-
.../bosonnetwork/shell/StoreValueCommand.java | 2 +-
.../io/bosonnetwork/kademlia/KadNode.java | 48 ++--
.../io/bosonnetwork/kademlia/impl/DHT.java | 28 +-
.../impl/SimpleNodeConfiguration.java | 4 +-
.../kademlia/storage/DataStorage.java | 9 +-
.../kademlia/storage/DatabaseStorage.java | 2 +-
.../kademlia/storage/InMemoryStorage.java | 2 +
.../kademlia/storage/PostgresStorage.java | 2 +
.../kademlia/storage/SQLiteStorage.java | 13 +-
.../kademlia/tasks/NodeLookupTask.java | 2 +-
.../kademlia/tasks/PeerLookupTask.java | 2 +-
.../kademlia/tasks/ValueLookupTask.java | 2 +-
.../bosonnetwork/kademlia/NodeAsyncTests.java | 38 +--
.../bosonnetwork/kademlia/NodeSyncTests.java | 16 +-
.../io/bosonnetwork/kademlia/SybilTests.java | 14 +-
.../kademlia/rpc/RPCServerTests.java | 16 +-
.../kademlia/storage/DataStorageTests.java | 8 +-
34 files changed, 406 insertions(+), 130 deletions(-)
create mode 100644 api/src/main/java/io/bosonnetwork/vertx/BosonVerticle.java
rename api/src/main/java/io/bosonnetwork/{utils => }/vertx/VertxCaffeine.java (98%)
rename api/src/main/java/io/bosonnetwork/{utils => }/vertx/VertxFuture.java (98%)
rename api/src/test/java/io/bosonnetwork/{utils => }/vertx/VertxCaffeineTests.java (98%)
rename api/src/test/java/io/bosonnetwork/{utils => }/vertx/VertxFutureTests.java (99%)
diff --git a/accesscontrol/src/test/java/io/bosonnetwork/access/impl/AccessManagerTests.java b/accesscontrol/src/test/java/io/bosonnetwork/access/impl/AccessManagerTests.java
index 0aa7dba..cdfc889 100644
--- a/accesscontrol/src/test/java/io/bosonnetwork/access/impl/AccessManagerTests.java
+++ b/accesscontrol/src/test/java/io/bosonnetwork/access/impl/AccessManagerTests.java
@@ -239,12 +239,12 @@ static void setup() throws Exception {
node = Node.kadNode(getNodeConfiguration());
am.init(node);
- node.run();
+ node.start();
}
@AfterAll
static void teardown() throws Exception {
- node.shutdown();
+ node.stop();
FileUtils.deleteFile(testDir);
}
diff --git a/api/src/main/java/io/bosonnetwork/Node.java b/api/src/main/java/io/bosonnetwork/Node.java
index af87b49..73b7b2c 100644
--- a/api/src/main/java/io/bosonnetwork/Node.java
+++ b/api/src/main/java/io/bosonnetwork/Node.java
@@ -97,14 +97,14 @@ public interface Node extends Identity {
*
* @return a {@link CompletableFuture} that completes when the node is running
*/
- CompletableFuture<Void> run();
+ CompletableFuture<Void> start();
/**
* Shutdown the node asynchronously.
*
* @return a {@link CompletableFuture} that completes when the node has shut down
*/
- CompletableFuture<Void> shutdown();
+ CompletableFuture<Void> stop();
/**
* Checks whether the node is currently running.
diff --git a/api/src/main/java/io/bosonnetwork/identifier/AbstractResolver.java b/api/src/main/java/io/bosonnetwork/identifier/AbstractResolver.java
index 54a13f5..cc0e29b 100644
--- a/api/src/main/java/io/bosonnetwork/identifier/AbstractResolver.java
+++ b/api/src/main/java/io/bosonnetwork/identifier/AbstractResolver.java
@@ -33,8 +33,8 @@
import org.slf4j.Logger;
import io.bosonnetwork.Id;
-import io.bosonnetwork.utils.vertx.VertxCaffeine;
-import io.bosonnetwork.utils.vertx.VertxFuture;
+import io.bosonnetwork.vertx.VertxCaffeine;
+import io.bosonnetwork.vertx.VertxFuture;
/**
* Abstract base class for Boson {@link Resolver} implementations.
diff --git a/api/src/main/java/io/bosonnetwork/identifier/DHTRegistry.java b/api/src/main/java/io/bosonnetwork/identifier/DHTRegistry.java
index ef23901..ac3b987 100644
--- a/api/src/main/java/io/bosonnetwork/identifier/DHTRegistry.java
+++ b/api/src/main/java/io/bosonnetwork/identifier/DHTRegistry.java
@@ -34,7 +34,7 @@
import io.bosonnetwork.Node;
import io.bosonnetwork.Value;
import io.bosonnetwork.crypto.CryptoBox;
-import io.bosonnetwork.utils.vertx.VertxFuture;
+import io.bosonnetwork.vertx.VertxFuture;
/**
* Singleton implementation of {@link Registry} that uses a distributed hash table (DHT) to store and resolve Cards.
diff --git a/api/src/main/java/io/bosonnetwork/service/DefaultServiceContext.java b/api/src/main/java/io/bosonnetwork/service/DefaultServiceContext.java
index b5fb672..e9e384a 100644
--- a/api/src/main/java/io/bosonnetwork/service/DefaultServiceContext.java
+++ b/api/src/main/java/io/bosonnetwork/service/DefaultServiceContext.java
@@ -24,6 +24,7 @@ public class DefaultServiceContext implements ServiceContext {
/**
* Creates a new {@link ServiceContext} instance.
*
+ * @param vertx the Vert.x instance.
* @param node the host Boson node.
* @param accessManager the {@link io.bosonnetwork.access.AccessManager} instance that
* provided by the host node.
diff --git a/api/src/main/java/io/bosonnetwork/vertx/BosonVerticle.java b/api/src/main/java/io/bosonnetwork/vertx/BosonVerticle.java
new file mode 100644
index 0000000..17f7641
--- /dev/null
+++ b/api/src/main/java/io/bosonnetwork/vertx/BosonVerticle.java
@@ -0,0 +1,259 @@
+package io.bosonnetwork.vertx;
+
+import java.util.List;
+import java.util.concurrent.Callable;
+
+import io.vertx.core.Context;
+import io.vertx.core.Future;
+import io.vertx.core.Handler;
+import io.vertx.core.Promise;
+import io.vertx.core.Verticle;
+import io.vertx.core.Vertx;
+import io.vertx.core.impl.ContextInternal;
+import io.vertx.core.json.JsonObject;
+
+/**
+ * An abstract base class for Verticles in the Boson project that unifies the Vert.x 4.x
+ * and 5.x {@code Verticle} API changes.
+ *
+ *
+ * Vert.x 5 deprecated {@code Verticle} and introduced {@code Deployable}, while Vert.x 4
+ * requires Verticles to implement {@code Verticle}. This base class hides that difference,
+ * so user code can remain compatible across versions.
+ *
+ *
+ * Usage
+ *
+ * Extend this class and implement {@link #deploy()} and {@link #undeploy()} to define
+ * startup and shutdown logic respectively. Do not override {@code start(Promise)}
+ * or {@code stop(Promise)} — those are managed internally and delegate to your methods.
+ *
+ *
+ * Method naming
+ *
+ * The Vert.x lifecycle methods {@code init}, {@code start}, and {@code stop} are reserved
+ * for Vert.x itself and mapped internally to your implementation via {@link #deploy()} and
+ * {@link #undeploy()}. This ensures forward compatibility with Vert.x 5’s {@code VerticleBase}.
+ *
+ */
+public abstract class BosonVerticle implements Verticle /*, Deployable */ {
+ /**
+ * Reference to the Vert.x instance that deployed this verticle
+ */
+ protected Vertx vertx;
+
+ /**
+ * Reference to the context of the verticle
+ */
+ protected Context vertxContext;
+
+ /**
+ * Returns the Vert.x instance that deployed this Verticle.
+ *
+ * @return the Vert.x instance
+ */
+ public final Vertx getVertx() {
+ return vertx;
+ }
+
+ /**
+ * Returns the Vert.x context associated with this Verticle.
+ *
+ * @return the Vert.x context
+ */
+ public final Context vertxContext() {
+ return vertxContext;
+ }
+
+ /**
+ * Returns the deployment ID of this Verticle deployment.
+ *
+ * @return the deployment ID
+ */
+ public final String deploymentID() {
+ return vertxContext.deploymentID();
+ }
+
+ /**
+ * Returns the configuration object of this Verticle deployment.
+ *
+ * This configuration can be specified when the Verticle is deployed.
+ *
+ *
+ * @return the configuration as a {@link JsonObject}
+ */
+ public final JsonObject vertxConfig() {
+ return vertxContext.config();
+ }
+
+ /**
+ * Returns the process arguments for the current Vert.x instance.
+ *
+ * @return a list of process arguments
+ */
+ public final List<String> processArgs() {
+ return vertxContext.processArgs();
+ }
+
+ /**
+ * Initializes the Verticle.
+ *
+ * This method is called by Vert.x when the Verticle instance is deployed.
+ * User code should not call this directly.
+ *
+ *
+ * @param vertx the Vert.x instance
+ * @param context the context associated with this Verticle
+ */
+ @Override
+ public final void init(Vertx vertx, Context context) {
+ prepare(vertx, context);
+ }
+
+ /**
+ * Prepares this verticle for execution.
+ *
+ * This method is invoked internally by {@link #init(Vertx, Context)} and can be overridden
+ * if additional setup is needed before deployment.
+ *
+ *
+ * @param vertx the Vert.x instance
+ * @param context the Vert.x context
+ */
+ public void prepare(Vertx vertx, Context context) {
+ this.vertx = vertx;
+ this.vertxContext = context;
+ }
+
+ /**
+ * Called when the Verticle is started.
+ *
+ * This implementation delegates to {@link #deploy()}, which should return a {@link Future}
+ * that completes when startup is done.
+ *
+ *
+ * @param startPromise a promise that should be completed when startup is done
+ * @throws Exception if startup fails
+ */
+ @Override
+ public final void start(Promise<Void> startPromise) throws Exception {
+ deploy().onComplete(startPromise);
+ }
+
+ /**
+ * Called when the Verticle is stopped.
+ *
+ * This implementation delegates to {@link #undeploy()}, which should return a {@link Future}
+ * that completes when shutdown is done.
+ *
+ *
+ * @param stopPromise a promise that should be completed when shutdown is done
+ * @throws Exception if shutdown fails
+ */
+ @Override
+ public final void stop(Promise<Void> stopPromise) throws Exception {
+ undeploy().onComplete(stopPromise);
+ }
+
+ /**
+ * Called during startup to perform asynchronous initialization logic.
+ *
+ * This method is invoked by {@link #start(Promise)}.
+ *
+ *
+ * @return a future that completes when setup is finished
+ */
+ public abstract Future<Void> deploy();
+
+ /**
+ * Called during shutdown to perform asynchronous cleanup logic.
+ *
+ * This method is invoked by {@link #stop(Promise)}.
+ *
+ *
+ * @return a future that completes when teardown is finished
+ */
+ public abstract Future<Void> undeploy();
+
+ /**
+ * Internal helper method to simulate deployment under Vert.x 5.x’s {@code Deployable} interface.
+ *
+ * This should not be called directly by user code. It is used by Vert.x internals or
+ * integration layers that work with Vert.x 5.x’s deployment model.
+ *
+ *
+ * @param context the Vert.x context
+ * @return a future that completes when deployment is finished
+ * @throws Exception if deployment fails
+ */
+ public final Future<?> deploy(Context context) throws Exception {
+ prepare(context.owner(), context);
+ ContextInternal internal = (ContextInternal) context;
+ Promise<Void> promise = internal.promise();
+ try {
+ deploy().onComplete(promise);
+ } catch (Throwable t) {
+ if (!promise.tryFail(t))
+ internal.reportException(t);
+ }
+ return promise.future();
+ }
+
+ /**
+ * Internal helper method to simulate undeployment under Vert.x 5.x’s {@code Deployable} interface.
+ *
+ * This should not be called directly by user code. It is used by Vert.x internals or
+ * integration layers that work with Vert.x 5.x’s deployment model.
+ *
+ *
+ * @param context the Vert.x context
+ * @return a future that completes when undeployment is finished
+ * @throws Exception if undeployment fails
+ */
+ public final Future<?> undeploy(Context context) throws Exception {
+ ContextInternal internal = (ContextInternal) context;
+ Promise<Void> promise = internal.promise();
+ try {
+ undeploy().onComplete(promise);
+ } catch (Throwable t) {
+ if (!promise.tryFail(t))
+ internal.reportException(t);
+ }
+ return promise.future();
+ }
+
+ /**
+ * Executes the given handler on this verticle's context.
+ *
+ * @param action the handler to run
+ */
+ public void runOnContext(Handler<Void> action) {
+ vertxContext.runOnContext(action);
+ }
+
+
+ /**
+ * Executes blocking code asynchronously, returning a {@link Future} that completes
+ * when the blocking operation is done.
+ *
+ * @param blockingCodeHandler the blocking code to execute
+ * @param <T> the result type
+ * @return a future representing the blocking operation result
+ */
+ public <T> Future<T> executeBlocking(Callable<T> blockingCodeHandler) {
+ return vertxContext.executeBlocking(blockingCodeHandler);
+ }
+
+ /**
+ * Executes blocking code asynchronously, optionally ordering execution relative
+ * to other blocking operations in the same context.
+ *
+ * @param blockingCodeHandler the blocking code to execute
+ * @param ordered whether execution should be ordered
+ * @param <T> the result type
+ * @return a future representing the blocking operation result
+ */
+ public <T> Future<T> executeBlocking(Callable<T> blockingCodeHandler, boolean ordered) {
+ return vertxContext.executeBlocking(blockingCodeHandler, ordered);
+ }
+}
\ No newline at end of file
diff --git a/api/src/main/java/io/bosonnetwork/utils/vertx/VertxCaffeine.java b/api/src/main/java/io/bosonnetwork/vertx/VertxCaffeine.java
similarity index 98%
rename from api/src/main/java/io/bosonnetwork/utils/vertx/VertxCaffeine.java
rename to api/src/main/java/io/bosonnetwork/vertx/VertxCaffeine.java
index 2f559b2..497d98f 100644
--- a/api/src/main/java/io/bosonnetwork/utils/vertx/VertxCaffeine.java
+++ b/api/src/main/java/io/bosonnetwork/vertx/VertxCaffeine.java
@@ -20,7 +20,7 @@
* SOFTWARE.
*/
-package io.bosonnetwork.utils.vertx;
+package io.bosonnetwork.vertx;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
diff --git a/api/src/main/java/io/bosonnetwork/utils/vertx/VertxFuture.java b/api/src/main/java/io/bosonnetwork/vertx/VertxFuture.java
similarity index 98%
rename from api/src/main/java/io/bosonnetwork/utils/vertx/VertxFuture.java
rename to api/src/main/java/io/bosonnetwork/vertx/VertxFuture.java
index b3f7c8c..e5e01a4 100644
--- a/api/src/main/java/io/bosonnetwork/utils/vertx/VertxFuture.java
+++ b/api/src/main/java/io/bosonnetwork/vertx/VertxFuture.java
@@ -20,7 +20,7 @@
* SOFTWARE.
*/
-package io.bosonnetwork.utils.vertx;
+package io.bosonnetwork.vertx;
import java.util.concurrent.CancellationException;
import java.util.concurrent.CompletableFuture;
@@ -71,7 +71,7 @@ public class VertxFuture extends CompletableFuture implements java.util.co
protected VertxFuture(Future future) {
this.future = future;
- future.onComplete(ar -> {
+ future.andThen(ar -> {
// update the internal state of CompletableFuture
if (ar.succeeded())
super.complete(ar.result());
@@ -687,10 +687,11 @@ public T get() throws InterruptedException, ExecutionException {
if (Context.isOnVertxThread() || Context.isOnEventLoopThread())
throw new IllegalStateException("Cannot not be called on vertx thread or event loop thread");
- final CountDownLatch latch = new CountDownLatch(1);
- future.onComplete(ar -> latch.countDown());
-
- latch.await();
+ if (!future.isComplete()) {
+ final CountDownLatch latch = new CountDownLatch(1);
+ future.andThen(ar -> latch.countDown());
+ latch.await();
+ }
if (future.succeeded())
return future.result();
@@ -718,11 +719,12 @@ public T get(long timeout, TimeUnit unit) throws InterruptedException, Execution
if (Context.isOnVertxThread() || Context.isOnEventLoopThread())
throw new IllegalStateException("Cannot not be called on vertx thread or event loop thread");
- final CountDownLatch latch = new CountDownLatch(1);
- future.onComplete(ar -> latch.countDown());
-
- if (!latch.await(timeout, unit))
- throw new TimeoutException();
+ if (!future.isComplete()) {
+ final CountDownLatch latch = new CountDownLatch(1);
+ future.andThen(ar -> latch.countDown());
+ if (!latch.await(timeout, unit))
+ throw new TimeoutException();
+ }
if (future.succeeded())
return future.result();
@@ -831,7 +833,7 @@ public void obtrudeException(Throwable ex) {
@Override
public VertxFuture copy() {
Promise promise = Promise.promise();
- future.onComplete(promise);
+ future.andThen(promise);
return of(promise.future());
}
@@ -940,7 +942,7 @@ public MinimalStage completeOnTimeout(T value, long timeout, TimeUnit unit) {
@Override
public CompletableFuture toCompletableFuture() {
Promise promise = Promise.promise();
- future.onComplete(promise);
+ future.andThen(promise);
return of(promise.future());
}
}
diff --git a/api/src/test/java/io/bosonnetwork/identifier/DHTRegistryTest.java b/api/src/test/java/io/bosonnetwork/identifier/DHTRegistryTest.java
index e5f5569..f134a8b 100644
--- a/api/src/test/java/io/bosonnetwork/identifier/DHTRegistryTest.java
+++ b/api/src/test/java/io/bosonnetwork/identifier/DHTRegistryTest.java
@@ -84,12 +84,12 @@ public CompletableFuture bootstrap(Collection bootstrapNodes){
}
@Override
- public CompletableFuture<Void> run() {
+ public CompletableFuture<Void> start() {
return CompletableFuture.completedFuture(null);
}
@Override
- public CompletableFuture<Void> shutdown() {
+ public CompletableFuture<Void> stop() {
return CompletableFuture.completedFuture(null);
}
@@ -191,7 +191,7 @@ public String getVersion() {
@AfterAll
public static void cleanup() throws Exception {
- node.shutdown();
+ node.stop();
}
private static Identity alice;
diff --git a/api/src/test/java/io/bosonnetwork/utils/vertx/VertxCaffeineTests.java b/api/src/test/java/io/bosonnetwork/vertx/VertxCaffeineTests.java
similarity index 98%
rename from api/src/test/java/io/bosonnetwork/utils/vertx/VertxCaffeineTests.java
rename to api/src/test/java/io/bosonnetwork/vertx/VertxCaffeineTests.java
index 6743176..47564f9 100644
--- a/api/src/test/java/io/bosonnetwork/utils/vertx/VertxCaffeineTests.java
+++ b/api/src/test/java/io/bosonnetwork/vertx/VertxCaffeineTests.java
@@ -1,4 +1,4 @@
-package io.bosonnetwork.utils.vertx;
+package io.bosonnetwork.vertx;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
diff --git a/api/src/test/java/io/bosonnetwork/utils/vertx/VertxFutureTests.java b/api/src/test/java/io/bosonnetwork/vertx/VertxFutureTests.java
similarity index 99%
rename from api/src/test/java/io/bosonnetwork/utils/vertx/VertxFutureTests.java
rename to api/src/test/java/io/bosonnetwork/vertx/VertxFutureTests.java
index 7beba5a..ae86a13 100644
--- a/api/src/test/java/io/bosonnetwork/utils/vertx/VertxFutureTests.java
+++ b/api/src/test/java/io/bosonnetwork/vertx/VertxFutureTests.java
@@ -1,4 +1,4 @@
-package io.bosonnetwork.utils.vertx;
+package io.bosonnetwork.vertx;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
diff --git a/cmds/src/main/java/io/bosonnetwork/launcher/Main.java b/cmds/src/main/java/io/bosonnetwork/launcher/Main.java
index 837f6df..c9f4874 100644
--- a/cmds/src/main/java/io/bosonnetwork/launcher/Main.java
+++ b/cmds/src/main/java/io/bosonnetwork/launcher/Main.java
@@ -72,7 +72,7 @@ private static void initBosonNode() {
// TODO: initialize the user defined access manager
accessManager = AccessManager.getDefault();
- node.run().thenRun(() -> System.out.format("Boson node %s is running.\n", node.getId())).get();
+ node.start().thenRun(() -> System.out.format("Boson node %s is running.\n", node.getId())).get();
} catch (Exception e) {
System.out.println("Start boson super node failed, error: " + e.getMessage());
e.printStackTrace(System.err);
@@ -283,7 +283,7 @@ public static void main(String[] args) {
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
if (node != null) {
unloadServices();
- node.shutdown().whenComplete((v, t) -> {
+ node.stop().whenComplete((v, t) -> {
synchronized(shutdown) {
shutdown.notifyAll();
}
diff --git a/cmds/src/main/java/io/bosonnetwork/shell/AnnouncePeerCommand.java b/cmds/src/main/java/io/bosonnetwork/shell/AnnouncePeerCommand.java
index 2489c3f..b82439c 100644
--- a/cmds/src/main/java/io/bosonnetwork/shell/AnnouncePeerCommand.java
+++ b/cmds/src/main/java/io/bosonnetwork/shell/AnnouncePeerCommand.java
@@ -33,7 +33,7 @@
import io.bosonnetwork.PeerInfo;
import io.bosonnetwork.crypto.Signature;
import io.bosonnetwork.utils.Hex;
-import io.bosonnetwork.utils.vertx.VertxFuture;
+import io.bosonnetwork.vertx.VertxFuture;
/**
* @hidden
diff --git a/cmds/src/main/java/io/bosonnetwork/shell/Main.java b/cmds/src/main/java/io/bosonnetwork/shell/Main.java
index 7a629c8..bd1c288 100644
--- a/cmds/src/main/java/io/bosonnetwork/shell/Main.java
+++ b/cmds/src/main/java/io/bosonnetwork/shell/Main.java
@@ -290,7 +290,7 @@ public void disconnected(Network network) {
}
});
- bosonNode.run().thenRun(() -> System.out.println("Boson node started.")).get();
+ bosonNode.start().thenRun(() -> System.out.println("Boson node started.")).get();
}
static KadNode getBosonNode() {
diff --git a/cmds/src/main/java/io/bosonnetwork/shell/RoutingTableCommand.java b/cmds/src/main/java/io/bosonnetwork/shell/RoutingTableCommand.java
index 4267b5a..d4d788d 100644
--- a/cmds/src/main/java/io/bosonnetwork/shell/RoutingTableCommand.java
+++ b/cmds/src/main/java/io/bosonnetwork/shell/RoutingTableCommand.java
@@ -29,7 +29,7 @@
import io.bosonnetwork.Network;
import io.bosonnetwork.kademlia.impl.DHT;
-import io.bosonnetwork.utils.vertx.VertxFuture;
+import io.bosonnetwork.vertx.VertxFuture;
/**
* @hidden
diff --git a/cmds/src/main/java/io/bosonnetwork/shell/StopCommand.java b/cmds/src/main/java/io/bosonnetwork/shell/StopCommand.java
index 1e991a8..f020ab9 100644
--- a/cmds/src/main/java/io/bosonnetwork/shell/StopCommand.java
+++ b/cmds/src/main/java/io/bosonnetwork/shell/StopCommand.java
@@ -35,7 +35,7 @@
public class StopCommand implements Callable {
@Override
public Integer call() throws Exception {
- Main.getBosonNode().shutdown().thenRun(() -> System.out.println("Boson node stopped.")).get();
+ Main.getBosonNode().stop().thenRun(() -> System.out.println("Boson node stopped.")).get();
return 0;
}
}
\ No newline at end of file
diff --git a/cmds/src/main/java/io/bosonnetwork/shell/StorageCommand.java b/cmds/src/main/java/io/bosonnetwork/shell/StorageCommand.java
index de09930..657c309 100644
--- a/cmds/src/main/java/io/bosonnetwork/shell/StorageCommand.java
+++ b/cmds/src/main/java/io/bosonnetwork/shell/StorageCommand.java
@@ -34,7 +34,7 @@
import io.bosonnetwork.shell.StorageCommand.ListValueCommand;
import io.bosonnetwork.shell.StorageCommand.PeerCommand;
import io.bosonnetwork.shell.StorageCommand.ValueCommand;
-import io.bosonnetwork.utils.vertx.VertxFuture;
+import io.bosonnetwork.vertx.VertxFuture;
/**
* @hidden
diff --git a/cmds/src/main/java/io/bosonnetwork/shell/StoreValueCommand.java b/cmds/src/main/java/io/bosonnetwork/shell/StoreValueCommand.java
index 03f539a..1258bb9 100644
--- a/cmds/src/main/java/io/bosonnetwork/shell/StoreValueCommand.java
+++ b/cmds/src/main/java/io/bosonnetwork/shell/StoreValueCommand.java
@@ -32,7 +32,7 @@
import io.bosonnetwork.Id;
import io.bosonnetwork.Node;
import io.bosonnetwork.Value;
-import io.bosonnetwork.utils.vertx.VertxFuture;
+import io.bosonnetwork.vertx.VertxFuture;
/**
* @hidden
diff --git a/dht/src/main/java/io/bosonnetwork/kademlia/KadNode.java b/dht/src/main/java/io/bosonnetwork/kademlia/KadNode.java
index 432f458..1662bc4 100644
--- a/dht/src/main/java/io/bosonnetwork/kademlia/KadNode.java
+++ b/dht/src/main/java/io/bosonnetwork/kademlia/KadNode.java
@@ -17,7 +17,6 @@
import io.vertx.core.Context;
import io.vertx.core.Future;
import io.vertx.core.Promise;
-import io.vertx.core.VerticleBase;
import io.vertx.core.Vertx;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -46,10 +45,11 @@
import io.bosonnetwork.kademlia.storage.DataStorage;
import io.bosonnetwork.utils.Base58;
import io.bosonnetwork.utils.Variable;
-import io.bosonnetwork.utils.vertx.VertxCaffeine;
-import io.bosonnetwork.utils.vertx.VertxFuture;
+import io.bosonnetwork.vertx.BosonVerticle;
+import io.bosonnetwork.vertx.VertxCaffeine;
+import io.bosonnetwork.vertx.VertxFuture;
-public class KadNode extends VerticleBase implements Node {
+public class KadNode extends BosonVerticle implements Node {
public static final String NAME = "Orca";
public static final String SHORT_NAME = "OR";
public static final int VERSION_NUMBER = 1;
@@ -200,7 +200,7 @@ else if (current instanceof ListenerArray listeners)
}
@Override
- public synchronized VertxFuture<Void> run() {
+ public synchronized VertxFuture<Void> start() {
if (this.vertx != null)
throw new IllegalStateException("Already started");
@@ -209,13 +209,13 @@ public synchronized VertxFuture run() {
}
@Override
- public VertxFuture<Void> shutdown() {
+ public VertxFuture<Void> stop() {
if (!isRunning())
throw new IllegalStateException("Not started");
Promise promise = Promise.promise();
- context.runOnContext(v -> {
- String deploymentId = context != null ? context.deploymentID() : null;
+ runOnContext(v -> {
+ String deploymentId = vertxContext != null ? vertxContext.deploymentID() : null;
if (deploymentId == null)
promise.fail(new IllegalStateException("Not started"));
@@ -226,14 +226,14 @@ public VertxFuture shutdown() {
}
@Override
- public void init(Vertx vertx, Context context) {
- super.init(vertx, context);
+ public void prepare(Vertx vertx, Context context) {
+ super.prepare(vertx, context);
identity.initCache(VertxCaffeine.newBuilder(vertx)
.expireAfterAccess(KBucketEntry.OLD_AND_STALE_TIME, TimeUnit.MILLISECONDS));
}
@Override
- public Future<Void> start() {
+ public Future<Void> deploy() {
tokenManager = new TokenManager();
storage = DataStorage.create(config.storageURL());
@@ -244,25 +244,25 @@ public Future start() {
@Override
public void statusChanged(Network network, ConnectionStatus newStatus, ConnectionStatus oldStatus) {
if (connectionStatusListener != null)
- context.runOnContext(unused -> connectionStatusListener.statusChanged(network, newStatus, oldStatus));
+ runOnContext(unused -> connectionStatusListener.statusChanged(network, newStatus, oldStatus));
}
@Override
public void connecting(Network network) {
if (connectionStatusListener != null)
- context.runOnContext(unused -> connectionStatusListener.connecting(network));
+ runOnContext(unused -> connectionStatusListener.connecting(network));
}
@Override
public void connected(Network network) {
if (connectionStatusListener != null)
- context.runOnContext(unused -> connectionStatusListener.connected(network));
+ runOnContext(unused -> connectionStatusListener.connected(network));
}
@Override
public void disconnected(Network network) {
if (connectionStatusListener != null)
- context.runOnContext(unused -> connectionStatusListener.disconnected(network));
+ runOnContext(unused -> connectionStatusListener.disconnected(network));
}
};
@@ -320,14 +320,14 @@ public void disconnected(Network network) {
running = true;
log.info("Kademlia node started.");
} else {
- stop();
+ undeploy();
log.error("Failed to start Kademlia node.", ar.cause());
}
}).mapEmpty();
}
@Override
- public Future<Void> stop() {
+ public Future<Void> undeploy() {
running = false;
return Future.succeededFuture().andThen(ar -> {
@@ -383,7 +383,7 @@ public VertxFuture bootstrap(Collection bootstrapNodes) {
Promise promise = Promise.promise();
- context.runOnContext(v -> {
+ runOnContext(v -> {
if (dht4 == null || dht6 == null) {
DHT dht = dht4 != null ? dht4 : dht6;
dht.bootstrap(bootstrapNodes).onComplete(promise);
@@ -411,7 +411,7 @@ public VertxFuture> findNode(Id id, LookupOption option) {
final LookupOption lookupOption = option == null ? defaultLookupOption : option;
Promise> promise = Promise.promise();
- context.runOnContext(v -> doFindNode(id, lookupOption).onComplete(promise));
+ runOnContext(v -> doFindNode(id, lookupOption).onComplete(promise));
return VertxFuture.of(promise.future());
}
@@ -449,7 +449,7 @@ public VertxFuture findValue(Id id, int expectedSequenceNumber, LookupOpt
final LookupOption lookupOption = option == null ? defaultLookupOption : option;
Promise promise = Promise.promise();
- context.runOnContext(v -> {
+ runOnContext(v -> {
Variable localValue = Variable.empty();
storage.getValue(id).map(local -> {
@@ -534,7 +534,7 @@ public VertxFuture storeValue(Value value, int expectedSequenceNumber, boo
Promise promise = Promise.promise();
- context.runOnContext(na ->
+ runOnContext(na ->
storage.putValue(value, persistent, expectedSequenceNumber).compose(v ->
doStoreValue(value, expectedSequenceNumber)
).compose(v ->
@@ -565,7 +565,7 @@ public VertxFuture> findPeer(Id id, int expected, LookupOption op
final LookupOption lookupOption = option == null ? defaultLookupOption : option;
Promise> promise = Promise.promise();
- context.runOnContext(v -> {
+ runOnContext(v -> {
Variable> localPeers = Variable.empty();
storage.getPeers(id).compose(local -> {
@@ -645,7 +645,7 @@ public VertxFuture announcePeer(PeerInfo peer, boolean persistent) {
Promise promise = Promise.promise();
- context.runOnContext(na ->
+ runOnContext(na ->
storage.putPeer(peer, persistent).compose(v ->
doAnnouncePeer(peer)
).compose(v ->
@@ -677,7 +677,7 @@ public Future execute(Callable action) {
checkRunning();
Promise promise = Promise.promise();
- context.runOnContext(v -> {
+ runOnContext(v -> {
try {
T result = action.call();
promise.complete(result);
diff --git a/dht/src/main/java/io/bosonnetwork/kademlia/impl/DHT.java b/dht/src/main/java/io/bosonnetwork/kademlia/impl/DHT.java
index f0960e0..4e4d74e 100644
--- a/dht/src/main/java/io/bosonnetwork/kademlia/impl/DHT.java
+++ b/dht/src/main/java/io/bosonnetwork/kademlia/impl/DHT.java
@@ -17,7 +17,6 @@
import io.vertx.core.Context;
import io.vertx.core.Future;
import io.vertx.core.Promise;
-import io.vertx.core.VerticleBase;
import io.vertx.core.Vertx;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -67,8 +66,9 @@
import io.bosonnetwork.kademlia.tasks.ValueAnnounceTask;
import io.bosonnetwork.kademlia.tasks.ValueLookupTask;
import io.bosonnetwork.utils.AddressUtils;
+import io.bosonnetwork.vertx.BosonVerticle;
-public class DHT extends VerticleBase {
+public class DHT extends BosonVerticle {
public static final int BOOTSTRAP_MIN_INTERVAL = 4 * 60 * 1000; // 4 minutes
public static final int SELF_LOOKUP_INTERVAL = 30 * 60 * 1000; // 30 minutes
public static final int ROUTING_TABLE_PERSIST_INTERVAL = 10 * 60 * 1000; // 10 minutes
@@ -202,13 +202,13 @@ public void setConnectionStatusListener(ConnectionStatusListener listener) {
}
@Override
- public void init(Vertx vertx, Context context) {
- super.init(vertx, context);
+ public void prepare(Vertx vertx, Context context) {
+ super.prepare(vertx, context);
this.kadContext = new KadContext(vertx, context, identity, network, this, enableDeveloperMode);
}
@Override
- public Future start() {
+ public Future deploy() {
if (running)
return Future.succeededFuture();
@@ -280,7 +280,7 @@ public Future start() {
}
@Override
- public Future stop() {
+ public Future undeploy() {
if (!running)
return Future.succeededFuture();
@@ -419,7 +419,7 @@ public Future bootstrap(Collection nodes) {
Promise promise = Promise.promise();
- kadContext.runOnContext(v -> {
+ runOnContext(v -> {
addBootstrapNodes(nodes);
if (bootstrapping) {
promise.fail(new IllegalStateException("DHT is bootstrapping"));
@@ -442,7 +442,7 @@ public Future bootstrap() {
Promise promise = Promise.promise();
- kadContext.runOnContext(v -> {
+ runOnContext(v -> {
if (bootstrapping) {
promise.fail(new IllegalStateException("DHT is bootstrapping"));
return;
@@ -991,7 +991,7 @@ private Result> populateClosestNodes(Id target, int v4,
public Future findNode(Id id, LookupOption option) {
Promise promise = Promise.promise();
- kadContext.runOnContext(() -> {
+ runOnContext(v -> {
NodeInfo node = routingTable.getEntry(id, true);
if (option == LookupOption.LOCAL)
promise.complete(node);
@@ -1021,7 +1021,7 @@ public Future findNode(Id id, LookupOption option) {
public Future findValue(Id id, int expectedSequenceNumber, LookupOption option) {
Promise promise = Promise.promise();
- kadContext.runOnContext(() -> {
+ runOnContext(v -> {
ValueLookupTask task = new ValueLookupTask(kadContext, id, expectedSequenceNumber)
.setName("Lookup value: " + id)
.setResultFilter((previous, next) -> {
@@ -1052,7 +1052,7 @@ public Future findValue(Id id, int expectedSequenceNumber, LookupOption o
public Future storeValue(Value value, int expectedSequenceNumber) {
Promise promise = Promise.promise();
- kadContext.runOnContext(() -> {
+ runOnContext(v -> {
ValueAnnounceTask announceTask = new ValueAnnounceTask(kadContext, value, expectedSequenceNumber)
.setName("Store value: " + value.getId())
.addListener(t -> promise.complete());
@@ -1087,7 +1087,7 @@ public Future storeValue(Value value, int expectedSequenceNumber) {
public Future> findPeer(Id id, int expected, LookupOption option) {
Promise> promise = Promise.promise();
- kadContext.runOnContext(() -> {
+ runOnContext(v -> {
PeerLookupTask task = new PeerLookupTask(kadContext, id)
.setName("Lookup peer: " + id)
.setResultFilter((previous, next) -> {
@@ -1108,7 +1108,7 @@ public Future> findPeer(Id id, int expected, LookupOption option)
public Future announcePeer(PeerInfo peer) {
Promise promise = Promise.promise();
- kadContext.runOnContext(() -> {
+ runOnContext(v -> {
PeerAnnounceTask announceTask = new PeerAnnounceTask(kadContext, peer)
.setName("Announce peer: " + peer.getId())
.addListener(t -> promise.complete());
@@ -1141,7 +1141,7 @@ public Future announcePeer(PeerInfo peer) {
public Future dumpRoutingTable(PrintStream out) {
Promise promise = Promise.promise();
- kadContext.runOnContext(v -> {
+ runOnContext(v -> {
routingTable.dump(out);
promise.complete();
});
diff --git a/dht/src/main/java/io/bosonnetwork/kademlia/impl/SimpleNodeConfiguration.java b/dht/src/main/java/io/bosonnetwork/kademlia/impl/SimpleNodeConfiguration.java
index ee9e863..8a2e234 100644
--- a/dht/src/main/java/io/bosonnetwork/kademlia/impl/SimpleNodeConfiguration.java
+++ b/dht/src/main/java/io/bosonnetwork/kademlia/impl/SimpleNodeConfiguration.java
@@ -13,7 +13,7 @@
import io.bosonnetwork.NodeConfiguration;
import io.bosonnetwork.NodeInfo;
-import io.bosonnetwork.kademlia.storage.SQLiteStorage;
+import io.bosonnetwork.kademlia.storage.InMemoryStorage;
public class SimpleNodeConfiguration implements NodeConfiguration {
private final Vertx vertx;
@@ -35,7 +35,7 @@ public SimpleNodeConfiguration(NodeConfiguration config) {
this.port = config.port();
this.privateKey = config.privateKey();
this.dataPath = config.dataPath();
- this.storageURL = config.storageURL() != null ? config.storageURL() : SQLiteStorage.IN_MEMORY_STORAGE_URL;
+ this.storageURL = config.storageURL() != null ? config.storageURL() : InMemoryStorage.STORAGE_URL;
this.bootstrapNodes = new ArrayList<>(config.bootstrapNodes() != null ? config.bootstrapNodes() : Collections.emptyList());
this.enableSpamThrottling = config.enableSpamThrottling();
this.enableSuspiciousNodeDetector = config.enableSuspiciousNodeDetector();
diff --git a/dht/src/main/java/io/bosonnetwork/kademlia/storage/DataStorage.java b/dht/src/main/java/io/bosonnetwork/kademlia/storage/DataStorage.java
index 4fe0d83..3456bc6 100644
--- a/dht/src/main/java/io/bosonnetwork/kademlia/storage/DataStorage.java
+++ b/dht/src/main/java/io/bosonnetwork/kademlia/storage/DataStorage.java
@@ -268,17 +268,18 @@ public interface DataStorage {
static boolean supports(String url) {
// now only support inmemory, sqlite and postgres
- return url.equals("inmemory") || url.startsWith("jdbc:sqlite:") || url.startsWith("postgresql://");
+ return url.equals(InMemoryStorage.STORAGE_URL) || url.startsWith(SQLiteStorage.STORAGE_URL_PREFIX) ||
+ url.startsWith(PostgresStorage.STORAGE_URL_PREFIX);
}
static DataStorage create(String url) {
Objects.requireNonNull(url, "url");
- if (url.equals("inmemory"))
+ if (url.equals(InMemoryStorage.STORAGE_URL))
return new InMemoryStorage();
- if (url.startsWith("jdbc:sqlite:"))
+ if (url.startsWith(SQLiteStorage.STORAGE_URL_PREFIX))
return new SQLiteStorage(url);
- if (url.startsWith("postgresql://"))
+ if (url.startsWith(PostgresStorage.STORAGE_URL_PREFIX))
return new PostgresStorage(url);
throw new IllegalArgumentException("Unsupported storage: " + url);
diff --git a/dht/src/main/java/io/bosonnetwork/kademlia/storage/DatabaseStorage.java b/dht/src/main/java/io/bosonnetwork/kademlia/storage/DatabaseStorage.java
index 3fd83ca..9c65642 100644
--- a/dht/src/main/java/io/bosonnetwork/kademlia/storage/DatabaseStorage.java
+++ b/dht/src/main/java/io/bosonnetwork/kademlia/storage/DatabaseStorage.java
@@ -69,7 +69,7 @@ protected Future executeSequentially(SqlConnection connection, List executeSequentially(connection, statements, index + 1)) // Move to next statement
diff --git a/dht/src/main/java/io/bosonnetwork/kademlia/storage/InMemoryStorage.java b/dht/src/main/java/io/bosonnetwork/kademlia/storage/InMemoryStorage.java
index 5351622..08744d2 100644
--- a/dht/src/main/java/io/bosonnetwork/kademlia/storage/InMemoryStorage.java
+++ b/dht/src/main/java/io/bosonnetwork/kademlia/storage/InMemoryStorage.java
@@ -42,6 +42,8 @@
import io.bosonnetwork.kademlia.exceptions.SequenceNotMonotonic;
public class InMemoryStorage implements DataStorage {
+ public static final String STORAGE_URL = "inmemory";
+
private static final int SCHEMA_VERSION = 5;
private static final int DEFAULT_MAP_CAPACITY = 32;
diff --git a/dht/src/main/java/io/bosonnetwork/kademlia/storage/PostgresStorage.java b/dht/src/main/java/io/bosonnetwork/kademlia/storage/PostgresStorage.java
index 8eac3fe..f994c4c 100644
--- a/dht/src/main/java/io/bosonnetwork/kademlia/storage/PostgresStorage.java
+++ b/dht/src/main/java/io/bosonnetwork/kademlia/storage/PostgresStorage.java
@@ -30,6 +30,8 @@
import io.vertx.sqlclient.PoolOptions;
public class PostgresStorage extends DatabaseStorage implements DataStorage {
+ protected static final String STORAGE_URL_PREFIX = "postgresql://";
+
private static final List SCHEMA = List.of(
// Schema version
"""
diff --git a/dht/src/main/java/io/bosonnetwork/kademlia/storage/SQLiteStorage.java b/dht/src/main/java/io/bosonnetwork/kademlia/storage/SQLiteStorage.java
index ac7a906..21427d3 100644
--- a/dht/src/main/java/io/bosonnetwork/kademlia/storage/SQLiteStorage.java
+++ b/dht/src/main/java/io/bosonnetwork/kademlia/storage/SQLiteStorage.java
@@ -25,12 +25,11 @@
import java.util.List;
import io.vertx.core.Vertx;
-import io.vertx.jdbcclient.JDBCConnectOptions;
import io.vertx.jdbcclient.JDBCPool;
-import io.vertx.sqlclient.PoolOptions;
+import org.sqlite.SQLiteDataSource;
public class SQLiteStorage extends DatabaseStorage implements DataStorage {
- public static final String IN_MEMORY_STORAGE_URL = "jdbc:sqlite:file:node?mode=memory";
+ protected static final String STORAGE_URL_PREFIX = "jdbc:sqlite:";
private static final List SCHEMA = List.of(
// Schema version
@@ -94,11 +93,19 @@ protected SQLiteStorage(String connectionUri) {
@Override
protected void setupSqlClient(Vertx vertx, String connectionUri) {
+ /*/
+ // Vert.x 5.x style
JDBCConnectOptions connectOptions = new JDBCConnectOptions()
.setJdbcUrl(connectionUri);
// Single connection recommended for SQLite
PoolOptions poolOptions = new PoolOptions().setMaxSize(1);
client = JDBCPool.pool(vertx, connectOptions, poolOptions);
+ */
+
+ // Vert.x 4.x style
+ SQLiteDataSource ds = new SQLiteDataSource();
+ ds.setUrl(connectionUri);
+ client = JDBCPool.pool(vertx, ds);
}
@Override
diff --git a/dht/src/main/java/io/bosonnetwork/kademlia/tasks/NodeLookupTask.java b/dht/src/main/java/io/bosonnetwork/kademlia/tasks/NodeLookupTask.java
index 4540d1f..9b0ddd1 100644
--- a/dht/src/main/java/io/bosonnetwork/kademlia/tasks/NodeLookupTask.java
+++ b/dht/src/main/java/io/bosonnetwork/kademlia/tasks/NodeLookupTask.java
@@ -147,7 +147,7 @@ protected void iterate() {
CandidateNode cn = getNextCandidate();
if (cn == null) {
// no eligible candidates right now, check in the next iteration
- log.warn("{}#{} no eligible candidates in non-empty queue", getName(), getId());
+ log.debug("{}#{} no eligible candidates in non-empty queue", getName(), getId());
break;
}
diff --git a/dht/src/main/java/io/bosonnetwork/kademlia/tasks/PeerLookupTask.java b/dht/src/main/java/io/bosonnetwork/kademlia/tasks/PeerLookupTask.java
index d2e5902..91c640c 100644
--- a/dht/src/main/java/io/bosonnetwork/kademlia/tasks/PeerLookupTask.java
+++ b/dht/src/main/java/io/bosonnetwork/kademlia/tasks/PeerLookupTask.java
@@ -91,7 +91,7 @@ protected void iterate() {
CandidateNode cn = getNextCandidate();
if (cn == null) {
// no eligible candidates right now, check in the next iteration
- log.warn("{}#{} no eligible candidates in non-empty queue", getName(), getId());
+ log.debug("{}#{} no eligible candidates in non-empty queue", getName(), getId());
break;
}
diff --git a/dht/src/main/java/io/bosonnetwork/kademlia/tasks/ValueLookupTask.java b/dht/src/main/java/io/bosonnetwork/kademlia/tasks/ValueLookupTask.java
index c051dfc..ae37838 100644
--- a/dht/src/main/java/io/bosonnetwork/kademlia/tasks/ValueLookupTask.java
+++ b/dht/src/main/java/io/bosonnetwork/kademlia/tasks/ValueLookupTask.java
@@ -91,7 +91,7 @@ protected void iterate() {
CandidateNode cn = getNextCandidate();
if (cn == null) {
// no eligible candidates right now, check in the next iteration
- log.warn("{}#{} no eligible candidates in non-empty queue", getName(), getId());
+ log.debug("{}#{} no eligible candidates in non-empty queue", getName(), getId());
break;
}
diff --git a/dht/src/test/java/io/bosonnetwork/kademlia/NodeAsyncTests.java b/dht/src/test/java/io/bosonnetwork/kademlia/NodeAsyncTests.java
index 65feed6..00b1238 100644
--- a/dht/src/test/java/io/bosonnetwork/kademlia/NodeAsyncTests.java
+++ b/dht/src/test/java/io/bosonnetwork/kademlia/NodeAsyncTests.java
@@ -45,18 +45,19 @@
import io.bosonnetwork.crypto.Signature.KeyPair;
import io.bosonnetwork.utils.AddressUtils;
import io.bosonnetwork.utils.FileUtils;
-import io.bosonnetwork.utils.vertx.VertxFuture;
+import io.bosonnetwork.vertx.VertxFuture;
@ExtendWith(VertxExtension.class)
+@Timeout(value = NodeAsyncTests.TEST_NODES + 1, timeUnit = TimeUnit.MINUTES)
public class NodeAsyncTests {
- private static Vertx vertx;
- private static final int TEST_NODES = 32;
+ static final int TEST_NODES = 32;
private static final int TEST_NODES_PORT_START = 39001;
private static final Path testDir = Path.of(System.getProperty("java.io.tmpdir"), "boson", "NodeAsyncTests");
private static InetAddress localAddr;
+ private static Vertx vertx;
private static KadNode bootstrap;
private static final List testNodes = new ArrayList<>(TEST_NODES);
@@ -73,12 +74,12 @@ private static VertxFuture startBootstrap() {
.build();
bootstrap = new KadNode(config);
- return bootstrap.run();
+ return bootstrap.start();
}
private static VertxFuture stopBootstrap() {
System.out.println("\n\n\007🟢 Stopping the bootstrap nodes ...\n");
- return bootstrap.shutdown();
+ return bootstrap.stop();
}
private static VertxFuture executeSequentially(int max, int index, Function> action) {
@@ -128,7 +129,7 @@ public void connected(Network network) {
}
});
- node.run();
+ node.start();
return VertxFuture.of(promise.future());
}
@@ -145,7 +146,7 @@ private static VertxFuture startTestNodes() {
private static VertxFuture stopTestNodes() {
System.out.println("\n\n\007🟢 Stopping all the nodes ...\n");
// cannot stop all the nodes in parallel, it will cause vertx internal error.
- return executeSequentially(testNodes, 0, KadNode::shutdown);
+ return executeSequentially(testNodes, 0, KadNode::stop);
}
private static VertxFuture dumpRoutingTable(String name, KadNode node) {
@@ -180,6 +181,9 @@ private static VertxFuture dumpRoutingTables() {
return VertxFuture.of(Future.all(futures).mapEmpty());
}
+ // Vert.x 4.5.x does not support asynchronous lifecycle on static @BeforeAll and @AfterAll methods.
+ // So we use synchronous setup and teardown methods to stay compatible with both Vert.x 4.5.x and 5.0.x.
+
@BeforeAll
@Timeout(value = TEST_NODES + 1, timeUnit = TimeUnit.MINUTES)
static void setup(VertxTestContext context) throws Exception {
@@ -206,12 +210,11 @@ static void setup(VertxTestContext context) throws Exception {
.setBlockedThreadCheckIntervalUnit(TimeUnit.SECONDS)
.setBlockedThreadCheckInterval(120));
- var future = startBootstrap().thenCompose(v -> startTestNodes());
-
- future.toVertxFuture().onComplete(context.succeeding(v -> {
- System.out.println("\n\n\007🟢 All the nodes are ready!!! starting to run the test cases");
- context.completeNow();
- }));
+ startBootstrap().thenCompose(v -> startTestNodes()).toVertxFuture()
+ .onComplete(context.succeeding(v -> {
+ System.out.println("\n\n\007🟢 All the nodes are ready!!! starting to run the test cases");
+ context.completeNow();
+ }));
}
@AfterAll
@@ -221,13 +224,12 @@ static void teardown(VertxTestContext context) throws Exception {
}).thenCompose(v -> {
return stopBootstrap();
}).thenRun(() -> {
- /*/
try {
FileUtils.deleteFile(testDir);
} catch (Exception e) {
- context.failNow(e);
+ fail(e);
}
- */
+
System.out.format("\n\n\007🟢 Test cases finished\n");
}).toVertxFuture().onComplete(context.succeedingThenComplete());
}
@@ -246,9 +248,9 @@ void testNodeWithPresetKey(VertxTestContext context) {
.build();
var node = new KadNode(config);
- node.run()
+ node.start()
.thenRun(() -> context.verify(() -> assertEquals(nodeId, node.getId())))
- .thenCompose(v -> node.shutdown())
+ .thenCompose(v -> node.stop())
.toVertxFuture().onComplete(context.succeedingThenComplete());
}
diff --git a/dht/src/test/java/io/bosonnetwork/kademlia/NodeSyncTests.java b/dht/src/test/java/io/bosonnetwork/kademlia/NodeSyncTests.java
index 6e2418e..190967c 100644
--- a/dht/src/test/java/io/bosonnetwork/kademlia/NodeSyncTests.java
+++ b/dht/src/test/java/io/bosonnetwork/kademlia/NodeSyncTests.java
@@ -37,7 +37,7 @@
import io.bosonnetwork.crypto.Signature.KeyPair;
import io.bosonnetwork.utils.AddressUtils;
import io.bosonnetwork.utils.FileUtils;
-import io.bosonnetwork.utils.vertx.VertxFuture;
+import io.bosonnetwork.vertx.VertxFuture;
public class NodeSyncTests {
private static Vertx vertx;
@@ -64,12 +64,12 @@ private static void startBootstrap() throws Exception {
.build();
bootstrap = new KadNode(config);
- bootstrap.run().get();
+ bootstrap.start().get();
}
private static void stopBootstrap() throws Exception {
System.out.println("\n\n\007🟢 Stopping the bootstrap node ...\n");
- bootstrap.shutdown().get();
+ bootstrap.stop().get();
}
private static void startTestNodes() throws Exception {
@@ -94,7 +94,7 @@ public void connected(Network network) {
future.complete(null);
}
});
- node.run().get();
+ node.start().get();
testNodes.add(node);
System.out.printf("\n\n\007⌛ Wainting for the test node %d - %s ready ...\n", i, node.getId());
@@ -110,7 +110,7 @@ private static void stopTestNodes() throws Exception {
System.out.println("\n\n\007🟢 Stopping all the nodes ...\n");
for (var node : testNodes)
- node.shutdown().get();
+ node.stop().get();
}
private static void dumpRoutingTables() throws Exception {
@@ -172,7 +172,7 @@ static void teardown() throws Exception {
VertxFuture.of(vertx.close()).get();
- //FileUtils.deleteFile(testDir);
+ FileUtils.deleteFile(testDir);
}
@Test
@@ -189,11 +189,11 @@ void testNodeWithPresetKey() throws Exception {
.build();
var node = new KadNode(config);
- node.run().get();
+ node.start().get();
assertEquals(nodeId, node.getId());
- node.shutdown().get();
+ node.stop().get();
}
@Test
diff --git a/dht/src/test/java/io/bosonnetwork/kademlia/SybilTests.java b/dht/src/test/java/io/bosonnetwork/kademlia/SybilTests.java
index bf13a9d..a2ad597 100644
--- a/dht/src/test/java/io/bosonnetwork/kademlia/SybilTests.java
+++ b/dht/src/test/java/io/bosonnetwork/kademlia/SybilTests.java
@@ -53,7 +53,7 @@
import io.bosonnetwork.utils.AddressUtils;
import io.bosonnetwork.utils.Base58;
import io.bosonnetwork.utils.FileUtils;
-import io.bosonnetwork.utils.vertx.VertxFuture;
+import io.bosonnetwork.vertx.VertxFuture;
public class SybilTests {
private static final Path testDir = Path.of(System.getProperty("java.io.tmpdir"), "boson", "SybilTests");
@@ -88,14 +88,14 @@ void setUp() throws Exception {
.storageURL("jdbc:sqlite:" + testDir.resolve("nodes" + File.separator + "node-target" + File.separator + "storage.db"))
.enableDeveloperMode()
.build());
- target.run().get();
+ target.start().get();
targetInfo = target.getNodeInfo().getV4();
}
@AfterEach
void tearDown() throws Exception {
- target.shutdown().get();
+ target.stop().get();
VertxFuture.of(vertx.close()).get();
@@ -121,7 +121,7 @@ void TestAddresses() throws Exception {
.build();
sybil = new KadNode(sybilConfig);
- sybil.run().get();
+ sybil.start().get();
Message request = Message.findNodeRequest(Id.random(), true, false);
RpcCall call = new RpcCall(targetInfo, request);
@@ -159,7 +159,7 @@ public void onTimeout(RpcCall c) {
else
assertFalse(result.get());
- sybil.shutdown().get();
+ sybil.stop().get();
TimeUnit.SECONDS.sleep(2);
}
@@ -184,7 +184,7 @@ void TestIds() throws Exception {
.build();
sybil = new KadNode(sybilConfig);
- sybil.run().get();
+ sybil.start().get();
Message request = Message.findNodeRequest(Id.random(), true, false);
RpcCall call = new RpcCall(targetInfo, request);
@@ -221,7 +221,7 @@ public void onTimeout(RpcCall c) {
else
assertFalse(result.get());
- sybil.shutdown().get();
+ sybil.stop().get();
TimeUnit.SECONDS.sleep(2);
}
diff --git a/dht/src/test/java/io/bosonnetwork/kademlia/rpc/RPCServerTests.java b/dht/src/test/java/io/bosonnetwork/kademlia/rpc/RPCServerTests.java
index 3816390..ec4c92f 100644
--- a/dht/src/test/java/io/bosonnetwork/kademlia/rpc/RPCServerTests.java
+++ b/dht/src/test/java/io/bosonnetwork/kademlia/rpc/RPCServerTests.java
@@ -39,7 +39,6 @@
import io.vertx.core.Context;
import io.vertx.core.Future;
import io.vertx.core.Promise;
-import io.vertx.core.VerticleBase;
import io.vertx.core.Vertx;
import net.datafaker.Faker;
@@ -66,6 +65,7 @@
import io.bosonnetwork.kademlia.security.Blacklist;
import io.bosonnetwork.kademlia.security.SuspiciousNodeDetector;
import io.bosonnetwork.utils.AddressUtils;
+import io.bosonnetwork.vertx.BosonVerticle;
@ExtendWith(VertxExtension.class)
public class RPCServerTests {
@@ -84,7 +84,7 @@ public class RPCServerTests {
private final static Map values = new HashMap<>();
private final static Map> peers = new HashMap<>();
- static class TestNode extends VerticleBase {
+ static class TestNode extends BosonVerticle {
final Identity identity;
final String host;
final int port;
@@ -129,8 +129,8 @@ public Network getNetwork() {
}
@Override
- public void init(Vertx vertx, Context context) {
- super.init(vertx, context);
+ public void prepare(Vertx vertx, Context context) {
+ super.prepare(vertx, context);
kadContext = new KadContext(vertx, context, identity, getNetwork(), null);
rpcServer = new RpcServer(kadContext, host, port, Blacklist.empty(), SuspiciousNodeDetector.disabled(), true, null);
@@ -139,12 +139,12 @@ public void init(Vertx vertx, Context context) {
}
@Override
- public Future start() {
+ public Future deploy() {
return rpcServer.start();
}
@Override
- public Future stop() {
+ public Future undeploy() {
if (rpcServer != null)
return rpcServer.stop().andThen(ar -> rpcServer = null);
else
@@ -161,7 +161,7 @@ public void setSimulateAbnormal(boolean simulateAbnormal) {
protected void sendCall(RpcCall call) {
//noinspection CodeBlock2Expr
- context.runOnContext(v -> {
+ runOnContext(v -> {
rpcServer.sendCall(call).andThen(ar -> {
if (ar.succeeded()) {
sentMessages++;
@@ -177,7 +177,7 @@ protected void sendCall(RpcCall call) {
protected void sendMessage(Message> message) {
//noinspection CodeBlock2Expr
- context.runOnContext(v -> {
+ runOnContext(v -> {
rpcServer.sendMessage(message).andThen(ar -> {
if (ar.succeeded()) {
sentMessages++;
diff --git a/dht/src/test/java/io/bosonnetwork/kademlia/storage/DataStorageTests.java b/dht/src/test/java/io/bosonnetwork/kademlia/storage/DataStorageTests.java
index fb6136a..938d320 100644
--- a/dht/src/test/java/io/bosonnetwork/kademlia/storage/DataStorageTests.java
+++ b/dht/src/test/java/io/bosonnetwork/kademlia/storage/DataStorageTests.java
@@ -95,8 +95,8 @@ static void setupDataStorage(Vertx vertx, VertxTestContext context) {
}));
futures.add(future1);
- var connectionURL = "jdbc:sqlite:" + testDir.resolve("storage.db");
- sqliteStorage = new SQLiteStorage(connectionURL);
+ var sqliteURL = "jdbc:sqlite:" + testDir.resolve("storage.db");
+ sqliteStorage = new SQLiteStorage(sqliteURL);
var future2 = sqliteStorage.initialize(vertx, valueExpiration, peerInfoExpiration).onComplete(context.succeeding(version -> {
context.verify(() -> assertEquals(CURRENT_SCHEMA_VERSION, version));
dataStorages.add(Arguments.of("SQLiteStorage", sqliteStorage));
@@ -104,8 +104,8 @@ static void setupDataStorage(Vertx vertx, VertxTestContext context) {
futures.add(future2);
/*
- connectionURL = "postgresql://jingyu@localhost:5432/test";
- postgresStorage = new PostgresStorage(connectionURL);
+ var postgresqlURL = "postgresql://jingyu@localhost:5432/test";
+ postgresStorage = new PostgresStorage(postgresqlURL);
var future3 = postgresStorage.initialize(vertx, valueExpiration, peerInfoExpiration).onComplete(context.succeeding(version -> {
context.verify(() -> assertEquals(CURRENT_SCHEMA_VERSION, version));
dataStorages.add(Arguments.of("PostgresStorage", postgresStorage));
From 844589a39b2fd3553892ca99331f5f8cef118e37 Mon Sep 17 00:00:00 2001
From: Jingyu
Date: Mon, 10 Nov 2025 23:38:58 +0800
Subject: [PATCH 3/4] Add abstraction layer for Vert.x SqlClient and basic
schema versioning/migration support
---
api/pom.xml | 371 +++++------
.../io/bosonnetwork/vertx/BosonVerticle.java | 31 +-
.../bosonnetwork/vertx/VersionedSchema.java | 584 ++++++++++++++++++
.../io/bosonnetwork/vertx/VertxCaffeine.java | 13 +-
.../io/bosonnetwork/vertx/VertxDatabase.java | 252 ++++++++
.../vertx/VersionedSchemaTests.java | 90 +++
.../resources/db/postgres/10_add_trigger.sql | 23 +
.../resources/db/postgres/1_init_schema.sql | 15 +
.../resources/db/postgres/2_add_index.sql | 3 +
.../db/postgres/3_insert_sample_data.sql | 12 +
.../db/postgres/4_add_profile_table.sql | 6 +
.../db/postgres/5_add_last_login_column.sql | 2 +
.../db/postgres/6_normalize_email_case.sql | 4 +
.../db/postgres/7_add_message_likes_table.sql | 8 +
.../8_add_view_user_messages_view.sql | 12 +
.../db/postgres/9_add_audit_log_table.sql | 6 +
.../resources/db/sqlite/10_add_trigger.sql | 7 +
.../resources/db/sqlite/1_init_schema.sql | 15 +
.../test/resources/db/sqlite/2_add_index.sql | 3 +
.../db/sqlite/3_insert_sample_data.sql | 12 +
.../db/sqlite/4_add_profile_table.sql | 6 +
.../db/sqlite/5_add_last_login_column.sql | 2 +
.../db/sqlite/6_normalize_email_case.sql | 4 +
.../db/sqlite/7_add_message_likes_table.sql | 8 +
.../sqlite/8_add_view_user_messages_view.sql | 10 +
.../db/sqlite/9_add_audit_log_table.sql | 6 +
26 files changed, 1314 insertions(+), 191 deletions(-)
create mode 100644 api/src/main/java/io/bosonnetwork/vertx/VersionedSchema.java
create mode 100644 api/src/main/java/io/bosonnetwork/vertx/VertxDatabase.java
create mode 100644 api/src/test/java/io/bosonnetwork/vertx/VersionedSchemaTests.java
create mode 100644 api/src/test/resources/db/postgres/10_add_trigger.sql
create mode 100644 api/src/test/resources/db/postgres/1_init_schema.sql
create mode 100644 api/src/test/resources/db/postgres/2_add_index.sql
create mode 100644 api/src/test/resources/db/postgres/3_insert_sample_data.sql
create mode 100644 api/src/test/resources/db/postgres/4_add_profile_table.sql
create mode 100644 api/src/test/resources/db/postgres/5_add_last_login_column.sql
create mode 100644 api/src/test/resources/db/postgres/6_normalize_email_case.sql
create mode 100644 api/src/test/resources/db/postgres/7_add_message_likes_table.sql
create mode 100644 api/src/test/resources/db/postgres/8_add_view_user_messages_view.sql
create mode 100644 api/src/test/resources/db/postgres/9_add_audit_log_table.sql
create mode 100644 api/src/test/resources/db/sqlite/10_add_trigger.sql
create mode 100644 api/src/test/resources/db/sqlite/1_init_schema.sql
create mode 100644 api/src/test/resources/db/sqlite/2_add_index.sql
create mode 100644 api/src/test/resources/db/sqlite/3_insert_sample_data.sql
create mode 100644 api/src/test/resources/db/sqlite/4_add_profile_table.sql
create mode 100644 api/src/test/resources/db/sqlite/5_add_last_login_column.sql
create mode 100644 api/src/test/resources/db/sqlite/6_normalize_email_case.sql
create mode 100644 api/src/test/resources/db/sqlite/7_add_message_likes_table.sql
create mode 100644 api/src/test/resources/db/sqlite/8_add_view_user_messages_view.sql
create mode 100644 api/src/test/resources/db/sqlite/9_add_audit_log_table.sql
diff --git a/api/pom.xml b/api/pom.xml
index ee717cd..e445c23 100644
--- a/api/pom.xml
+++ b/api/pom.xml
@@ -1,184 +1,199 @@
-
- 4.0.0
+
+ 4.0.0
+
+
+ io.bosonnetwork
+ boson-parent
+ 3-SNAPSHOT
+
+
-
io.bosonnetwork
- boson-parent
- 3-SNAPSHOT
-
-
-
- io.bosonnetwork
- boson-api
- 2.0.8-SNAPSHOT
- jar
-
- Boson API
-
- The public APIs for the Boson Kademlia DHT node.
-
- https://github.com/bosonnetwork/Boson.Core
-
-
-
- MIT License
- https://github.com/bosonnetwork/Boson.Core/blob/master/LICENSE
- repo
-
-
-
-
-
- boson-network-dev
- Boson Network
- support@bosonnetwork.io
- BosonNetwork
- https://github.com/bosonnetwork
-
- architect
- developer
-
-
- https://avatars.githubusercontent.com/u/152134507
-
-
-
-
-
- scm:git:git@github.com:bosonnetwork/Boson.Core.git
- scm:git:git@github.com:bosonnetwork/Boson.Core.git
- git@github.com:bosonnetwork/Boson.Core.git
-
-
-
+ boson-api
+ 2.0.8-SNAPSHOT
+ jar
+
+ Boson API
+
+ The public APIs for the Boson Kademlia DHT node.
+
+ https://github.com/bosonnetwork/Boson.Core
+
+
+
+ MIT License
+ https://github.com/bosonnetwork/Boson.Core/blob/master/LICENSE
+ repo
+
+
+
+
+
+ boson-network-dev
+ Boson Network
+ support@bosonnetwork.io
+ BosonNetwork
+ https://github.com/bosonnetwork
+
+ architect
+ developer
+
+
+ https://avatars.githubusercontent.com/u/152134507
+
+
+
+
+
+ scm:git:git@github.com:bosonnetwork/Boson.Core.git
+ scm:git:git@github.com:bosonnetwork/Boson.Core.git
+ git@github.com:bosonnetwork/Boson.Core.git
+
+
+
+
+
+ io.bosonnetwork
+ boson-dependencies
+ 2.0.8-SNAPSHOT
+ pom
+ import
+
+
+
+
-
- io.bosonnetwork
- boson-dependencies
- 2.0.8-SNAPSHOT
- pom
- import
-
+
+
+ com.fasterxml.jackson.core
+ jackson-core
+ true
+
+
+ com.fasterxml.jackson.core
+ jackson-databind
+ true
+
+
+ com.fasterxml.jackson.dataformat
+ jackson-dataformat-cbor
+ true
+
+
+ com.fasterxml.jackson.dataformat
+ jackson-dataformat-yaml
+ true
+
+
+
+ com.github.jnr
+ jnr-ffi
+
+
+ io.tmio
+ tuweni-crypto
+
+
+
+ io.vertx
+ vertx-core
+ true
+
+
+ io.vertx
+ vertx-sql-client
+ true
+
+
+
+ com.github.ben-manes.caffeine
+ caffeine
+ true
+
+
+
+ org.jdbi
+ jdbi3-core
+ true
+
+
+
+
+ org.slf4j
+ slf4j-api
+ provided
+
+
+ ch.qos.logback
+ logback-classic
+ provided
+
+
+
+
+ org.junit.jupiter
+ junit-jupiter
+ test
+
+
+
+ io.vertx
+ vertx-junit5
+ test
+
+
+ io.vertx
+ vertx-pg-client
+ test
+
+
+ io.vertx
+ vertx-jdbc-client
+ test
+
+
+
+ org.xerial
+ sqlite-jdbc
+ test
+
-
-
-
-
-
- com.fasterxml.jackson.core
- jackson-core
- true
-
-
- com.fasterxml.jackson.core
- jackson-databind
- true
-
-
- com.fasterxml.jackson.dataformat
- jackson-dataformat-cbor
- true
-
-
- com.fasterxml.jackson.dataformat
- jackson-dataformat-yaml
- true
-
-
-
- com.github.jnr
- jnr-ffi
-
-
- io.tmio
- tuweni-crypto
-
-
-
- io.vertx
- vertx-core
- true
-
-
-
- com.github.ben-manes.caffeine
- caffeine
- true
-
-
-
- org.jdbi
- jdbi3-core
- true
-
-
-
-
- org.slf4j
- slf4j-api
- provided
-
-
- ch.qos.logback
- logback-classic
- provided
-
-
-
-
- org.junit.jupiter
- junit-jupiter
- test
-
-
-
- io.vertx
- vertx-junit5
- test
-
-
-
- org.xerial
- sqlite-jdbc
- test
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-jar-plugin
-
-
-
- org.apache.maven.plugins
- maven-source-plugin
-
-
-
- org.apache.maven.plugins
- maven-javadoc-plugin
-
-
-
- org.apache.maven.plugins
- maven-gpg-plugin
-
-
-
- org.apache.maven.plugins
- maven-surefire-plugin
-
-
-
+
+
+
+
+ org.apache.maven.plugins
+ maven-jar-plugin
+
+
+
+ org.apache.maven.plugins
+ maven-source-plugin
+
+
+
+ org.apache.maven.plugins
+ maven-javadoc-plugin
+
+
+
+ org.apache.maven.plugins
+ maven-gpg-plugin
+
+
+
+ org.apache.maven.plugins
+ maven-surefire-plugin
+
+
+
\ No newline at end of file
diff --git a/api/src/main/java/io/bosonnetwork/vertx/BosonVerticle.java b/api/src/main/java/io/bosonnetwork/vertx/BosonVerticle.java
index 17f7641..86eefee 100644
--- a/api/src/main/java/io/bosonnetwork/vertx/BosonVerticle.java
+++ b/api/src/main/java/io/bosonnetwork/vertx/BosonVerticle.java
@@ -1,15 +1,37 @@
+/*
+ * Copyright (c) 2023 - bosonnetwork.io
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
package io.bosonnetwork.vertx;
import java.util.List;
import java.util.concurrent.Callable;
import io.vertx.core.Context;
+import io.vertx.core.Deployable;
import io.vertx.core.Future;
import io.vertx.core.Handler;
import io.vertx.core.Promise;
-import io.vertx.core.Verticle;
import io.vertx.core.Vertx;
-import io.vertx.core.impl.ContextInternal;
+import io.vertx.core.internal.ContextInternal;
import io.vertx.core.json.JsonObject;
/**
@@ -36,7 +58,7 @@
* {@link #undeploy()}. This ensures forward compatibility with Vert.x 5’s {@code VerticleBase}.
*
*/
-public abstract class BosonVerticle implements Verticle /*, Deployable */ {
+public abstract class BosonVerticle implements /* Verticle, */ Deployable {
/**
* Reference to the Vert.x instance that deployed this verticle
*/
@@ -105,7 +127,6 @@ public final List processArgs() {
* @param vertx the Vert.x instance
* @param context the context associated with this Verticle
*/
- @Override
public final void init(Vertx vertx, Context context) {
prepare(vertx, context);
}
@@ -135,7 +156,6 @@ public void prepare(Vertx vertx, Context context) {
* @param startPromise a promise that should be completed when startup is done
* @throws Exception if startup fails
*/
- @Override
public final void start(Promise startPromise) throws Exception {
deploy().onComplete(startPromise);
}
@@ -150,7 +170,6 @@ public final void start(Promise startPromise) throws Exception {
* @param stopPromise a promise that should be completed when shutdown is done
* @throws Exception if shutdown fails
*/
- @Override
public final void stop(Promise stopPromise) throws Exception {
undeploy().onComplete(stopPromise);
}
diff --git a/api/src/main/java/io/bosonnetwork/vertx/VersionedSchema.java b/api/src/main/java/io/bosonnetwork/vertx/VersionedSchema.java
new file mode 100644
index 0000000..0dbb649
--- /dev/null
+++ b/api/src/main/java/io/bosonnetwork/vertx/VersionedSchema.java
@@ -0,0 +1,584 @@
+/*
+ * Copyright (c) 2023 - bosonnetwork.io
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package io.bosonnetwork.vertx;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.List;
+
+import io.vertx.core.Future;
+import io.vertx.core.Promise;
+import io.vertx.sqlclient.Row;
+import io.vertx.sqlclient.RowSet;
+import io.vertx.sqlclient.SqlClient;
+import io.vertx.sqlclient.Tuple;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Simple, file-based schema migration helper for Vert.x SQL clients.
+ *
+ * Reads migration SQL files from a directory, detects the database flavor,
+ * and applies pending migrations transactionally, recording versions in a schema_versions table.
+ *
+ */
+public class VersionedSchema implements VertxDatabase {
+	private final SqlClient client;
+	private final Path schemaPath;
+	// Detected lazily at the start of migrate(); used to choose the placeholder style.
+	private String databaseProductName;
+	// Last successfully applied version; updated as each migration completes.
+	private SchemaVersion currentVersion;
+
+	private static final Logger log = LoggerFactory.getLogger(VersionedSchema.class);
+
+	/**
+	 * Immutable record of a migration application state.
+	 *
+	 * @param version schema version number
+	 * @param description human-readable description
+	 * @param appliedBy user/process that applied the migration
+	 * @param appliedAt timestamp (ms) when started
+	 * @param consumedTime duration (ms) spent applying
+	 * @param success whether migration succeeded
+	 */
+	public record SchemaVersion(int version, String description, String appliedBy, long appliedAt,
+			long consumedTime, boolean success) {}
+
+	/**
+	 * Internal holder for a parsed migration file.
+	 */
+	private static class Migration {
+		private final int version;		// numeric version parsed from the file name
+		private String description;		// textual description; may be replaced by the file's leading comment
+		private final Path path;		// file path to the SQL script
+
+		public Migration(int version, String description, Path path) {
+			this.version = version;
+			this.description = description;
+			this.path = path;
+		}
+
+		public void setDescription(String description) {
+			this.description = description;
+		}
+
+		public String fileName() {
+			return path.getFileName().toString();
+		}
+	}
+
+	private VersionedSchema(SqlClient client, Path schemaPath) {
+		this.client = client;
+		this.schemaPath = schemaPath;
+	}
+
+	/**
+	 * Initializes a new {@link VersionedSchema} instance.
+	 *
+	 * @param client Vert.x SQL client
+	 * @param schemaPath directory containing migration SQL files
+	 * @return a new versioned schema helper
+	 */
+	public static VersionedSchema init(SqlClient client, Path schemaPath) {
+		return new VersionedSchema(client, schemaPath);
+	}
+
+	/**
+	 * The underlying SQL client used for migrations.
+	 *
+	 * @return the client
+	 */
+	@Override
+	public SqlClient getClient() {
+		return client;
+	}
+
+	/**
+	 * Returns the last successfully applied schema version, if any.
+	 *
+	 * @return the current version or {@code null} if none recorded
+	 */
+	public SchemaVersion getCurrentVersion() {
+		return currentVersion;
+	}
+
+	/**
+	 * Discovers and applies pending migrations found under {@code schemaPath}.
+	 *
+	 * <ol>
+	 * <li>Ensures the schema_versions table exists.</li>
+	 * <li>Reads the latest applied version.</li>
+	 * <li>Parses and sorts new migration files.</li>
+	 * <li>Applies each migration in order, transactionally.</li>
+	 * </ol>
+	 *
+	 * @return a future completed when all pending migrations are applied
+	 */
+	public Future<Void> migrate() {
+		return getDatabaseProductName().compose(name -> {
+			databaseProductName = name;
+			log.debug("Migration check: target database product {}", name);
+			return query(createSchemaVersionTable()).execute();
+		}).compose(v ->
+			getSchemaVersion()
+		).compose(v -> {
+			int version = 0;
+			if (v != null) {
+				this.currentVersion = v;
+				version = this.currentVersion.version();
+			}
+
+			try {
+				return Future.succeededFuture(getNewMigrations(version));
+			} catch (IOException | IllegalStateException e) {
+				return Future.failedFuture(new IllegalStateException("Migration check failed", e));
+			}
+		}).compose(migrations -> {
+			if (migrations.isEmpty())
+				return Future.succeededFuture();
+
+			// Apply migrations strictly in ascending version order, one after another.
+			Future<Void> chain = Future.succeededFuture();
+			for (Migration migration : migrations)
+				chain = chain.compose(na ->
+					applyMigration(migration).map(v -> {
+						this.currentVersion = v;
+						return null;
+					})
+				);
+
+			return chain;
+		});
+	}
+
+	/**
+	 * Reads the latest successful schema version from the database.
+	 *
+	 * @return a future with the last applied {@link SchemaVersion} or {@code null}
+	 */
+	private Future<SchemaVersion> getSchemaVersion() {
+		return query(selectSchemaVersion())
+				.execute()
+				.map(VersionedSchema::mapToSchemaVersion);
+	}
+
+	/**
+	 * Maps the first row of the result set to a {@link SchemaVersion}, or {@code null} when empty.
+	 */
+	private static SchemaVersion mapToSchemaVersion(RowSet<Row> rowSet) {
+		if (rowSet.size() == 0)
+			return null;
+
+		// first row only
+		Row row = rowSet.iterator().next();
+		int version = row.getInteger("version");
+		String description = row.getString("description");
+		String appliedBy = row.getString("applied_by");
+		long appliedAt = row.getLong("applied_at");
+		long consumedTime = row.getLong("consumed_time");
+		boolean success = row.getBoolean("success");
+
+		return new SchemaVersion(version, description, appliedBy, appliedAt, consumedTime, success);
+	}
+
+	/**
+	 * Scans {@code schemaPath} and returns migrations with the version greater than {@code currentVersion}.
+	 * File names must follow: {@code <version>_<description>.sql}.
+	 *
+	 * @param currentVersion the latest applied version
+	 * @return sorted list of pending migrations
+	 * @throws IOException when reading the directory fails
+	 * @throws IllegalStateException on duplicate versions or malformed names
+	 */
+	private List<Migration> getNewMigrations(int currentVersion) throws IOException, IllegalStateException {
+		if (schemaPath == null) {
+			log.warn("Migration check: skipping, no schema migration path set");
+			return List.of();
+		}
+
+		log.info("Migration check: checking for new migrations from {} ...", schemaPath);
+
+		List<Migration> migrations = new ArrayList<>();
+		Files.walkFileTree(schemaPath, new SimpleFileVisitor<>() {
+			@Override
+			@SuppressWarnings("NullableProblems")
+			public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
+				String name = file.getFileName().toString();
+				if (!name.endsWith(".sql")) {
+					log.warn("Migration check: ignore non-SQL file {}", name);
+					return FileVisitResult.CONTINUE;
+				}
+
+				Migration migration;
+				try {
+					migration = parseFileName(file);
+					if (migration.version <= currentVersion)
+						return FileVisitResult.CONTINUE;
+				} catch (IllegalStateException e) {
+					log.warn("Migration check: ignore malformed file name {} - {}", name, e.getMessage());
+					return FileVisitResult.CONTINUE;
+				}
+
+				migrations.add(migration);
+				return FileVisitResult.CONTINUE;
+			}
+		});
+
+		if (migrations.isEmpty()) {
+			log.info("Migration check: no new migrations found");
+			return List.of();
+		}
+
+		// Sort first, then check adjacent entries for duplicates. Throwing from inside a
+		// Comparator is unreliable: the sort is not guaranteed to compare every pair, so
+		// duplicates could slip through (and sort() would also wrap the exception).
+		migrations.sort(Comparator.comparingInt(m -> m.version));
+		for (int i = 1; i < migrations.size(); i++) {
+			Migration m1 = migrations.get(i - 1);
+			Migration m2 = migrations.get(i);
+			if (m1.version == m2.version) {
+				log.error("Migration check: Migration file version must be unique. File names: {} and {}",
+						m1.fileName(), m2.fileName());
+				throw new IllegalStateException("Migration file version must be unique");
+			}
+		}
+
+		return migrations;
+	}
+
+	/**
+	 * Parses a migration file name into a {@link Migration}.
+	 * Expected format: {@code <version>_<description>.sql}
+	 *
+	 * @param file path to the migration file
+	 * @return parsed {@link Migration}
+	 * @throws IllegalStateException if the name does not match the expected pattern
+	 */
+	private static Migration parseFileName(Path file) {
+		String fileName = file.getFileName().toString();
+		String[] parts = fileName.split("_", 2);
+		if (parts.length != 2)
+			throw new IllegalStateException("Migration file name must be in format <version>_<description>.sql");
+
+		int version;
+		try {
+			version = Integer.parseInt(parts[0]);
+		} catch (NumberFormatException e) {
+			throw new IllegalStateException("Migration file name must be in format <version>_<description>.sql");
+		}
+
+		int dotIndex = parts[1].lastIndexOf('.');
+		String baseName = (dotIndex == -1) ? parts[1] : parts[1].substring(0, dotIndex);
+		if (baseName.isEmpty())
+			throw new IllegalStateException("Migration file name must be in format <version>_<description>.sql");
+
+		String description = baseName.replace('_', ' ');
+		return new Migration(version, description, file);
+	}
+
+	/**
+	 * Reads the first non-empty line comment as a long description, if present.
+	 * The reader is reset to its starting position afterwards.
+	 *
+	 * @param reader buffered reader positioned at the start of the file
+	 * @return the description without the comment marker, or {@code null} if not present
+	 * @throws IOException if reading fails
+	 */
+	private static String readDescriptionComment(BufferedReader reader) throws IOException {
+		String description = null;
+
+		// NOTE: the mark limit bounds how far ahead we may read before reset();
+		// a first line longer than 4096 chars would invalidate the mark.
+		reader.mark(4096);
+		String line;
+		while ((line = reader.readLine()) != null) {
+			String trimmed = line.trim();
+			if (trimmed.isEmpty())
+				continue;
+
+			// Only the very first non-empty line is considered; stop either way.
+			if (trimmed.startsWith("--"))
+				description = trimmed.substring(2).trim();
+
+			break;
+		}
+		reader.reset();
+		return description;
+	}
+
+	/**
+	 * Reads the next full SQL statement from a reader, correctly handling:
+	 *
+	 * <ul>
+	 * <li>PostgreSQL dollar-quoted blocks ($$ or $tag$)</li>
+	 * <li>SQLite/PostgreSQL BEGIN...END; blocks (including nesting)</li>
+	 * <li>Line and block comments</li>
+	 * <li>Quoted strings and identifiers</li>
+	 * </ul>
+	 *
+	 * NOTE(review): when a statement terminator occurs mid-line, any text after the
+	 * semicolon on that same line is discarded — scripts are expected to start each
+	 * statement on its own line; confirm this matches the migration file conventions.
+	 *
+	 * @param reader buffered reader over a SQL script
+	 * @return the next statement including the trailing semicolon, or {@code null} if EOF
+	 * @throws IOException if reading fails
+	 */
+	private static String nextStatement(BufferedReader reader) throws IOException {
+		StringBuilder statement = new StringBuilder();
+		String line;
+		String currentDollarTag = null;
+		boolean inSingleQuote = false;
+		boolean inDoubleQuote = false;
+		boolean inBlockComment = false;
+		int beginEndDepth = 0;
+
+		while ((line = reader.readLine()) != null) {
+			String trimmed = line.trim();
+			if (trimmed.isEmpty())
+				continue;
+
+			if (trimmed.startsWith("--"))
+				continue;
+
+			int i = 0;
+			while (i < line.length()) {
+				char c = line.charAt(i);
+
+				// Handle entering/exiting block comments
+				if (!inSingleQuote && !inDoubleQuote && !inBlockComment && i + 1 < line.length()
+						&& line.charAt(i) == '/' && line.charAt(i + 1) == '*') {
+					inBlockComment = true;
+					i += 2;
+					continue;
+				}
+				if (inBlockComment) {
+					if (i + 1 < line.length() && line.charAt(i) == '*' && line.charAt(i + 1) == '/') {
+						inBlockComment = false;
+						i += 2;
+					} else {
+						i++;
+					}
+					continue;
+				}
+
+				// Handle line comments
+				if (!inSingleQuote && !inDoubleQuote && !inBlockComment && i + 1 < line.length()
+						&& line.charAt(i) == '-' && line.charAt(i + 1) == '-') {
+					// the rest of the line is a comment
+					break;
+				}
+
+				// Handle entering/leaving quotes
+				if (!inDoubleQuote && !inBlockComment && c == '\'' && currentDollarTag == null) {
+					inSingleQuote = !inSingleQuote;
+					i++;
+					continue;
+				}
+				if (!inSingleQuote && !inBlockComment && c == '"' && currentDollarTag == null) {
+					inDoubleQuote = !inDoubleQuote;
+					i++;
+					continue;
+				}
+
+				// Handle entering/leaving dollar-quoted blocks($$ or $tag$)
+				if (!inSingleQuote && !inDoubleQuote && !inBlockComment && c == '$') {
+					// Try to detect a tag like $tag$
+					int j = i + 1;
+					while (j < line.length() && Character.isLetterOrDigit(line.charAt(j))) j++;
+					if (j < line.length() && line.charAt(j) == '$') {
+						String tag = line.substring(i, j + 1);
+						if (currentDollarTag == null) {
+							currentDollarTag = tag; // entering
+						} else if (currentDollarTag.equals(tag)) {
+							currentDollarTag = null; // leaving
+						}
+						i = j + 1;
+						continue;
+					}
+				}
+
+				// Detect BEGIN/END keywords outside of quotes/comments
+				if (!inSingleQuote && !inDoubleQuote && !inBlockComment && currentDollarTag == null) {
+					// detect BEGIN
+					if (startsKeyword(line, i, "BEGIN")) {
+						beginEndDepth++;
+					} else if (startsKeyword(line, i, "END")) {
+						if (beginEndDepth > 0) {
+							beginEndDepth--;
+						}
+					}
+				}
+
+				// Detect statement terminator only when safe
+				if (!inSingleQuote && !inDoubleQuote && !inBlockComment && currentDollarTag == null && c == ';') {
+					if (beginEndDepth == 0) {
+						statement.append(line, 0, i + 1).append('\n');
+						return statement.toString().trim();
+					}
+				}
+
+				i++;
+			}
+
+			statement.append(line).append('\n');
+		}
+
+		if (statement.toString().trim().isEmpty())
+			return null;
+		else
+			return statement.toString();
+	}
+
+	/**
+	 * Returns true if {@code keyword} occurs at {@code pos} in {@code line} as a whole
+	 * word (case-insensitive), i.e. not embedded in a longer identifier.
+	 */
+	private static boolean startsKeyword(String line, int pos, String keyword) {
+		int len = keyword.length();
+		if (pos + len > line.length())
+			return false;
+
+		String sub = line.substring(pos, pos + len);
+		if (!sub.equalsIgnoreCase(keyword))
+			return false;
+
+		// make sure not part of the longer word
+		boolean beforeOk = (pos == 0) || !Character.isLetterOrDigit(line.charAt(pos - 1));
+		boolean afterOk = (pos + len == line.length()) || !Character.isLetterOrDigit(line.charAt(pos + len));
+		return beforeOk && afterOk;
+	}
+
+	/**
+	 * Applies a single migration inside a transaction and persists the new schema version.
+	 *
+	 * @param migration the migration to apply
+	 * @return a future completing with the new {@link SchemaVersion} when done
+	 */
+	private Future<SchemaVersion> applyMigration(Migration migration) {
+		log.info("Migration: applying migration version {} from {}...", migration.version, migration.fileName());
+
+		long begin = System.currentTimeMillis();
+		return withTransaction(connection -> {
+			Future<Void> chain = Future.succeededFuture();
+			// Files.newBufferedReader() reads UTF-8 regardless of the platform default
+			// charset, unlike FileReader.
+			try (BufferedReader reader = Files.newBufferedReader(migration.path)) {
+				String longDescription = readDescriptionComment(reader);
+				if (longDescription != null)
+					migration.setDescription(longDescription);
+
+				String statement;
+				while ((statement = nextStatement(reader)) != null) {
+					final String sql = statement;
+					log.trace("Migration: executing statement {}", sql);
+					chain = chain.compose(v -> connection.query(sql).execute().mapEmpty());
+				}
+			} catch (IOException e) {
+				return Future.failedFuture(new IllegalStateException("Failed to read migration file", e));
+			}
+
+			return chain.compose(v -> {
+				long duration = System.currentTimeMillis() - begin;
+				log.info("Migration: applied migration file {} in {} ms", migration.fileName(), duration);
+				log.debug("Migration: updating schema version...");
+				SchemaVersion newVersion = new SchemaVersion(migration.version, migration.description,
+						"", begin, duration, true);
+				return connection.preparedQuery(insertSchemaVersion()).execute(
+						Tuple.of(newVersion.version(),
+								newVersion.description(),
+								newVersion.appliedBy(),
+								newVersion.appliedAt(),
+								newVersion.consumedTime(),
+								newVersion.success())
+				).andThen(ar -> {
+					if (ar.succeeded())
+						log.debug("Migration: schema version updated to version {}", migration.version);
+					else
+						log.error("Migration: failed to update schema version", ar.cause());
+				}).map(newVersion);
+			});
+		});
+	}
+
+	/**
+	 * Creates schema_versions table if it does not exist.
+	 *
+	 * @return DDL for creating the schema_versions table if it does not exist
+	 */
+	protected String createSchemaVersionTable() {
+		return createSchemaVersionTable;
+	}
+
+	/**
+	 * Selects the latest successful schema version.
+	 *
+	 * @return SQL for selecting the latest successful schema version
+	 */
+	protected String selectSchemaVersion() {
+		return selectSchemaVersion;
+	}
+
+	/**
+	 * Inserts a new schema version record.
+	 * Chooses the correct INSERT statement placeholder style for the detected database.
+	 * Only valid after {@link #migrate()} has detected the database product.
+	 *
+	 * @return parameterized INSERT SQL suitable for the target database
+	 */
+	protected String insertSchemaVersion() {
+		if (databaseProductName.toLowerCase().contains("postgres"))
+			return insertSchemaVersionWithIndexedParameters;
+		else
+			return insertSchemaVersionWithQuestionMarks;
+	}
+
+	private static final String createSchemaVersionTable = """
+			CREATE TABLE IF NOT EXISTS schema_versions(
+				version INTEGER PRIMARY KEY,
+				description VARCHAR(512) UNIQUE DEFAULT NULL,
+				applied_by VARCHAR(128),
+				applied_at BIGINT NOT NULL,
+				consumed_time BIGINT DEFAULT 0,
+				success BOOLEAN NOT NULL)
+			""";
+
+	private static final String selectSchemaVersion = """
+			SELECT * FROM schema_versions
+				WHERE success = TRUE
+				ORDER BY version DESC
+				LIMIT 1
+			""";
+
+	private static final String insertSchemaVersionWithQuestionMarks = """
+			INSERT INTO schema_versions(version, description, applied_by, applied_at, consumed_time, success)
+				VALUES(?, ?, ?, ?, ?, ?)
+			""";
+
+	private static final String insertSchemaVersionWithIndexedParameters = """
+			INSERT INTO schema_versions(version, description, applied_by, applied_at, consumed_time, success)
+				VALUES($1, $2, $3, $4, $5, $6)
+			""";
+}
\ No newline at end of file
diff --git a/api/src/main/java/io/bosonnetwork/vertx/VertxCaffeine.java b/api/src/main/java/io/bosonnetwork/vertx/VertxCaffeine.java
index 497d98f..cd96dd7 100644
--- a/api/src/main/java/io/bosonnetwork/vertx/VertxCaffeine.java
+++ b/api/src/main/java/io/bosonnetwork/vertx/VertxCaffeine.java
@@ -22,12 +22,11 @@
package io.bosonnetwork.vertx;
-import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.Scheduler;
-
+import io.vertx.core.Promise;
import io.vertx.core.Vertx;
/**
@@ -63,12 +62,12 @@ public static Caffeine newBuilder(Vertx vertx) {
* Custom Caffeine Scheduler that schedules tasks using Vert.x timers.
*
* The scheduled task is executed on the provided executor after the specified delay.
- * Completion is signaled via a {@link CompletableFuture}, which is completed when the task finishes
+ * Completion is signaled via a {@link VertxFuture}, which is completed when the task finishes
* or completed exceptionally if an error occurs.
*
*/
Scheduler vertxScheduler = (executor, runnable, delay, unit) -> {
-		CompletableFuture<Void> future = new CompletableFuture<>();
+		Promise<Void> promise = Promise.promise();
vertx.setTimer(unit.toMillis(delay), (tid) -> {
// When the timer fires, execute the scheduled task on the provided executor.
@@ -76,14 +75,14 @@ public static Caffeine newBuilder(Vertx vertx) {
executor.execute(() -> {
try {
runnable.run();
- future.complete(null);
+ promise.complete(null);
} catch (Exception e) {
- future.completeExceptionally(e);
+ promise.fail(e);
}
});
});
- return future;
+ return VertxFuture.of(promise.future());
};
return Caffeine.newBuilder()
diff --git a/api/src/main/java/io/bosonnetwork/vertx/VertxDatabase.java b/api/src/main/java/io/bosonnetwork/vertx/VertxDatabase.java
new file mode 100644
index 0000000..12584cd
--- /dev/null
+++ b/api/src/main/java/io/bosonnetwork/vertx/VertxDatabase.java
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2023 - bosonnetwork.io
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package io.bosonnetwork.vertx;
+
+import java.util.List;
+import java.util.function.Function;
+
+import io.vertx.core.Future;
+import io.vertx.sqlclient.Pool;
+import io.vertx.sqlclient.PreparedQuery;
+import io.vertx.sqlclient.Query;
+import io.vertx.sqlclient.Row;
+import io.vertx.sqlclient.RowSet;
+import io.vertx.sqlclient.SqlClient;
+import io.vertx.sqlclient.SqlConnection;
+import io.vertx.sqlclient.TransactionRollbackException;
+
+/**
+ * Abstraction over a Vert.x {@link SqlClient} providing convenience helpers for
+ * querying, prepared querying, connection/transaction handling, and small mapping utilities.
+ *
+ * Implementations must supply the underlying {@link #getClient()} which can be either a {@link SqlConnection}
+ * or a {@link Pool}. All helpers delegate to this client in a safe, Vert.x-friendly way.
+ *
+ */
+public interface VertxDatabase {
+	/**
+	 * Returns the underlying Vert.x SQL client, either a {@link SqlConnection} or a {@link Pool}.
+	 *
+	 * @return the backing SQL client
+	 */
+	SqlClient getClient();
+
+	/**
+	 * Attempts to retrieve the database product name from the current connection.
+	 * If the driver does not support reading metadata, returns {@code "Unknown"}.
+	 *
+	 * @return a future completing with the database product name
+	 */
+	default Future<String> getDatabaseProductName() {
+		return withConnection(c -> {
+			String name;
+			try {
+				name = c.databaseMetadata().productName();
+			} catch (UnsupportedOperationException e) {
+				name = "Unknown";
+			}
+
+			return Future.succeededFuture(name);
+		});
+	}
+
+	/**
+	 * Creates a simple text query using the underlying client.
+	 *
+	 * @param sql SQL text to execute
+	 * @return a Vert.x {@link Query} for the provided SQL
+	 */
+	default Query<RowSet<Row>> query(String sql) {
+		return getClient().query(sql);
+	}
+
+	/**
+	 * Creates a prepared query using the underlying client.
+	 *
+	 * @param sql SQL text with placeholders
+	 * @return a Vert.x {@link PreparedQuery} for the provided SQL
+	 */
+	default PreparedQuery<RowSet<Row>> preparedQuery(String sql) {
+		return getClient().preparedQuery(sql);
+	}
+
+	/**
+	 * Executes the provided function within a database transaction.
+	 *
+	 * <p>If the underlying client is a {@link Pool}, a connection is obtained and a transaction is started.
+	 * If it is a {@link SqlConnection}, the transaction is started on that connection directly.
+	 * The transaction is committed if the returned future succeeds, otherwise it is rolled back.</p>
+	 *
+	 * @param function a function receiving a {@link SqlConnection} and returning a future result
+	 * @param <T> result type
+	 * @return a future completing with the function result after commit, or failing after rollback
+	 */
+	default <T> Future<T> withTransaction(Function<SqlConnection, Future<T>> function) {
+		if (getClient() instanceof SqlConnection c) {
+			return withTransaction(c, function);
+		} else if (getClient() instanceof Pool p) {
+			return p.withTransaction(function);
+		} else {
+			return Future.failedFuture(new IllegalStateException("Client must be an instance of SqlConnection or Pool"));
+		}
+	}
+
+	// Runs the function inside an explicit transaction on the given connection:
+	// commit on success; rollback on failure (unless the failure is already a rollback),
+	// always propagating the original error.
+	private <T> Future<T> withTransaction(SqlConnection connection, Function<SqlConnection, Future<T>> function) {
+		return connection.begin().compose(tx ->
+			function.apply(connection).compose(
+				res -> tx.commit().compose(v -> Future.succeededFuture(res)),
+				err -> {
+					if (err instanceof TransactionRollbackException) {
+						return Future.failedFuture(err);
+					} else {
+						return tx.rollback().compose(
+								v -> Future.failedFuture(err),
+								failure -> Future.failedFuture(err));
+					}
+				}));
+	}
+
+	/**
+	 * Executes the provided function with a {@link SqlConnection}.
+	 *
+	 * <p>If the underlying client is a {@link Pool}, a connection is acquired and automatically closed
+	 * when the returned future completes.</p>
+	 *
+	 * @param function work to perform on a connection
+	 * @param <T> result type
+	 * @return a future completing with the function result
+	 */
+	default <T> Future<T> withConnection(Function<SqlConnection, Future<T>> function) {
+		if (getClient() instanceof SqlConnection c) {
+			return function.apply(c);
+		} else if (getClient() instanceof Pool p) {
+			return p.getConnection().compose(c ->
+				function.apply(c).onComplete(ar -> c.close())
+			);
+		} else {
+			return Future.failedFuture(new IllegalStateException("Client must be an instance of SqlConnection or Pool"));
+		}
+	}
+
+	/**
+	 * Extracts the first boolean value from the first row or returns a default when empty.
+	 *
+	 * @param rowSet result set
+	 * @param defaultValue value to return when the set is empty
+	 * @return the found boolean or the default
+	 */
+	static boolean findBoolean(RowSet<Row> rowSet, boolean defaultValue) {
+		return rowSet.size() != 0 ? rowSet.iterator().next().getBoolean(0) : defaultValue;
+	}
+
+	/**
+	 * Extracts the first boolean value from the first row, or {@code false} when empty.
+	 *
+	 * @param rowSet result set
+	 * @return the found boolean or {@code false}
+	 */
+	static boolean findBoolean(RowSet<Row> rowSet) {
+		return findBoolean(rowSet, false);
+	}
+
+	/**
+	 * Extracts the first integer value from the first row or returns a default when empty.
+	 *
+	 * @param rowSet result set
+	 * @param defaultValue value to return when the set is empty
+	 * @return the found integer or the default
+	 */
+	static int findInteger(RowSet<Row> rowSet, int defaultValue) {
+		return rowSet.size() != 0 ? rowSet.iterator().next().getInteger(0) : defaultValue;
+	}
+
+	/**
+	 * Extracts the first integer value from the first row, or {@code 0} when empty.
+	 *
+	 * @param rowSet result set
+	 * @return the found integer or {@code 0}
+	 */
+	static int findInteger(RowSet<Row> rowSet) {
+		return findInteger(rowSet, 0);
+	}
+
+	/**
+	 * Extracts the first long value from the first row or returns a default when empty.
+	 *
+	 * @param rowSet result set
+	 * @param defaultValue value to return when the set is empty
+	 * @return the found long or the default
+	 */
+	static long findLong(RowSet<Row> rowSet, long defaultValue) {
+		return rowSet.size() != 0 ? rowSet.iterator().next().getLong(0) : defaultValue;
+	}
+
+	/**
+	 * Extracts the first long value from the first row, or {@code 0L} when empty.
+	 *
+	 * @param rowSet result set
+	 * @return the found long or {@code 0L}
+	 */
+	static long findLong(RowSet<Row> rowSet) {
+		return findLong(rowSet, 0);
+	}
+
+	/**
+	 * Maps the first row to a value using the provided mapper, or returns the given default when empty.
+	 *
+	 * @param rowSet result set
+	 * @param mapper row mapper
+	 * @param defaultValue value to return when the set is empty
+	 * @param <T> mapped type
+	 * @return mapped value or the default
+	 */
+	static <T> T findUniqueOrDefault(RowSet<Row> rowSet, Function<Row, T> mapper, T defaultValue) {
+		return rowSet.size() != 0 ? mapper.apply(rowSet.iterator().next()) : defaultValue;
+	}
+
+	/**
+	 * Maps the first row to a value using the provided mapper, or returns {@code null} when empty.
+	 *
+	 * @param rowSet result set
+	 * @param mapper row mapper
+	 * @param <T> mapped type
+	 * @return mapped value or {@code null}
+	 */
+	static <T> T findUnique(RowSet<Row> rowSet, Function<Row, T> mapper) {
+		return findUniqueOrDefault(rowSet, mapper, null);
+	}
+
+	/**
+	 * Maps all rows in the given {@link RowSet} to a list using the provided mapper.
+	 *
+	 * @param rowSet result set
+	 * @param mapper row mapper
+	 * @param <T> element type
+	 * @return list of mapped values (possibly empty)
+	 */
+	static <T> List<T> findMany(RowSet<Row> rowSet, Function<Row, T> mapper) {
+		return rowSet.stream().map(mapper).toList();
+	}
+}
\ No newline at end of file
diff --git a/api/src/test/java/io/bosonnetwork/vertx/VersionedSchemaTests.java b/api/src/test/java/io/bosonnetwork/vertx/VersionedSchemaTests.java
new file mode 100644
index 0000000..ffd7e6c
--- /dev/null
+++ b/api/src/test/java/io/bosonnetwork/vertx/VersionedSchemaTests.java
@@ -0,0 +1,90 @@
+package io.bosonnetwork.vertx;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Stream;
+
+import io.vertx.core.Vertx;
+import io.vertx.jdbcclient.JDBCConnectOptions;
+import io.vertx.jdbcclient.JDBCPool;
+import io.vertx.pgclient.PgBuilder;
+import io.vertx.pgclient.PgConnectOptions;
+import io.vertx.sqlclient.PoolOptions;
+import io.vertx.sqlclient.SqlClient;
+
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+import io.vertx.junit5.Timeout;
+import io.vertx.junit5.VertxExtension;
+import io.vertx.junit5.VertxTestContext;
+
+import io.bosonnetwork.utils.FileUtils;
+
+@ExtendWith(VertxExtension.class)
+public class VersionedSchemaTests {
+ private static final Path testRoot = Path.of(System.getProperty("java.io.tmpdir"), "boson");
+ private static final Path testDir = Path.of(testRoot.toString(), "utils", "VersionedSchemaTests");
+
+ private static final List databases = new ArrayList<>();
+
+ @BeforeAll
+ static void setup(Vertx vertx, VertxTestContext context) throws Exception {
+ Files.createDirectories(testDir);
+
+ var sqliteURL = "jdbc:sqlite:" + testDir.resolve("test.db");
+ JDBCConnectOptions sqliteConnectOptions = new JDBCConnectOptions()
+ .setJdbcUrl(sqliteURL);
+ // Single connection recommended for SQLite
+ PoolOptions sqlitePoolOptions = new PoolOptions().setMaxSize(1);
+ SqlClient sqliteClient = JDBCPool.pool(vertx, sqliteConnectOptions, sqlitePoolOptions);
+ databases.add(Arguments.of("sqlite", sqliteClient));
+
+ var postgresURL = "postgresql://jingyu:secret@localhost:5432/test";
+ PgConnectOptions pgConnectOptions = PgConnectOptions.fromUri(postgresURL);
+ PoolOptions pgPoolOptions = new PoolOptions().setMaxSize(8);
+ SqlClient pgClient = PgBuilder.pool()
+ .with(pgPoolOptions)
+ .connectingTo(pgConnectOptions)
+ .using(vertx)
+ .build();
+ // databases.add(Arguments.of("postgres", pgClient));
+
+ context.completeNow();
+ }
+
+ @AfterAll
+ static void teardown() throws Exception {
+ FileUtils.deleteFile(testRoot);
+ }
+
+ static Stream testDatabaseProvider() {
+ return databases.stream();
+ }
+
+ @ParameterizedTest(name = "{0}")
+ @MethodSource("testDatabaseProvider")
+ @Timeout(value = 2, timeUnit = TimeUnit.MINUTES)
+ void testMigrate(String name, SqlClient client, VertxTestContext context) {
+ Path schemaPath = Path.of(getClass().getClassLoader().getResource("db/" + name).getPath());
+
+ VersionedSchema schema = VersionedSchema.init(client, schemaPath);
+ schema.migrate().onComplete(context.succeeding(v -> {
+ context.verify(() -> {
+ var sv = schema.getCurrentVersion();
+ assertEquals(10, sv.version());
+ assertEquals("Trigger: log message insertions into audit_log", sv.description());
+ });
+
+ context.completeNow();
+ }));
+ }
+}
\ No newline at end of file
diff --git a/api/src/test/resources/db/postgres/10_add_trigger.sql b/api/src/test/resources/db/postgres/10_add_trigger.sql
new file mode 100644
index 0000000..e9684ef
--- /dev/null
+++ b/api/src/test/resources/db/postgres/10_add_trigger.sql
@@ -0,0 +1,23 @@
-- Trigger: log message insertions into audit_log
-- Fires after each INSERT on messages and records the new row's content as
-- a MESSAGE_CREATED event. Re-runnable: the function is replaced in place
-- and the trigger is only created when not already present.
CREATE OR REPLACE FUNCTION log_message_insert()
RETURNS TRIGGER AS $$
BEGIN
    INSERT INTO audit_log(event_type, event_data)
    VALUES ('MESSAGE_CREATED', NEW.content);
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Create trigger only if it does not already exist
-- (PostgreSQL has no CREATE TRIGGER IF NOT EXISTS, so probe pg_trigger)
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM pg_trigger WHERE tgname = 'trg_log_message_insert'
    ) THEN
        CREATE TRIGGER trg_log_message_insert
        AFTER INSERT ON messages
        FOR EACH ROW
        EXECUTE FUNCTION log_message_insert();
    END IF;
END;
$$;
\ No newline at end of file
diff --git a/api/src/test/resources/db/postgres/1_init_schema.sql b/api/src/test/resources/db/postgres/1_init_schema.sql
new file mode 100644
index 0000000..ac00235
--- /dev/null
+++ b/api/src/test/resources/db/postgres/1_init_schema.sql
@@ -0,0 +1,15 @@
-- Initial schema creation
-- users: one row per account; email is unique and used for lookups.
CREATE TABLE IF NOT EXISTS users (
    id SERIAL PRIMARY KEY,
    name TEXT NOT NULL,
    email TEXT UNIQUE,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- messages: content posted by a user; user_id references users(id).
CREATE TABLE IF NOT EXISTS messages (
    id SERIAL PRIMARY KEY,
    user_id INTEGER NOT NULL,
    content TEXT NOT NULL,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (user_id) REFERENCES users (id)
);
\ No newline at end of file
diff --git a/api/src/test/resources/db/postgres/2_add_index.sql b/api/src/test/resources/db/postgres/2_add_index.sql
new file mode 100644
index 0000000..da5c2e7
--- /dev/null
+++ b/api/src/test/resources/db/postgres/2_add_index.sql
@@ -0,0 +1,3 @@
-- Add index for faster lookup
-- messages.user_id supports the FK join; users.email supports login lookups.
CREATE INDEX IF NOT EXISTS idx_messages_user_id ON messages(user_id);
CREATE INDEX IF NOT EXISTS idx_users_email ON users(email);
\ No newline at end of file
diff --git a/api/src/test/resources/db/postgres/3_insert_sample_data.sql b/api/src/test/resources/db/postgres/3_insert_sample_data.sql
new file mode 100644
index 0000000..458fc82
--- /dev/null
+++ b/api/src/test/resources/db/postgres/3_insert_sample_data.sql
@@ -0,0 +1,12 @@
-- Insert some test data
-- Each INSERT is guarded by a NOT EXISTS probe so the migration is
-- idempotent when re-applied.
INSERT INTO users (name, email)
SELECT 'Alice', 'alice@example.com'
WHERE NOT EXISTS (SELECT 1 FROM users WHERE email = 'alice@example.com');

INSERT INTO users (name, email)
SELECT 'Bob', 'bob@example.com'
WHERE NOT EXISTS (SELECT 1 FROM users WHERE email = 'bob@example.com');

-- One greeting message per user, skipped if any greeting already exists.
INSERT INTO messages (user_id, content)
SELECT u.id, 'Hello from ' || u.name FROM users u
WHERE NOT EXISTS (SELECT 1 FROM messages WHERE content LIKE 'Hello from%');
\ No newline at end of file
diff --git a/api/src/test/resources/db/postgres/4_add_profile_table.sql b/api/src/test/resources/db/postgres/4_add_profile_table.sql
new file mode 100644
index 0000000..4241ce0
--- /dev/null
+++ b/api/src/test/resources/db/postgres/4_add_profile_table.sql
@@ -0,0 +1,6 @@
-- User profile extension table: at most one row per user,
-- removed automatically when the owning user is deleted.
CREATE TABLE IF NOT EXISTS user_profiles (
    user_id INTEGER PRIMARY KEY,
    bio TEXT,
    avatar_url TEXT,
    FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
);
\ No newline at end of file
diff --git a/api/src/test/resources/db/postgres/5_add_last_login_column.sql b/api/src/test/resources/db/postgres/5_add_last_login_column.sql
new file mode 100644
index 0000000..47b7884
--- /dev/null
+++ b/api/src/test/resources/db/postgres/5_add_last_login_column.sql
@@ -0,0 +1,2 @@
-- Add new column safely if it doesn't exist (PostgreSQL supports
-- IF NOT EXISTS for ADD COLUMN, making this migration re-runnable;
-- the previous statement without the guard would fail on re-apply)
ALTER TABLE users ADD COLUMN IF NOT EXISTS last_login TIMESTAMP DEFAULT NULL;
\ No newline at end of file
diff --git a/api/src/test/resources/db/postgres/6_normalize_email_case.sql b/api/src/test/resources/db/postgres/6_normalize_email_case.sql
new file mode 100644
index 0000000..87d92b5
--- /dev/null
+++ b/api/src/test/resources/db/postgres/6_normalize_email_case.sql
@@ -0,0 +1,4 @@
-- Normalize user emails to lowercase
-- The WHERE clause limits the update to rows that actually change,
-- keeping the migration idempotent and cheap on re-run.
UPDATE users
SET email = LOWER(email)
WHERE email != LOWER(email);
\ No newline at end of file
diff --git a/api/src/test/resources/db/postgres/7_add_message_likes_table.sql b/api/src/test/resources/db/postgres/7_add_message_likes_table.sql
new file mode 100644
index 0000000..4ece356
--- /dev/null
+++ b/api/src/test/resources/db/postgres/7_add_message_likes_table.sql
@@ -0,0 +1,8 @@
-- Likes join table: one row per (message, user) pair; the composite
-- primary key prevents duplicate likes, and rows cascade away with
-- either the message or the user.
CREATE TABLE IF NOT EXISTS message_likes (
    message_id INTEGER NOT NULL,
    user_id INTEGER NOT NULL,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    PRIMARY KEY (message_id, user_id),
    FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE,
    FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
);
\ No newline at end of file
diff --git a/api/src/test/resources/db/postgres/8_add_view_user_messages_view.sql b/api/src/test/resources/db/postgres/8_add_view_user_messages_view.sql
new file mode 100644
index 0000000..1a80f67
--- /dev/null
+++ b/api/src/test/resources/db/postgres/8_add_view_user_messages_view.sql
@@ -0,0 +1,12 @@
-- Create a view combining users and messages
-- DROP + CREATE (rather than CREATE OR REPLACE) so the view definition can
-- change column structure between versions without failing.
DROP VIEW IF EXISTS view_user_messages;

CREATE VIEW view_user_messages AS
SELECT
    u.id AS user_id,
    u.name AS username,
    m.id AS message_id,
    m.content,
    m.created_at
FROM users u
JOIN messages m ON u.id = m.user_id;
\ No newline at end of file
diff --git a/api/src/test/resources/db/postgres/9_add_audit_log_table.sql b/api/src/test/resources/db/postgres/9_add_audit_log_table.sql
new file mode 100644
index 0000000..6d4cb12
--- /dev/null
+++ b/api/src/test/resources/db/postgres/9_add_audit_log_table.sql
@@ -0,0 +1,6 @@
-- Append-only audit trail; populated by the trigger installed in
-- migration 10 (event_type/event_data are free-form text).
CREATE TABLE IF NOT EXISTS audit_log (
    id SERIAL PRIMARY KEY,
    event_type TEXT NOT NULL,
    event_data TEXT,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
\ No newline at end of file
diff --git a/api/src/test/resources/db/sqlite/10_add_trigger.sql b/api/src/test/resources/db/sqlite/10_add_trigger.sql
new file mode 100644
index 0000000..1e4553c
--- /dev/null
+++ b/api/src/test/resources/db/sqlite/10_add_trigger.sql
@@ -0,0 +1,7 @@
-- Trigger: log message insertions into audit_log
-- Fires after each INSERT on messages and records the new row's content as
-- a MESSAGE_CREATED event. IF NOT EXISTS keeps this re-runnable.
CREATE TRIGGER IF NOT EXISTS trg_log_message_insert
AFTER INSERT ON messages
BEGIN
    INSERT INTO audit_log(event_type, event_data)
    VALUES ('MESSAGE_CREATED', NEW.content);
END;
\ No newline at end of file
diff --git a/api/src/test/resources/db/sqlite/1_init_schema.sql b/api/src/test/resources/db/sqlite/1_init_schema.sql
new file mode 100644
index 0000000..60c336b
--- /dev/null
+++ b/api/src/test/resources/db/sqlite/1_init_schema.sql
@@ -0,0 +1,15 @@
+-- Initial schema creation
+CREATE TABLE IF NOT EXISTS users (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ name TEXT NOT NULL,
+ email TEXT UNIQUE,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+
+CREATE TABLE IF NOT EXISTS messages (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ user_id INTEGER NOT NULL,
+ content TEXT NOT NULL,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ FOREIGN KEY (user_id) REFERENCES users (id)
+);
\ No newline at end of file
diff --git a/api/src/test/resources/db/sqlite/2_add_index.sql b/api/src/test/resources/db/sqlite/2_add_index.sql
new file mode 100644
index 0000000..da5c2e7
--- /dev/null
+++ b/api/src/test/resources/db/sqlite/2_add_index.sql
@@ -0,0 +1,3 @@
-- Add index for faster lookup
-- messages.user_id supports the FK join; users.email supports login lookups.
CREATE INDEX IF NOT EXISTS idx_messages_user_id ON messages(user_id);
CREATE INDEX IF NOT EXISTS idx_users_email ON users(email);
\ No newline at end of file
diff --git a/api/src/test/resources/db/sqlite/3_insert_sample_data.sql b/api/src/test/resources/db/sqlite/3_insert_sample_data.sql
new file mode 100644
index 0000000..458fc82
--- /dev/null
+++ b/api/src/test/resources/db/sqlite/3_insert_sample_data.sql
@@ -0,0 +1,12 @@
-- Insert some test data
-- Each INSERT is guarded by a NOT EXISTS probe so the migration is
-- idempotent when re-applied.
INSERT INTO users (name, email)
SELECT 'Alice', 'alice@example.com'
WHERE NOT EXISTS (SELECT 1 FROM users WHERE email = 'alice@example.com');

INSERT INTO users (name, email)
SELECT 'Bob', 'bob@example.com'
WHERE NOT EXISTS (SELECT 1 FROM users WHERE email = 'bob@example.com');

-- One greeting message per user, skipped if any greeting already exists.
INSERT INTO messages (user_id, content)
SELECT u.id, 'Hello from ' || u.name FROM users u
WHERE NOT EXISTS (SELECT 1 FROM messages WHERE content LIKE 'Hello from%');
\ No newline at end of file
diff --git a/api/src/test/resources/db/sqlite/4_add_profile_table.sql b/api/src/test/resources/db/sqlite/4_add_profile_table.sql
new file mode 100644
index 0000000..4241ce0
--- /dev/null
+++ b/api/src/test/resources/db/sqlite/4_add_profile_table.sql
@@ -0,0 +1,6 @@
-- User profile extension table: at most one row per user,
-- removed automatically when the owning user is deleted.
CREATE TABLE IF NOT EXISTS user_profiles (
    user_id INTEGER PRIMARY KEY,
    bio TEXT,
    avatar_url TEXT,
    FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
);
\ No newline at end of file
diff --git a/api/src/test/resources/db/sqlite/5_add_last_login_column.sql b/api/src/test/resources/db/sqlite/5_add_last_login_column.sql
new file mode 100644
index 0000000..47b7884
--- /dev/null
+++ b/api/src/test/resources/db/sqlite/5_add_last_login_column.sql
@@ -0,0 +1,2 @@
-- Add last_login column. NOTE: SQLite has no "ADD COLUMN IF NOT EXISTS",
-- so re-applying this statement fails; it relies on the schema runner
-- applying each migration version exactly once.
ALTER TABLE users ADD COLUMN last_login TIMESTAMP DEFAULT NULL;
\ No newline at end of file
diff --git a/api/src/test/resources/db/sqlite/6_normalize_email_case.sql b/api/src/test/resources/db/sqlite/6_normalize_email_case.sql
new file mode 100644
index 0000000..87d92b5
--- /dev/null
+++ b/api/src/test/resources/db/sqlite/6_normalize_email_case.sql
@@ -0,0 +1,4 @@
-- Normalize user emails to lowercase
-- The WHERE clause limits the update to rows that actually change,
-- keeping the migration idempotent and cheap on re-run.
UPDATE users
SET email = LOWER(email)
WHERE email != LOWER(email);
\ No newline at end of file
diff --git a/api/src/test/resources/db/sqlite/7_add_message_likes_table.sql b/api/src/test/resources/db/sqlite/7_add_message_likes_table.sql
new file mode 100644
index 0000000..4ece356
--- /dev/null
+++ b/api/src/test/resources/db/sqlite/7_add_message_likes_table.sql
@@ -0,0 +1,8 @@
-- Likes join table: one row per (message, user) pair; the composite
-- primary key prevents duplicate likes, and rows cascade away with
-- either the message or the user.
CREATE TABLE IF NOT EXISTS message_likes (
    message_id INTEGER NOT NULL,
    user_id INTEGER NOT NULL,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    PRIMARY KEY (message_id, user_id),
    FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE,
    FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
);
\ No newline at end of file
diff --git a/api/src/test/resources/db/sqlite/8_add_view_user_messages_view.sql b/api/src/test/resources/db/sqlite/8_add_view_user_messages_view.sql
new file mode 100644
index 0000000..39b3d4c
--- /dev/null
+++ b/api/src/test/resources/db/sqlite/8_add_view_user_messages_view.sql
@@ -0,0 +1,10 @@
-- Create a view combining users and messages
-- IF NOT EXISTS keeps the migration re-runnable (SQLite supports it
-- for CREATE VIEW, unlike PostgreSQL).
CREATE VIEW IF NOT EXISTS view_user_messages AS
SELECT
    u.id AS user_id,
    u.name AS username,
    m.id AS message_id,
    m.content,
    m.created_at
FROM users u
JOIN messages m ON u.id = m.user_id;
\ No newline at end of file
diff --git a/api/src/test/resources/db/sqlite/9_add_audit_log_table.sql b/api/src/test/resources/db/sqlite/9_add_audit_log_table.sql
new file mode 100644
index 0000000..dadc3e9
--- /dev/null
+++ b/api/src/test/resources/db/sqlite/9_add_audit_log_table.sql
@@ -0,0 +1,6 @@
-- Append-only audit trail; populated by the trigger installed in
-- migration 10 (event_type/event_data are free-form text).
CREATE TABLE IF NOT EXISTS audit_log (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    event_type TEXT NOT NULL,
    event_data TEXT,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
\ No newline at end of file
From 04cd1cad4dbfc95afe0a2d17cc02cb4daf2ca590 Mon Sep 17 00:00:00 2001
From: Jingyu
Date: Thu, 27 Nov 2025 10:21:11 +0800
Subject: [PATCH 4/4] Add types for the super node operation
---
.../java/io/bosonnetwork/NodeBlacklist.java | 44 ++
.../java/io/bosonnetwork/database/Filter.java | 448 ++++++++++++++++++
.../io/bosonnetwork/database/Ordering.java | 200 ++++++++
.../io/bosonnetwork/database/Pagination.java | 162 +++++++
.../{vertx => database}/VersionedSchema.java | 51 +-
.../{vertx => database}/VertxDatabase.java | 55 ++-
.../service/ClientAuthenticator.java | 11 +
.../io/bosonnetwork/service/ClientDevice.java | 21 +
.../io/bosonnetwork/service/ClientUser.java | 27 ++
.../java/io/bosonnetwork/service/Clients.java | 22 +
.../bosonnetwork/service/FederatedNode.java | 35 ++
.../service/FederatedService.java | 27 ++
.../io/bosonnetwork/service/Federation.java | 27 ++
.../service/FederationAuthenticator.java | 11 +
.../main/java/io/bosonnetwork/utils/Json.java | 19 +-
.../io/bosonnetwork/database/FilterTests.java | 165 +++++++
.../VersionedSchemaTests.java | 2 +-
.../kademlia/storage/DataStorageTests.java | 6 +-
18 files changed, 1290 insertions(+), 43 deletions(-)
create mode 100644 api/src/main/java/io/bosonnetwork/NodeBlacklist.java
create mode 100644 api/src/main/java/io/bosonnetwork/database/Filter.java
create mode 100644 api/src/main/java/io/bosonnetwork/database/Ordering.java
create mode 100644 api/src/main/java/io/bosonnetwork/database/Pagination.java
rename api/src/main/java/io/bosonnetwork/{vertx => database}/VersionedSchema.java (93%)
rename api/src/main/java/io/bosonnetwork/{vertx => database}/VertxDatabase.java (83%)
create mode 100644 api/src/main/java/io/bosonnetwork/service/ClientAuthenticator.java
create mode 100644 api/src/main/java/io/bosonnetwork/service/ClientDevice.java
create mode 100644 api/src/main/java/io/bosonnetwork/service/ClientUser.java
create mode 100644 api/src/main/java/io/bosonnetwork/service/Clients.java
create mode 100644 api/src/main/java/io/bosonnetwork/service/FederatedNode.java
create mode 100644 api/src/main/java/io/bosonnetwork/service/FederatedService.java
create mode 100644 api/src/main/java/io/bosonnetwork/service/Federation.java
create mode 100644 api/src/main/java/io/bosonnetwork/service/FederationAuthenticator.java
create mode 100644 api/src/test/java/io/bosonnetwork/database/FilterTests.java
rename api/src/test/java/io/bosonnetwork/{vertx => database}/VersionedSchemaTests.java (98%)
diff --git a/api/src/main/java/io/bosonnetwork/NodeBlacklist.java b/api/src/main/java/io/bosonnetwork/NodeBlacklist.java
new file mode 100644
index 0000000..de838d5
--- /dev/null
+++ b/api/src/main/java/io/bosonnetwork/NodeBlacklist.java
@@ -0,0 +1,44 @@
+package io.bosonnetwork;
+
/**
 * A blacklist of misbehaving nodes, keyed by node {@link Id} and/or network
 * host (IP address or hostname). Implementations decide how entries are
 * stored and whether/when they expire.
 */
public interface NodeBlacklist {
	/**
	 * Checks if the specified host is banned.
	 *
	 * @param host The IP host or hostname to check.
	 * @return true if the host is banned, false otherwise.
	 */
	boolean isBanned(String host);

	/**
	 * Checks if the specified ID is banned.
	 *
	 * @param id The ID to check.
	 * @return true if the ID is banned, false otherwise.
	 */
	boolean isBanned(Id id);

	/**
	 * Checks if the specified host or ID is banned.
	 * The ID is checked first; the host check is skipped when the ID matches.
	 *
	 * @param id The ID to check.
	 * @param host The IP host or hostname to check.
	 * @return true if the host or ID is banned, false otherwise.
	 */
	default boolean isBanned(Id id, String host) {
		return isBanned(id) || isBanned(host);
	}

	/**
	 * Adds a host to the blacklist.
	 *
	 * @param host The IP host or hostname to ban.
	 */
	void ban(String host);

	/**
	 * Adds an ID to the blacklist.
	 *
	 * @param id The ID to ban.
	 */
	void ban(Id id);
}
\ No newline at end of file
diff --git a/api/src/main/java/io/bosonnetwork/database/Filter.java b/api/src/main/java/io/bosonnetwork/database/Filter.java
new file mode 100644
index 0000000..4c00ca6
--- /dev/null
+++ b/api/src/main/java/io/bosonnetwork/database/Filter.java
@@ -0,0 +1,448 @@
+package io.bosonnetwork.database;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Objects;
+import java.util.stream.Collectors;
+
+/**
+ * SQL WHERE clause builder with safe parameter binding.
+ * Supports AND / OR composition and multiple operators.
+ */
public class Filter {
	/** A filter that represents no condition (always true). */
	public static final Filter NONE = new Filter();

	private Filter() {}

	/**
	 * Creates a filter from a raw SQL string.
	 *
	 * WARNING: This method does not perform parameter binding or validation.
	 * Use with caution to avoid SQL injection vulnerabilities.
	 *
	 * @param sql the raw SQL string
	 * @return a Filter containing the raw SQL
	 */
	public static Filter raw(String sql) {
		return new Raw(sql);
	}

	/**
	 * Creates an equality filter ({@code column = #{paramName}}).
	 *
	 * @param column the database column name
	 * @param paramName the parameter name to bind
	 * @param value the value to bind
	 * @return a Filter representing the equality condition
	 */
	public static Filter eq(String column, String paramName, Object value) {
		Objects.requireNonNull(column);
		Objects.requireNonNull(paramName);
		validateColumn(column);
		return new Binary(column, "=", paramName, value);
	}

	/**
	 * Creates an equality filter ({@code column = #{column}}), using the
	 * column name as the parameter name.
	 *
	 * @param column the database column name
	 * @param value the value to bind
	 * @return a Filter representing the equality condition
	 */
	public static Filter eq(String column, Object value) {
		return eq(column, column, value);
	}

	/**
	 * Creates a non-equality filter ({@code column <> #{paramName}}).
	 *
	 * @param column the database column name
	 * @param paramName the parameter name to bind
	 * @param value the value to bind
	 * @return a Filter representing the non-equality condition
	 */
	public static Filter ne(String column, String paramName, Object value) {
		Objects.requireNonNull(column);
		Objects.requireNonNull(paramName);
		validateColumn(column);
		return new Binary(column, "<>", paramName, value);
	}

	/**
	 * Creates a non-equality filter ({@code column <> #{column}}).
	 *
	 * @param column the database column name
	 * @param value the value to bind
	 * @return a Filter representing the non-equality condition
	 */
	public static Filter ne(String column, Object value) {
		return ne(column, column, value);
	}

	/**
	 * Creates a less-than filter ({@code column < #{paramName}}).
	 *
	 * @param column the database column name
	 * @param paramName the parameter name to bind
	 * @param value the value to bind
	 * @return a Filter representing the less-than condition
	 */
	public static Filter lt(String column, String paramName, Object value) {
		Objects.requireNonNull(column);
		Objects.requireNonNull(paramName);
		validateColumn(column);
		return new Binary(column, "<", paramName, value);
	}

	/**
	 * Creates a less-than filter ({@code column < #{column}}).
	 *
	 * @param column the database column name
	 * @param value the value to bind
	 * @return a Filter representing the less-than condition
	 */
	public static Filter lt(String column, Object value) {
		return lt(column, column, value);
	}

	/**
	 * Creates a less-than-or-equal filter ({@code column <= #{paramName}}).
	 *
	 * @param column the database column name
	 * @param paramName the parameter name to bind
	 * @param value the value to bind
	 * @return a Filter representing the less-than-or-equal condition
	 */
	public static Filter lte(String column, String paramName, Object value) {
		Objects.requireNonNull(column);
		Objects.requireNonNull(paramName);
		validateColumn(column);
		return new Binary(column, "<=", paramName, value);
	}

	/**
	 * Creates a less-than-or-equal filter ({@code column <= #{column}}).
	 *
	 * @param column the database column name
	 * @param value the value to bind
	 * @return a Filter representing the less-than-or-equal condition
	 */
	public static Filter lte(String column, Object value) {
		return lte(column, column, value);
	}

	/**
	 * Creates a greater-than filter ({@code column > #{paramName}}).
	 *
	 * @param column the database column name
	 * @param paramName the parameter name to bind
	 * @param value the value to bind
	 * @return a Filter representing the greater-than condition
	 */
	public static Filter gt(String column, String paramName, Object value) {
		Objects.requireNonNull(column);
		Objects.requireNonNull(paramName);
		validateColumn(column);
		return new Binary(column, ">", paramName, value);
	}

	/**
	 * Creates a greater-than filter ({@code column > #{column}}).
	 *
	 * @param column the database column name
	 * @param value the value to bind
	 * @return a Filter representing the greater-than condition
	 */
	public static Filter gt(String column, Object value) {
		return gt(column, column, value);
	}

	/**
	 * Creates a greater-than-or-equal filter ({@code column >= #{paramName}}).
	 *
	 * @param column the database column name
	 * @param paramName the parameter name to bind
	 * @param value the value to bind
	 * @return a Filter representing the greater-than-or-equal condition
	 */
	public static Filter gte(String column, String paramName, Object value) {
		Objects.requireNonNull(column);
		Objects.requireNonNull(paramName);
		validateColumn(column);
		return new Binary(column, ">=", paramName, value);
	}

	/**
	 * Creates a greater-than-or-equal filter ({@code column >= #{column}}).
	 *
	 * @param column the database column name
	 * @param value the value to bind
	 * @return a Filter representing the greater-than-or-equal condition
	 */
	public static Filter gte(String column, Object value) {
		return gte(column, column, value);
	}

	/**
	 * Creates a LIKE filter ({@code column LIKE #{paramName}}).
	 *
	 * @param column the database column name
	 * @param paramName the parameter name to bind
	 * @param value the value to bind
	 * @return a Filter representing the LIKE condition
	 */
	public static Filter like(String column, String paramName, Object value) {
		Objects.requireNonNull(column);
		Objects.requireNonNull(paramName);
		validateColumn(column);
		return new Binary(column, "LIKE", paramName, value);
	}

	/**
	 * Creates a LIKE filter ({@code column LIKE #{column}}).
	 *
	 * @param column the database column name
	 * @param value the value to bind
	 * @return a Filter representing the LIKE condition
	 */
	public static Filter like(String column, Object value) {
		return like(column, column, value);
	}

	/**
	 * Creates a filter checking if the column is NULL.
	 *
	 * @param column the database column name
	 * @return a Filter representing the IS NULL condition
	 */
	public static Filter isNull(String column) {
		Objects.requireNonNull(column);
		validateColumn(column);
		return new Unary(column, "IS NULL");
	}

	/**
	 * Creates a filter checking if the column is NOT NULL.
	 *
	 * @param column the database column name
	 * @return a Filter representing the IS NOT NULL condition
	 */
	public static Filter isNotNull(String column) {
		Objects.requireNonNull(column);
		validateColumn(column);
		return new Unary(column, "IS NOT NULL");
	}

	/**
	 * Creates an IN filter ({@code column IN (#{paramName1}, #{paramName2}, ...)}).
	 * An empty or null parameter map yields an always-false condition, matching
	 * SQL semantics for an empty IN list.
	 *
	 * @param column the database column name
	 * @param params the parameter name to value mappings to bind
	 * @return a Filter representing the IN condition
	 */
	public static Filter in(String column, Map<String, Object> params) {
		Objects.requireNonNull(column);
		validateColumn(column);
		if (params == null || params.isEmpty()) // empty IN always false
			return new Raw(" 1 = 0");

		return new In(column, Collections.unmodifiableMap(params));
	}

	/**
	 * Combines multiple filters with the AND operator.
	 *
	 * @param filters the filters to combine
	 * @return a Filter representing the conjunction of the given filters
	 */
	public static Filter and(Filter... filters) {
		if (filters == null || filters.length == 0)
			return Filter.NONE;

		if (filters.length == 1)
			return filters[0];

		return new Combine("AND", filters);
	}

	/**
	 * Combines multiple filters with the OR operator.
	 *
	 * @param filters the filters to combine
	 * @return a Filter representing the disjunction of the given filters
	 */
	public static Filter or(Filter... filters) {
		if (filters == null || filters.length == 0)
			return Filter.NONE;

		if (filters.length == 1)
			return filters[0];

		return new Combine("OR", filters);
	}

	/**
	 * Generates the SQL string for this filter.
	 *
	 * @return the SQL string
	 */
	public String toSqlTemplate() {
		return " 1 = 1";
	}

	/**
	 * Checks if this filter is empty (i.e., represents no condition).
	 *
	 * @return true if the filter is empty, false otherwise
	 */
	public boolean isEmpty() {
		return true;
	}

	/**
	 * Returns the named parameters to bind when executing the SQL template.
	 *
	 * @return an immutable map of parameter name to value; empty for filters
	 *         that bind no values
	 */
	public Map<String, Object> getParams() {
		return Map.of();
	}

	/**
	 * Validates that the column name contains only safe characters.
	 *
	 * @param column the column name to validate
	 * @throws IllegalArgumentException if the column name is invalid
	 */
	private static void validateColumn(String column) {
		// Only letters, digits, and underscore allowed (safe for SQL identifiers),
		// with an optional single "table.column" qualifier
		if (!column.matches("^[A-Za-z_][A-Za-z0-9_]*(?:\\.[A-Za-z_][A-Za-z0-9_]*)?$"))
			throw new IllegalArgumentException("Invalid SQL column name: " + column);
	}

	/**
	 * WARNING: raw() is not parameter-safe. Use at your own risk.
	 */
	private static class Raw extends Filter {
		private final String sql;

		private Raw(String sql) {
			this.sql = sql;
		}

		@Override
		public String toSqlTemplate() {
			return sql == null ? "" : sql;
		}

		@Override
		public boolean isEmpty() {
			return sql == null || sql.isEmpty();
		}
	}

	/** Single-operand condition, e.g. IS NULL / IS NOT NULL. */
	private static class Unary extends Filter {
		private final String column;
		private final String operator;

		private Unary(String column, String operator) {
			this.column = column;
			this.operator = operator;
		}

		@Override
		public String toSqlTemplate() {
			return " " + column + " " + operator;
		}

		@Override
		public boolean isEmpty() {
			return false;
		}
	}

	/** Two-operand condition binding one named parameter. */
	private static class Binary extends Filter {
		private final String column;
		private final String operator;
		private final String paramName;
		private final Object value;

		private Binary(String column, String operator, String paramName, Object value) {
			this.column = column;
			this.operator = operator;
			this.paramName = paramName;
			this.value = value;
		}

		@Override
		public String toSqlTemplate() {
			return " " + column + " " + operator + " #{" + paramName + "}";
		}

		@Override
		public boolean isEmpty() {
			return false;
		}

		@Override
		public Map<String, Object> getParams() {
			return Map.of(paramName, value);
		}
	}

	/** IN-list condition; binds every entry of the parameter map. */
	private static class In extends Filter {
		private final String column;
		private final Map<String, Object> params;

		private In(String column, Map<String, Object> params) {
			this.column = column;
			this.params = params;
		}

		@Override
		public String toSqlTemplate() {
			return " " + column + params.keySet().stream()
					.map(n -> "#{" + n + '}')
					.collect(Collectors.joining(", ", " IN (", ")"));
		}

		@Override
		public boolean isEmpty() {
			return false;
		}

		@Override
		public Map<String, Object> getParams() {
			return params;
		}
	}

	/** AND/OR composition of sub-filters. */
	private static class Combine extends Filter {
		private final String op;
		private final Filter[] filters;

		private Combine(String op, Filter[] filters) {
			this.op = op;
			this.filters = filters;
		}

		@Override
		public String toSqlTemplate() {
			return Arrays.stream(filters)
					.map(Filter::toSqlTemplate)
					.collect(Collectors.joining(" " + op, " (", ")"));
		}

		@Override
		public boolean isEmpty() {
			return false;
		}

		@Override
		public Map<String, Object> getParams() {
			// NOTE: toMap throws IllegalStateException if two combined filters
			// bind the same parameter name — give conflicting filters distinct
			// paramName values.
			return Arrays.stream(filters)
					.map(Filter::getParams)
					.flatMap(m -> m.entrySet().stream())
					.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
		}
	}
}
\ No newline at end of file
diff --git a/api/src/main/java/io/bosonnetwork/database/Ordering.java b/api/src/main/java/io/bosonnetwork/database/Ordering.java
new file mode 100644
index 0000000..fc14bb1
--- /dev/null
+++ b/api/src/main/java/io/bosonnetwork/database/Ordering.java
@@ -0,0 +1,200 @@
+package io.bosonnetwork.database;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Objects;
+import java.util.stream.Collectors;
+
/**
 * A helper class for building SQL ORDER BY clauses safely.
 * Supports multiple fields and prevents SQL injection by validating column names.
 *
 * Example:
 *   Ordering order = Ordering.by("name").asc()
 *       .then("created").desc()
 *       .build();
 *
 *   String sql = order.toSql(); // " ORDER BY name ASC, created DESC"
 *
 */
public class Ordering {
	/** Special instance representing no ordering. Method toSql() returns an empty string. */
	public static final Ordering NONE = new Ordering(Collections.emptyList());

	private final List<Field> fields;

	/** Sort direction. */
	public enum Direction {
		/** Ascending. */
		ASC,
		/** Descending. */
		DESC
	}

	/**
	 * Represents a column and its sorting direction in an SQL ORDER BY clause.
	 * A Field is used to specify the sorting criteria for a query. Each Field contains:
	 * - A column, representing the name of the database column to sort by.
	 * - A direction, indicating whether the sorting should be ascending or descending.
	 *
	 * @param column representing the name of the database column to sort by.
	 * @param direction indicating whether the sorting should be ascending or descending.
	 */
	public record Field(String column, Direction direction) {
	}

	private Ordering(List<Field> fields) {
		this.fields = Collections.unmodifiableList(fields);
	}

	/**
	 * Start an ordering chain.
	 *
	 * @param column the name of the column to sort by
	 * @param direction the direction to sort in, either ASC or DESC
	 * @return a new Builder instance configured with the specified column and direction
	 */
	public static Builder by(String column, Direction direction) {
		return new Builder(column, direction);
	}

	/**
	 * Start an ordering chain.
	 *
	 * @param column the first column to sort by with default direction ASC
	 * @return a new Builder instance configured with the specified column and direction
	 */
	public static Builder by(String column) {
		return new Builder(column, Direction.ASC);
	}

	/**
	 * Generates the SQL ORDER BY clause.
	 *
	 * @return SQL order by subclause like " ORDER BY name ASC, created DESC", or empty string if no fields.
	 */
	public String toSql() {
		if (fields.isEmpty()) return "";

		StringBuilder sb = new StringBuilder(" ORDER BY ");
		for (int i = 0; i < fields.size(); i++) {
			Field f = fields.get(i);
			sb.append(f.column()).append(" ").append(f.direction());
			if (i < fields.size() - 1) {
				sb.append(", ");
			}
		}
		return sb.toString();
	}

	/**
	 * Generates a unique identifier string representing the ordering configuration
	 * based on the fields. If no fields are present, it returns "none".
	 *
	 * @return a string in the format "orderBy_&lt;col&gt;_&lt;dir&gt;_..." or "none" if no fields are defined
	 */
	public String identifier() {
		if (fields.isEmpty())
			return "none";

		return fields.stream().map(f -> f.column() + '_' + f.direction())
				.collect(Collectors.joining("_", "orderBy_", ""));
	}

	/**
	 * Checks if the ordering instance contains no fields.
	 *
	 * @return true if the ordering has no fields; false otherwise.
	 */
	public boolean isEmpty() {
		return fields.isEmpty();
	}

	/**
	 * Returns the list of fields in this ordering.
	 *
	 * @return the unmodifiable list of fields
	 */
	public List<Field> fields() {
		return fields;
	}

	/**
	 * Builder for {@link Ordering}.
	 */
	public static final class Builder {
		private final List<Field> list = new ArrayList<>();

		private Builder(String column, Direction direction) {
			Objects.requireNonNull(column);
			validateColumn(column);
			list.add(new Field(column, direction)); // default
		}

		/**
		 * Sets the direction of the current field to ascending.
		 *
		 * @return this builder
		 */
		public Builder asc() {
			update(Direction.ASC);
			return this;
		}

		/**
		 * Sets the direction of the current field to descending.
		 *
		 * @return this builder
		 */
		public Builder desc() {
			update(Direction.DESC);
			return this;
		}

		/**
		 * Add a new field ordering after the previous one.
		 *
		 * @param column the next column to sort by
		 * @param direction the next direction to sort in, either ASC or DESC
		 * @return this builder
		 */
		public Builder then(String column, Direction direction) {
			Objects.requireNonNull(column);
			validateColumn(column);
			list.add(new Field(column, direction));
			return this;
		}

		/**
		 * Add a new field ordering after the previous one.
		 *
		 * @param column the next column to sort by with default direction ASC
		 * @return this builder
		 */
		public Builder then(String column) {
			return then(column, Direction.ASC); // default direction
		}

		// Replaces the direction of the most recently added field.
		private void update(Direction dir) {
			int last = list.size() - 1;
			Field current = list.get(last);
			list.set(last, new Field(current.column(), dir));
		}

		/**
		 * Builds the {@link Ordering} instance.
		 *
		 * @return the new Ordering instance
		 */
		public Ordering build() {
			return new Ordering(list);
		}
	}

	private static void validateColumn(String column) {
		// Only letters, digits, and underscore allowed (safe for SQL identifiers),
		// with an optional single "table.column" qualifier
		if (!column.matches("^[A-Za-z_][A-Za-z0-9_]*(?:\\.[A-Za-z_][A-Za-z0-9_]*)?$"))
			throw new IllegalArgumentException("Invalid SQL column name: " + column);
	}
}
\ No newline at end of file
diff --git a/api/src/main/java/io/bosonnetwork/database/Pagination.java b/api/src/main/java/io/bosonnetwork/database/Pagination.java
new file mode 100644
index 0000000..44ee3f0
--- /dev/null
+++ b/api/src/main/java/io/bosonnetwork/database/Pagination.java
@@ -0,0 +1,162 @@
+package io.bosonnetwork.database;
+
+import java.util.Map;
+
+/**
+ * Helper class for building SQL LIMIT/OFFSET clauses safely.
+ *
+ * Example:
+ * Pagination p = Pagination.page(3, 20); // pageIndex=3, pageSize=20
+ * p.toSql(); // " OFFSET 40 LIMIT 20"
+ */
+public class Pagination {
+ public static final Pagination NONE = new Pagination(0, 0);
+
+ private final long offset;
+ private final long limit;
+
+ private Pagination(long offset, long limit) {
+ if (offset < 0)
+ throw new IllegalArgumentException("offset must be >= 0");
+
+ if (limit < 0)
+ throw new IllegalArgumentException("limit must be >= 0");
+
+ this.offset = offset;
+ this.limit = limit;
+ }
+
+ /**
+ * Create Pagination using explicit limit/offset.
+ *
+ * @param offset the number of rows to skip
+ * @param limit the maximum number of rows to return
+ * @return a new Pagination instance
+ */
+ public static Pagination of(long offset, long limit) {
+ if (offset == 0 && limit == 0)
+ return NONE;
+
+ return new Pagination(offset, limit);
+ }
+
+ /**
+ * Create Pagination using 1-based page index and page size.
+ *
+ * pageIndex = 1 -> first page
+ *
+ *
+ * @param pageIndex the 1-based page index
+ * @param pageSize the size of the page
+ * @return a new Pagination instance
+ */
+ public static Pagination page(long pageIndex, long pageSize) {
+ if (pageSize <= 0)
+ throw new IllegalArgumentException("pageSize must be > 0");
+
+ if (pageIndex <= 0)
+ throw new IllegalArgumentException("pageIndex must be >= 1");
+
+ long offset = (pageIndex - 1) * pageSize;
+ return new Pagination(offset, pageSize);
+ }
+
+ /**
+ * Generates the SQL LIMIT/OFFSET clause.
+ *
+ * @return SQL fragment like " OFFSET 40 LIMIT 20".
+ * If offset and limit are both 0, returns "" (meaning no limit applied).
+ */
+ public String toSql() {
+ if (offset == 0 && limit == 0)
+ return ""; // caller may omit OFFSET/LIMIT completely
+
+ return " OFFSET " + offset + " LIMIT " + limit;
+ }
+
+ /**
+ * Generates a parameterized SQL LIMIT/OFFSET clause.
+ *
+ * @return a SQL fragment like " LIMIT #{limit} OFFSET #{offset}".
+ * If offset and limit are both 0, returns an empty string to indicate no limit is applied.
+ */
+ public String toSqlTemplate() {
+ if (offset == 0 && limit == 0)
+ return ""; // caller may omit OFFSET/LIMIT completely
+
+ return " LIMIT #{limit} OFFSET #{offset}";
+ }
+
+ /**
+ * Converts the pagination information into a map representation that can be used in Vert.x SqlTemplates.
+ * The map includes "offset" and "limit" keys if their values are non-zero.
+ * If both offset and limit are zero, an empty map is returned.
+ *
+ * @return a map containing the pagination parameters with keys "offset" and "limit",
+ * or an empty map if both values are zero.
+ */
+ public Map getParams() {
+ return (offset == 0 && limit == 0) ? Map.of() : Map.of("offset", offset, "limit", limit);
+ }
+
+ /**
+ * Identifies the type of pagination being used.
+ * If both offset and limit are 0, it returns "none", indicating no pagination.
+ * Otherwise, it returns "paginated", indicating a paginated query.
+ *
+ * @return a string representing the pagination type, either "none" or "paginated".
+ */
+ public String identifier() {
+ return (offset == 0 && limit == 0) ? "none" : "paginated";
+ }
+
+ /**
+ * Returns the offset (rows to skip).
+ *
+ * @return the offset
+ */
+ public long offset() {
+ return offset;
+ }
+
+ /**
+ * Returns the limit (max rows).
+ *
+ * @return the limit
+ */
+ public long limit() {
+ return limit;
+ }
+
+ /**
+ * Returns the current 1-based page index.
+ *
+ * @return the 1-based page index as a long. If no limit is applied, it defaults to 1.
+ */
+ public long page() {
+ if (limit == 0)
+ return 1; // For NONE or unlimited, treat as page 1
+
+ return (offset / limit) + 1;
+ }
+
+ /**
+ * Returns the size of the page (same as limit).
+ *
+ * @return the page size
+ */
+ public long pageSize() {
+ return limit;
+ }
+
+ /**
+ * Returns the page size to be used, defaulting to the given size if no limit is applied.
+ * If the current limit is 0, the provided size is returned; otherwise, the limit is returned.
+ *
+ * @param size the default page size to use if no limit is set
+ * @return the page size, either the provided size or the current limit
+ */
+ public long pageSizeOr(long size) {
+ return limit > 0 ? limit : size;
+ }
+}
\ No newline at end of file
diff --git a/api/src/main/java/io/bosonnetwork/vertx/VersionedSchema.java b/api/src/main/java/io/bosonnetwork/database/VersionedSchema.java
similarity index 93%
rename from api/src/main/java/io/bosonnetwork/vertx/VersionedSchema.java
rename to api/src/main/java/io/bosonnetwork/database/VersionedSchema.java
index 0dbb649..1d621f9 100644
--- a/api/src/main/java/io/bosonnetwork/vertx/VersionedSchema.java
+++ b/api/src/main/java/io/bosonnetwork/database/VersionedSchema.java
@@ -20,7 +20,7 @@
* SOFTWARE.
*/
-package io.bosonnetwork.vertx;
+package io.bosonnetwork.database;
import java.io.BufferedReader;
import java.io.File;
@@ -51,6 +51,7 @@
*
*/
public class VersionedSchema implements VertxDatabase {
+ private static final SchemaVersion EMPTY_VERSION = new SchemaVersion(0, "", "", 0, 0, true);
private final SqlClient client;
private final Path schemaPath;
private String databaseProductName;
@@ -111,6 +112,7 @@ public Path path() {
private VersionedSchema(SqlClient client, Path schemaPath) {
this.client = client;
this.schemaPath = schemaPath;
+ this.currentVersion = EMPTY_VERSION;
}
/**
@@ -203,6 +205,13 @@ private Future getSchemaVersion() {
.map(VersionedSchema::mapToSchemaVersion);
}
+ private static boolean getBoolean(Row row, String columnName) {
+ Object value = row.getValue(columnName);
+ return value instanceof Boolean b ? b :
+ (value instanceof Number n ? n.intValue() != 0 :
+ (value instanceof String s && Boolean.parseBoolean(s)));
+ }
+
private static SchemaVersion mapToSchemaVersion(RowSet rowSet) {
if (rowSet.size() == 0)
return null;
@@ -214,7 +223,7 @@ private static SchemaVersion mapToSchemaVersion(RowSet rowSet) {
String appliedBy = row.getString("applied_by");
long appliedAt = row.getLong("applied_at");
long consumedTime = row.getLong("consumed_time");
- boolean success = row.getBoolean("success");
+ boolean success = getBoolean(row, "success");
return new SchemaVersion(version, description, appliedBy, appliedAt, consumedTime, success);
}
@@ -492,8 +501,15 @@ private Future applyMigration(Migration migration) {
String statement;
while ((statement = nextStatement(reader)) != null) {
final String sql = statement;
- log.trace("Migration: executing statement {}", sql);
- chain = chain.compose(v -> connection.query(sql).execute().mapEmpty());
+
+ chain = chain.compose(v -> {
+ log.trace("Migration: executing statement {}", sql);
+ return connection.query(sql).execute()
+ .andThen(ar -> {
+ if (ar.failed())
+ log.error("Failed to execute SQL statement: {}", sql, ar.cause());
+ }).mapEmpty();
+ });
}
} catch (IOException e) {
return Future.failedFuture(new IllegalStateException("Failed to read migration file", e));
@@ -505,19 +521,20 @@ private Future applyMigration(Migration migration) {
log.debug("Migration: updating schema version...");
SchemaVersion newVersion = new SchemaVersion(migration.version, migration.description,
"", begin, duration, true);
- return connection.preparedQuery(insertSchemaVersion()).execute(
- Tuple.of(newVersion.version,
- newVersion.description,
- newVersion.appliedBy,
- newVersion.appliedAt,
- newVersion.consumedTime,
- newVersion.success)
- ).andThen(ar -> {
- if (ar.succeeded())
- log.debug("Migration: schema version updated to version {}", migration.version);
- else
- log.error("Migration: failed to update schema version", ar.cause());
- }).map(newVersion);
+ return connection.preparedQuery(insertSchemaVersion())
+ .execute(
+ Tuple.of(newVersion.version,
+ newVersion.description,
+ newVersion.appliedBy,
+ newVersion.appliedAt,
+ newVersion.consumedTime,
+ newVersion.success))
+ .map(newVersion);
+ }).andThen(ar -> {
+ if (ar.succeeded())
+ log.debug("Migration: schema version updated to version {}", migration.version);
+ else
+ log.error("Migration: failed to update schema version.", ar.cause());
}).onComplete(promise);
return promise.future();
diff --git a/api/src/main/java/io/bosonnetwork/vertx/VertxDatabase.java b/api/src/main/java/io/bosonnetwork/database/VertxDatabase.java
similarity index 83%
rename from api/src/main/java/io/bosonnetwork/vertx/VertxDatabase.java
rename to api/src/main/java/io/bosonnetwork/database/VertxDatabase.java
index 12584cd..e9bf4ff 100644
--- a/api/src/main/java/io/bosonnetwork/vertx/VertxDatabase.java
+++ b/api/src/main/java/io/bosonnetwork/database/VertxDatabase.java
@@ -20,7 +20,7 @@
* SOFTWARE.
*/
-package io.bosonnetwork.vertx;
+package io.bosonnetwork.database;
import java.util.List;
import java.util.function.Function;
@@ -33,6 +33,7 @@
import io.vertx.sqlclient.RowSet;
import io.vertx.sqlclient.SqlClient;
import io.vertx.sqlclient.SqlConnection;
+import io.vertx.sqlclient.SqlResult;
import io.vertx.sqlclient.TransactionRollbackException;
/**
@@ -103,10 +104,10 @@ default PreparedQuery> preparedQuery(String sql) {
* @return a future completing with the function result after commit, or failing after rollback
*/
default Future withTransaction(Function> function) {
- if (getClient() instanceof SqlConnection c) {
- return withTransaction(c, function);
- } else if (getClient() instanceof Pool p) {
+ if (getClient() instanceof Pool p) {
return p.withTransaction(function);
+ } else if (getClient() instanceof SqlConnection c) {
+ return withTransaction(c, function);
} else {
return Future.failedFuture(new IllegalStateException("Client must be an instance of SqlConnection or Pool"));
}
@@ -150,6 +151,13 @@ default Future withConnection(Function> function
}
}
+ private boolean getBoolean(Row row, int column) {
+ Object value = row.getValue(column);
+ return value instanceof Boolean b ? b :
+ (value instanceof Number n ? n.intValue() != 0 :
+ (value instanceof String s && Boolean.parseBoolean(s)));
+ }
+
/**
* Extracts the first boolean value from the first row or returns a default when empty.
*
@@ -157,8 +165,8 @@ default Future withConnection(Function> function
* @param defaultValue value to return when the set is empty
* @return the found boolean or the default
*/
- static boolean findBoolean(RowSet rowSet, boolean defaultValue) {
- return rowSet.size() != 0 ? rowSet.iterator().next().getBoolean(0) : defaultValue;
+ default boolean findBoolean(RowSet rowSet, boolean defaultValue) {
+ return rowSet.size() != 0 ? getBoolean(rowSet.iterator().next(),0) : defaultValue;
}
/**
@@ -167,7 +175,7 @@ static boolean findBoolean(RowSet rowSet, boolean defaultValue) {
* @param rowSet result set
* @return the found boolean or {@code false}
*/
- static boolean findBoolean(RowSet rowSet) {
+ default boolean findBoolean(RowSet rowSet) {
return findBoolean(rowSet, false);
}
@@ -178,7 +186,7 @@ static boolean findBoolean(RowSet rowSet) {
* @param defaultValue value to return when the set is empty
* @return the found integer or the default
*/
- static int findInteger(RowSet rowSet, int defaultValue) {
+ default int findInteger(RowSet rowSet, int defaultValue) {
return rowSet.size() != 0 ? rowSet.iterator().next().getInteger(0) : defaultValue;
}
@@ -188,7 +196,7 @@ static int findInteger(RowSet rowSet, int defaultValue) {
* @param rowSet result set
* @return the found integer or {@code 0}
*/
- static int findInteger(RowSet rowSet) {
+ default int findInteger(RowSet rowSet) {
return findInteger(rowSet, 0);
}
@@ -199,7 +207,7 @@ static int findInteger(RowSet rowSet) {
* @param defaultValue value to return when the set is empty
* @return the found long or the default
*/
- static long findLong(RowSet rowSet, long defaultValue) {
+ default long findLong(RowSet rowSet, long defaultValue) {
return rowSet.size() != 0 ? rowSet.iterator().next().getLong(0) : defaultValue;
}
@@ -209,7 +217,7 @@ static long findLong(RowSet rowSet, long defaultValue) {
* @param rowSet result set
* @return the found long or {@code 0L}
*/
- static long findLong(RowSet rowSet) {
+ default long findLong(RowSet rowSet) {
return findLong(rowSet, 0);
}
@@ -222,7 +230,7 @@ static long findLong(RowSet rowSet) {
* @param mapped type
* @return mapped value or the default
*/
- static T findUniqueOrDefault(RowSet rowSet, Function mapper, T defaultValue) {
+ default T findUniqueOrDefault(RowSet rowSet, Function mapper, T defaultValue) {
return rowSet.size() != 0 ? mapper.apply(rowSet.iterator().next()) : defaultValue;
}
@@ -234,7 +242,7 @@ static T findUniqueOrDefault(RowSet rowSet, Function mapper, T
* @param mapped type
* @return mapped value or {@code null}
*/
- static T findUnique(RowSet rowSet, Function mapper) {
+ default T findUnique(RowSet rowSet, Function mapper) {
return findUniqueOrDefault(rowSet, mapper, null);
}
@@ -246,7 +254,26 @@ static T findUnique(RowSet rowSet, Function mapper) {
* @param element type
* @return list of mapped values (possibly empty)
*/
- static List findMany(RowSet rowSet, Function mapper) {
+ default List findMany(RowSet rowSet, Function mapper) {
return rowSet.stream().map(mapper).toList();
}
+
+ /**
+ * Checks if the given SQL result affected any rows.
+ *
+ * @param result the SQL result to check
+ * @return true if at least one row was affected, false otherwise
+ */
+ default boolean hasEffectedRows(SqlResult> result) {
+ return result.rowCount() > 0;
+ }
+
+ /**
+ * Closes the database.
+ *
+ * @return a future completing when the connection is closed
+ */
+ default Future close() {
+ return getClient().close();
+ }
}
\ No newline at end of file
diff --git a/api/src/main/java/io/bosonnetwork/service/ClientAuthenticator.java b/api/src/main/java/io/bosonnetwork/service/ClientAuthenticator.java
new file mode 100644
index 0000000..608575d
--- /dev/null
+++ b/api/src/main/java/io/bosonnetwork/service/ClientAuthenticator.java
@@ -0,0 +1,11 @@
+package io.bosonnetwork.service;
+
+import java.util.concurrent.CompletableFuture;
+
+import io.bosonnetwork.Id;
+
+public interface ClientAuthenticator {
+ CompletableFuture authenticateUser(Id userId, byte[] nonce, byte[] signature);
+
+ CompletableFuture authenticateDevice(Id userId, Id deviceId, byte[] nonce, byte[] signature);
+}
\ No newline at end of file
diff --git a/api/src/main/java/io/bosonnetwork/service/ClientDevice.java b/api/src/main/java/io/bosonnetwork/service/ClientDevice.java
new file mode 100644
index 0000000..2ee25c9
--- /dev/null
+++ b/api/src/main/java/io/bosonnetwork/service/ClientDevice.java
@@ -0,0 +1,21 @@
+package io.bosonnetwork.service;
+
+import io.bosonnetwork.Id;
+
+public interface ClientDevice {
+ Id getId();
+
+ Id getUserId();
+
+ String getName();
+
+ String getApp();
+
+ long getCreated();
+
+ long getUpdated();
+
+ long getLastSeen();
+
+ String getLastAddress();
+}
\ No newline at end of file
diff --git a/api/src/main/java/io/bosonnetwork/service/ClientUser.java b/api/src/main/java/io/bosonnetwork/service/ClientUser.java
new file mode 100644
index 0000000..c133330
--- /dev/null
+++ b/api/src/main/java/io/bosonnetwork/service/ClientUser.java
@@ -0,0 +1,27 @@
+package io.bosonnetwork.service;
+
+import io.bosonnetwork.Id;
+
+public interface ClientUser {
+ Id getId();
+
+ boolean verifyPassphrase(String passphrase);
+
+ String getName();
+
+ String getAvatar();
+
+ String getEmail();
+
+ String getBio();
+
+ long getCreated();
+
+ long getUpdated();
+
+ boolean isAnnounce();
+
+ long getLastAnnounced();
+
+ String getPlanName();
+}
\ No newline at end of file
diff --git a/api/src/main/java/io/bosonnetwork/service/Clients.java b/api/src/main/java/io/bosonnetwork/service/Clients.java
new file mode 100644
index 0000000..eebecda
--- /dev/null
+++ b/api/src/main/java/io/bosonnetwork/service/Clients.java
@@ -0,0 +1,22 @@
+package io.bosonnetwork.service;
+
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+
+import io.bosonnetwork.Id;
+
+public interface Clients {
+ CompletableFuture extends ClientUser> getUser(Id userId);
+
+ CompletableFuture existsUser(Id userId);
+
+ CompletableFuture> getDevices(Id userId);
+
+ CompletableFuture extends ClientDevice> getDevice(Id deviceId);
+
+ CompletableFuture existsDevice(Id deviceId);
+
+ CompletableFuture existsDevice(Id userId, Id deviceId);
+
+ // CompletableFuture addDevice(Id deviceId, Id userId, String name, String app);
+}
\ No newline at end of file
diff --git a/api/src/main/java/io/bosonnetwork/service/FederatedNode.java b/api/src/main/java/io/bosonnetwork/service/FederatedNode.java
new file mode 100644
index 0000000..c3de0a3
--- /dev/null
+++ b/api/src/main/java/io/bosonnetwork/service/FederatedNode.java
@@ -0,0 +1,35 @@
+package io.bosonnetwork.service;
+
+import io.bosonnetwork.Id;
+
+public interface FederatedNode {
+ Id getId();
+
+ String getHost();
+
+ int getPort();
+
+ String getApiEndpoint();
+
+ String getSoftware();
+
+ String getVersion();
+
+ String getName();
+
+ String getLogo();
+
+ String getWebsite();
+
+ String getContact();
+
+ String getDescription();
+
+ boolean isTrusted();
+
+ int getReputation();
+
+ long getCreated();
+
+ long getUpdated();
+}
\ No newline at end of file
diff --git a/api/src/main/java/io/bosonnetwork/service/FederatedService.java b/api/src/main/java/io/bosonnetwork/service/FederatedService.java
new file mode 100644
index 0000000..5772b50
--- /dev/null
+++ b/api/src/main/java/io/bosonnetwork/service/FederatedService.java
@@ -0,0 +1,27 @@
+package io.bosonnetwork.service;
+
+import io.bosonnetwork.Id;
+
+public interface FederatedService {
+ Id getPeerId();
+
+ Id getNodeId();
+
+ Id getOriginId();
+
+ String getHost();
+
+ int getPort();
+
+ String getAlternativeEndpoint();
+
+ String getServiceId();
+
+ String getServiceName();
+
+ String getEndpoint();
+
+ long getCreated();
+
+ long getUpdated();
+}
\ No newline at end of file
diff --git a/api/src/main/java/io/bosonnetwork/service/Federation.java b/api/src/main/java/io/bosonnetwork/service/Federation.java
new file mode 100644
index 0000000..6afd184
--- /dev/null
+++ b/api/src/main/java/io/bosonnetwork/service/Federation.java
@@ -0,0 +1,27 @@
+package io.bosonnetwork.service;
+
+import java.util.concurrent.CompletableFuture;
+
+import io.bosonnetwork.Id;
+
+public interface Federation {
+ public CompletableFuture extends FederatedNode> getNode(Id nodeId, boolean federateIfNotExists);
+
+ default CompletableFuture extends FederatedNode> getNode(Id nodeId) {
+ return getNode(nodeId, false);
+ }
+
+ public CompletableFuture existsNode(Id nodeId);
+
+
+
+ // public CompletableFuture addNode(FederatedNode node);
+
+ // public CompletableFuture updateNode(FederatedNode node);
+
+ // public CompletableFuture removeNode(Id nodeId);
+
+ // public CompletableFuture> getAllServices(Id nodeId);
+
+ public CompletableFuture extends FederatedService> getService(Id peerId);
+}
\ No newline at end of file
diff --git a/api/src/main/java/io/bosonnetwork/service/FederationAuthenticator.java b/api/src/main/java/io/bosonnetwork/service/FederationAuthenticator.java
new file mode 100644
index 0000000..f017fad
--- /dev/null
+++ b/api/src/main/java/io/bosonnetwork/service/FederationAuthenticator.java
@@ -0,0 +1,11 @@
+package io.bosonnetwork.service;
+
+import java.util.concurrent.CompletableFuture;
+
+import io.bosonnetwork.Id;
+
+public interface FederationAuthenticator {
+ CompletableFuture authenticateNode(Id nodeId, byte[] nonce, byte[] signature);
+
+ CompletableFuture authenticatePeer(Id nodeId, Id peerId, byte[] nonce, byte[] signature);
+}
\ No newline at end of file
diff --git a/api/src/main/java/io/bosonnetwork/utils/Json.java b/api/src/main/java/io/bosonnetwork/utils/Json.java
index f93be4f..340e45b 100644
--- a/api/src/main/java/io/bosonnetwork/utils/Json.java
+++ b/api/src/main/java/io/bosonnetwork/utils/Json.java
@@ -113,7 +113,7 @@ public class Json {
* @return {@code true} if the parser is handling a binary format (CBOR), {@code false} otherwise
*/
public static boolean isBinaryFormat(JsonParser p) {
- // Now we only sport JSON, CBOR and TOML formats, CBOR is the only binary format
+ // Now we only support JSON, CBOR and TOML formats; CBOR is the only binary format
return p instanceof CBORParser;
}
@@ -124,7 +124,7 @@ public static boolean isBinaryFormat(JsonParser p) {
* @return {@code true} if the generator is handling a binary format (CBOR), {@code false} otherwise
*/
public static boolean isBinaryFormat(JsonGenerator gen) {
- // Now we only sport JSON, CBOR and TOML formats, CBOR is the only binary format
+ // Now we only support JSON, CBOR and TOML formats; CBOR is the only binary format
return gen instanceof CBORGenerator;
}
@@ -303,6 +303,7 @@ public Date deserialize(JsonParser p, DeserializationContext ctx) throws IOExcep
}
}
+
/**
* Returns the default date and time format for serializing and deserializing {@link java.util.Date} objects.
*
@@ -737,7 +738,7 @@ protected static SimpleModule bosonJsonModule() {
}
/**
- * Creates the Jackson JSON factory, without auto-close the source and target.
+ * Creates the Jackson JSON factory without auto-closing the source and target.
*
* @return the {@code JsonFactory} object.
*/
@@ -753,7 +754,7 @@ public static JsonFactory jsonFactory() {
}
/**
- * Creates the Jackson CBOR factory, without auto-close the source and target.
+ * Creates the Jackson CBOR factory without auto-closing the source and target.
*
* @return the {@code CBORFactory} object.
*/
@@ -776,6 +777,7 @@ public static CBORFactory cborFactory() {
public static ObjectMapper objectMapper() {
if (_objectMapper == null) {
_objectMapper = JsonMapper.builder(jsonFactory())
+ .enable(DeserializationFeature.USE_BIG_DECIMAL_FOR_FLOATS)
.disable(MapperFeature.AUTO_DETECT_CREATORS)
.disable(MapperFeature.AUTO_DETECT_FIELDS)
.disable(MapperFeature.AUTO_DETECT_GETTERS)
@@ -802,6 +804,7 @@ public static ObjectMapper objectMapper() {
public static CBORMapper cborMapper() {
if (_cborMapper == null) {
_cborMapper = CBORMapper.builder(cborFactory())
+ .enable(DeserializationFeature.USE_BIG_DECIMAL_FOR_FLOATS)
.disable(MapperFeature.AUTO_DETECT_CREATORS)
.disable(MapperFeature.AUTO_DETECT_FIELDS)
.disable(MapperFeature.AUTO_DETECT_GETTERS)
@@ -832,6 +835,7 @@ public static YAMLMapper yamlMapper() {
factory.disable(JsonParser.Feature.AUTO_CLOSE_SOURCE);
_yamlMapper = YAMLMapper.builder(factory)
+ .enable(DeserializationFeature.USE_BIG_DECIMAL_FOR_FLOATS)
.disable(MapperFeature.AUTO_DETECT_CREATORS)
.disable(MapperFeature.AUTO_DETECT_FIELDS)
.disable(MapperFeature.AUTO_DETECT_GETTERS)
@@ -1150,6 +1154,7 @@ public static void initializeBosonJsonModule() {
return; // already registered
DatabindCodec.mapper().registerModule(bosonJsonModule());
+ DatabindCodec.mapper().enable(DeserializationFeature.USE_BIG_DECIMAL_FOR_FLOATS);
}
/**
@@ -1341,7 +1346,7 @@ public JsonContext withSharedAttribute(Object key, Object value) {
/**
* Returns a new {@code JsonContext} with the specified shared attributes, replacing all previous shared attributes.
*
- * @param attributes the shared attributes to set (may be {@code null} or empty)
+ * @param attributes the shared attributes to set (may be {@code null} or empty)
* @return a new context with the specified shared attributes
*/
@Override
@@ -1378,7 +1383,7 @@ public JsonContext withoutSharedAttribute(Object key) {
* the context is returned unchanged.
*
* @param key the per-call attribute key
- * @param value the per-call attribute value (may be {@code null}, see behavior above)
+ * @param value the per-call attribute value (may be {@code null}, see behavior above)
* @return a new context with the updated per-call attribute
*/
@Override
@@ -1389,7 +1394,7 @@ public JsonContext withPerCallAttribute(Object key, Object value) {
if (_shared.containsKey(key)) {
value = NULL_SURROGATE;
} else if ((_nonShared == null) || !_nonShared.containsKey(key)) {
- // except if non-mutable shared list has no entry, we don't care
+ // except if an immutable shared list has no entry, we don't care
return this;
} else {
//noinspection RedundantCollectionOperation
diff --git a/api/src/test/java/io/bosonnetwork/database/FilterTests.java b/api/src/test/java/io/bosonnetwork/database/FilterTests.java
new file mode 100644
index 0000000..c59ce78
--- /dev/null
+++ b/api/src/test/java/io/bosonnetwork/database/FilterTests.java
@@ -0,0 +1,165 @@
+package io.bosonnetwork.database;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import org.junit.jupiter.api.Test;
+
+import io.bosonnetwork.utils.Json;
+
+public class FilterTests {
+ @Test
+ void testNone() {
+ Filter filter = Filter.NONE;
+ assertEquals(" 1 = 1", filter.toSqlTemplate());
+ assertTrue(filter.getParams().isEmpty());
+ }
+
+ @Test
+ void testEqual() {
+ Filter filter = Filter.eq("foo", "hello");
+ assertEquals(" foo = #{foo}", filter.toSqlTemplate());
+ assertEquals("hello", filter.getParams().get("foo"));
+ }
+
+ @Test
+ void testNotEqual() {
+ Filter filter = Filter.ne("foo", "world");
+ assertEquals(" foo <> #{foo}", filter.toSqlTemplate());
+ assertEquals("world", filter.getParams().get("foo"));
+ }
+
+ @Test
+ void testLessThan() {
+ Filter filter = Filter.lt("foo", 10);
+ assertEquals(" foo < #{foo}", filter.toSqlTemplate());
+ assertEquals(10, filter.getParams().get("foo"));
+ }
+
+ @Test
+ void testLessThanOrEqual() {
+ Filter filter = Filter.lte("foo", 15);
+ assertEquals(" foo <= #{foo}", filter.toSqlTemplate());
+ assertEquals(15, filter.getParams().get("foo"));
+ }
+
+ @Test
+ void testGreaterThan() {
+ Filter filter = Filter.gt("foo", 20);
+ assertEquals(" foo > #{foo}", filter.toSqlTemplate());
+ assertEquals(20, filter.getParams().get("foo"));
+ }
+
+ @Test
+ void testGreaterThanOrEqual() {
+ Filter filter = Filter.gte("foo", 25);
+ assertEquals(" foo >= #{foo}", filter.toSqlTemplate());
+ assertEquals(25, filter.getParams().get("foo"));
+ }
+
+ @Test
+ void testLike() {
+ Filter filter = Filter.like("foo", "ABC%");
+ assertEquals(" foo LIKE #{foo}", filter.toSqlTemplate());
+ assertEquals("ABC%", filter.getParams().get("foo"));
+ }
+
+ @Test
+ void testIsNull() {
+ Filter filter = Filter.isNull("foo");
+ assertEquals(" foo IS NULL", filter.toSqlTemplate());
+ }
+
+ @Test
+ void testIsNotNull() {
+ Filter filter = Filter.isNotNull("foo");
+ assertEquals(" foo IS NOT NULL", filter.toSqlTemplate());
+ }
+
+ @Test
+ void testIn() {
+ Filter filter = Filter.in("foo", Map.of());
+ assertEquals(" 1 = 0", filter.toSqlTemplate());
+
+ Map params = new LinkedHashMap<>();
+ params.put("type1", 1);
+ params.put("type2", 2);
+ params.put("type3", 3);
+
+ filter = Filter.in("foo", params);
+ assertEquals(" foo IN (#{type1}, #{type2}, #{type3})", filter.toSqlTemplate());
+ System.out.println(Json.toPrettyString(filter.getParams()));
+ assertEquals(params, filter.getParams());
+ }
+
+ @Test
+ void testAnd() {
+ Filter filter = Filter.and();
+ assertEquals(" 1 = 1", filter.toSqlTemplate());
+
+ filter = Filter.and(Filter.eq("foo", 10));
+ assertEquals(" foo = #{foo}", filter.toSqlTemplate());
+
+ Map inParams = new LinkedHashMap<>();
+ inParams.put("qux1", "QUX1");
+ inParams.put("qux2", "QUX2");
+ inParams.put("qux3", "QUX3");
+
+ filter = Filter.and(
+ Filter.eq("foo", 10),
+ Filter.lte("bar", 20),
+ Filter.isNull("baz"),
+ Filter.in("qux", inParams));
+
+ assertEquals(" ( foo = #{foo} AND bar <= #{bar} AND baz IS NULL AND qux IN (#{qux1}, #{qux2}, #{qux3}))", filter.toSqlTemplate());
+
+ Map params = filter.getParams();
+ System.out.println(Json.toPrettyString(params));
+ assertEquals(5, params.size());
+ assertEquals(10, params.get("foo"));
+ assertEquals(20, params.get("bar"));
+ assertEquals("QUX1", params.get("qux1"));
+ assertEquals("QUX2", params.get("qux2"));
+ assertEquals("QUX3", params.get("qux3"));
+ }
+
+ @Test
+ void testOr() {
+ Filter filter = Filter.and();
+ assertEquals(" 1 = 1", filter.toSqlTemplate());
+
+ filter = Filter.and(Filter.eq("foo", "foobar"));
+ assertEquals(" foo = #{foo}", filter.toSqlTemplate());
+
+ Map inParams = new LinkedHashMap<>();
+ inParams.put("qux1", "QUX1");
+ inParams.put("qux2", "QUX2");
+ inParams.put("qux3", "QUX3");
+
+ filter = Filter.or(
+ Filter.eq("foo", 10),
+ Filter.lte("bar", 20),
+ Filter.isNull("baz"),
+ Filter.in("qux", inParams));
+
+ assertEquals(" ( foo = #{foo} OR bar <= #{bar} OR baz IS NULL OR qux IN (#{qux1}, #{qux2}, #{qux3}))", filter.toSqlTemplate());
+
+ Map params = filter.getParams();
+ System.out.println(Json.toPrettyString(params));
+ assertEquals(5, params.size());
+ assertEquals(10, params.get("foo"));
+ assertEquals(20, params.get("bar"));
+ assertEquals("QUX1", params.get("qux1"));
+ assertEquals("QUX2", params.get("qux2"));
+ assertEquals("QUX3", params.get("qux3"));
+ }
+
+ @Test
+ void testRaw() {
+ Filter filter = Filter.raw("foo = bar");
+ assertEquals("foo = bar", filter.toSqlTemplate());
+ }
+}
\ No newline at end of file
diff --git a/api/src/test/java/io/bosonnetwork/vertx/VersionedSchemaTests.java b/api/src/test/java/io/bosonnetwork/database/VersionedSchemaTests.java
similarity index 98%
rename from api/src/test/java/io/bosonnetwork/vertx/VersionedSchemaTests.java
rename to api/src/test/java/io/bosonnetwork/database/VersionedSchemaTests.java
index ffd7e6c..309b828 100644
--- a/api/src/test/java/io/bosonnetwork/vertx/VersionedSchemaTests.java
+++ b/api/src/test/java/io/bosonnetwork/database/VersionedSchemaTests.java
@@ -1,4 +1,4 @@
-package io.bosonnetwork.vertx;
+package io.bosonnetwork.database;
import static org.junit.jupiter.api.Assertions.assertEquals;
diff --git a/dht/src/test/java/io/bosonnetwork/kademlia/storage/DataStorageTests.java b/dht/src/test/java/io/bosonnetwork/kademlia/storage/DataStorageTests.java
index 938d320..cc8f9bf 100644
--- a/dht/src/test/java/io/bosonnetwork/kademlia/storage/DataStorageTests.java
+++ b/dht/src/test/java/io/bosonnetwork/kademlia/storage/DataStorageTests.java
@@ -103,15 +103,13 @@ static void setupDataStorage(Vertx vertx, VertxTestContext context) {
}));
futures.add(future2);
- /*
- var postgresqlURL = "postgresql://jingyu@localhost:5432/test";
+ var postgresqlURL = "postgresql://jingyu:secret@localhost:5432/test";
postgresStorage = new PostgresStorage(postgresqlURL);
var future3 = postgresStorage.initialize(vertx, valueExpiration, peerInfoExpiration).onComplete(context.succeeding(version -> {
context.verify(() -> assertEquals(CURRENT_SCHEMA_VERSION, version));
dataStorages.add(Arguments.of("PostgresStorage", postgresStorage));
}));
- futures.add(future3);
- */
+ // futures.add(future3);
Future.all(futures).onSuccess(unused -> {
try {